code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import sys
import numpy as np
import os
sys.path.append("./pb")
from pb.localization_pb2 import Pose
# 文件取出的顺序变
def getpath():
for root,dirs,files in os.walk(r"./dump"):
for file in files:
#获取文件路径
filepath= os.path.join(root,file)
path_this.append(filepath)
print(filepath)
with open("path.txt","a" ) as f:
f.write(filepath+"\n")
f.close()
# 可以按照文件夹顺序拿到
def getpath2():
path ="./dump" #指定需要读取文件的目录
files =os.listdir(path) #采用listdir来读取所有文件
files.sort() #排序
for file_ in files: #循环读取每个文件名
filepath= os.path.join(path,file_)
# print(filepath)
filepath = filepath + "/localization_output/localization_output"
path_this.append(filepath)
# print(filepath)
with open("path.txt","a" ) as f:
f.write(filepath+"\n")
f.close()
# 获取位置点坐标
def getstate():
x = []
y = []
for ppath in path_this:
pre_str = open(ppath,'rb').read()
state.ParseFromString(pre_str)
x.append(state.position.x)
y.append(state.position.y)
with open("data_pos.txt","a")as f:
f.write(str(state.position))
f.close()
with open("data_vel.txt","a" ) as f:
f.write(str(state.velocity))
f.close()
print(state.position)
return x,y
if __name__ == '__main__':
path_this=[]
getpath2()
state = Pose()
x ,y = getstate()
x = np.array(x)
y = np.array(y)
point = np.vstack([x,y])
print(point[0, :])
np.save('./point.npz',point)
# print(x)
| [
"sys.path.append",
"numpy.save",
"os.walk",
"pb.localization_pb2.Pose",
"numpy.array",
"os.path.join",
"os.listdir",
"numpy.vstack"
] | [((42, 65), 'sys.path.append', 'sys.path.append', (['"""./pb"""'], {}), "('./pb')\n", (57, 65), False, 'import sys\n'), ((159, 176), 'os.walk', 'os.walk', (['"""./dump"""'], {}), "('./dump')\n", (166, 176), False, 'import os\n'), ((569, 585), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (579, 585), False, 'import os\n'), ((1569, 1575), 'pb.localization_pb2.Pose', 'Pose', ([], {}), '()\n', (1573, 1575), False, 'from pb.localization_pb2 import Pose\n'), ((1606, 1617), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1614, 1617), True, 'import numpy as np\n'), ((1626, 1637), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1634, 1637), True, 'import numpy as np\n'), ((1650, 1667), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (1659, 1667), True, 'import numpy as np\n'), ((1694, 1723), 'numpy.save', 'np.save', (['"""./point.npz"""', 'point'], {}), "('./point.npz', point)\n", (1701, 1723), True, 'import numpy as np\n'), ((698, 723), 'os.path.join', 'os.path.join', (['path', 'file_'], {}), '(path, file_)\n', (710, 723), False, 'import os\n'), ((257, 281), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (269, 281), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from numpy import linspace, where, sin, pi
def get_data(self):
"""Generate the toothsaw vector
Parameters
----------
self : ImportGenToothSaw
An ImportGenToothSaw object
Returns
-------
vect: ndarray
The generated toothsaw vector
"""
time = linspace(start=0, stop=self.Tf, num=self.N, endpoint=False)
T = 1 / self.f
time = (time + self.Dt) % T
if self.type_signal == 0: # forward toothsaw carrier
Y = (
where(time <= 0.5 * T, 1, 0) * time
+ where(time > 0.5 * T, 1, 0) * (time - T)
) / (0.5 * T)
elif self.type_signal == 1: # backwards toothsaw carrier
Y = -(
where(time <= 0.5 * T, 1, 0) * time
+ where(time > 0.5 * T, 1, 0) * (time - T)
) / (0.5 * T)
elif self.type_signal == 2: # symmetrical toothsaw carrier
t1 = T / 4
t2 = T - t1
Y = (
where(time <= t1, 1, 0) * time / t1
+ where(time > t1, 1, 0)
* where(time < t2, 1, 0)
* (-time + 0.5 * T)
/ (-t1 + 0.5 * T)
+ where(time >= t2, 1, 0) * (time - T) / (T - t2)
)
return self.edit_matrix(self.A * Y)
| [
"numpy.where",
"numpy.linspace"
] | [((324, 383), 'numpy.linspace', 'linspace', ([], {'start': '(0)', 'stop': 'self.Tf', 'num': 'self.N', 'endpoint': '(False)'}), '(start=0, stop=self.Tf, num=self.N, endpoint=False)\n', (332, 383), False, 'from numpy import linspace, where, sin, pi\n'), ((521, 549), 'numpy.where', 'where', (['(time <= 0.5 * T)', '(1)', '(0)'], {}), '(time <= 0.5 * T, 1, 0)\n', (526, 549), False, 'from numpy import linspace, where, sin, pi\n'), ((571, 598), 'numpy.where', 'where', (['(time > 0.5 * T)', '(1)', '(0)'], {}), '(time > 0.5 * T, 1, 0)\n', (576, 598), False, 'from numpy import linspace, where, sin, pi\n'), ((723, 751), 'numpy.where', 'where', (['(time <= 0.5 * T)', '(1)', '(0)'], {}), '(time <= 0.5 * T, 1, 0)\n', (728, 751), False, 'from numpy import linspace, where, sin, pi\n'), ((773, 800), 'numpy.where', 'where', (['(time > 0.5 * T)', '(1)', '(0)'], {}), '(time > 0.5 * T, 1, 0)\n', (778, 800), False, 'from numpy import linspace, where, sin, pi\n'), ((1151, 1174), 'numpy.where', 'where', (['(time >= t2)', '(1)', '(0)'], {}), '(time >= t2, 1, 0)\n', (1156, 1174), False, 'from numpy import linspace, where, sin, pi\n'), ((965, 988), 'numpy.where', 'where', (['(time <= t1)', '(1)', '(0)'], {}), '(time <= t1, 1, 0)\n', (970, 988), False, 'from numpy import linspace, where, sin, pi\n'), ((1015, 1037), 'numpy.where', 'where', (['(time > t1)', '(1)', '(0)'], {}), '(time > t1, 1, 0)\n', (1020, 1037), False, 'from numpy import linspace, where, sin, pi\n'), ((1052, 1074), 'numpy.where', 'where', (['(time < t2)', '(1)', '(0)'], {}), '(time < t2, 1, 0)\n', (1057, 1074), False, 'from numpy import linspace, where, sin, pi\n')] |
import numpy as np
from CIS import *
from Hartree_Fock import *
basis = ((0,0), (0,1), (1,0), (1,1), (2,0), (2,1))
Beryllium = CIS(4,basis)
Beryllium_HF = HF(4,basis)
# === Configuration Interaction Singles ===
A = np.zeros((5,5))
# --- <c|H|c> ---
A[0,0] = Beryllium.c_H_c()
# --- <c|H|p_i^a> ---
A[0,1] = Beryllium.c_H_ia(0,4)
A[0,2] = Beryllium.c_H_ia(1,5)
A[0,3] = Beryllium.c_H_ia(2,4)
A[0,4] = Beryllium.c_H_ia(3,5)
# --- <p_i^a|H|c> ---
A[1,0] = Beryllium.c_H_ia(0,4)
A[2,0] = Beryllium.c_H_ia(1,5)
A[3,0] = Beryllium.c_H_ia(2,4)
A[4,0] = Beryllium.c_H_ia(3,5)
# --- <p_i^a|H|p_j^b> ---
# <12|H|21>
A[1,1] = Beryllium.ia_H_jb(0,4,0,4)
A[1,2] = Beryllium.ia_H_jb(0,4,1,5)
A[2,1] = Beryllium.ia_H_jb(1,5,0,4)
A[2,2] = Beryllium.ia_H_jb(1,5,1,5)
# <12|H|31>
A[1,3] = Beryllium.ia_H_jb(0,4,2,4)
A[1,4] = Beryllium.ia_H_jb(0,4,3,5)
A[2,3] = Beryllium.ia_H_jb(1,5,2,4)
A[2,4] = Beryllium.ia_H_jb(1,5,3,5)
# <13|H|21>
A[3,1] = Beryllium.ia_H_jb(2,4,0,4)
A[3,2] = Beryllium.ia_H_jb(3,5,0,4)
A[4,1] = Beryllium.ia_H_jb(1,5,2,4)
A[4,2] = Beryllium.ia_H_jb(1,5,3,5)
# <13|H|31>
A[3,3] = Beryllium.ia_H_jb(2,4,2,4)
A[3,4] = Beryllium.ia_H_jb(2,4,3,5)
A[4,3] = Beryllium.ia_H_jb(3,5,2,4)
A[4,4] = Beryllium.ia_H_jb(3,5,3,5)
eigvals, eigvecs = np.linalg.eigh(A)
print(A)
print('Ref. Energy: ', A[0,0])
print('Energy CIS: ', eigvals[0])
# === Hartree Fock ===
#print('Energy HF: ', Beryllium_HF.HF_iter()[0],' in ', Beryllium_HF.HF_iter()[1], ' iterations')
| [
"numpy.linalg.eigh",
"numpy.zeros"
] | [((217, 233), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (225, 233), True, 'import numpy as np\n'), ((1247, 1264), 'numpy.linalg.eigh', 'np.linalg.eigh', (['A'], {}), '(A)\n', (1261, 1264), True, 'import numpy as np\n')] |
import os, pickle, datetime, string, random
import numpy as np
from src.utils.ibr_argument_parser import IBRParser
from tqdm import trange
import json
from src.traffic_world import TrafficWorld
from src.iterative_best_response import run_iterative_best_response
import src.utils.solver_helper as helper
def run_simulation(log_dir, params, theta_ij):
""" Runs a simulation with IBR using the determined svo matrix theta_ij
Returns: List of utilities and individual rewards
"""
# Generate directory structure and log name
params["start_time_string"] = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# Determine number of control points in the optimization
i_mpc_start = 0
params["N"] = max(1, int(params["T"] / params["dt"]))
params["number_ctrl_pts_executed"] = max(1, int(np.floor(params["N"] * params["p_exec"])))
# Create the world and vehicle objects
world = TrafficWorld(params["n_lanes"], 0, 999999)
# Create the vehicle placement based on a Poisson distribution
MAX_VELOCITY = 25 * 0.447 # m/s
VEHICLE_LENGTH = 4.5 # m
time_duration_s = (params["n_other"] * 3600.0 / params["car_density"]) * 10 # amount of time to generate traffic
initial_vehicle_positions = helper.poission_positions(
params["car_density"],
int(time_duration_s),
params["n_lanes"],
MAX_VELOCITY,
VEHICLE_LENGTH,
position_random_seed=params["seed"],
)
position_list = initial_vehicle_positions[:params["n_other"]]
list_of_svo = theta_ij
(
ambulance,
amb_x0,
all_other_vehicles,
all_other_x0,
) = helper.initialize_cars_from_positions(params["N"], params["dt"], world, True, position_list, list_of_svo)
# Save the vehicles and world for this simulation
data_dir = log_dir + "data"
os.makedirs(log_dir, exist_ok=True)
os.makedirs(data_dir, exist_ok=True) # this should move into run_ibr
pickle.dump(all_other_vehicles, open(log_dir + "/other_vehicles.p", "wb"))
pickle.dump(ambulance, open(log_dir + "/ambulance.p", "wb"))
pickle.dump(world, open(log_dir + "/world.p", "wb"))
print("Results saved in log %s:" % log_dir)
# Initialize the state and control arrays
params["pid"] = os.getpid()
if params["n_other"] != len(all_other_vehicles):
raise Exception("n_other larger than position list")
with open(log_dir + "params.json", "w") as fp:
json.dump(params, fp, indent=2)
xamb_actual, xothers_actual = run_iterative_best_response(
params,
log_dir,
False,
i_mpc_start,
amb_x0,
all_other_x0,
ambulance,
all_other_vehicles,
world,
)
all_trajectories = [xamb_actual] + xothers_actual
all_trajectories = np.array(all_trajectories)
np.save(open(log_dir + "/trajectories.npy", 'wb'), all_trajectories)
return all_trajectories
if __name__ == "__main__":
parser = IBRParser()
parser.add_argument("--max-svo", type=float, default=np.pi / 2.0, help="Max SVO we allow for random")
parser.add_argument("--n-sims", type=int, default=10, help="Number of simulation")
parser.add_argument('--svo-file',
type=str,
default=None,
help="load a pickle file with list of svos for non ambulance")
parser.set_defaults(
n_other=4,
n_mpc=5,
T=4,
)
args = parser.parse_args()
params = vars(args)
alpha_num = string.ascii_lowercase[:8] + string.digits
experiment_string = ("".join(random.choice(alpha_num)
for j in range(4)) + "-" + "".join(random.choice(alpha_num) for j in range(4)) + "-" +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
experiment_dir = (os.path.expanduser("~") + "/mpc_results/" + experiment_string)
history_file = experiment_dir + "/" + experiment_string + "_history.p"
print("History Saved in %s" % history_file)
history = []
for ep_ix in trange(params["n_sims"]):
log_dir = experiment_dir + "/" + experiment_string + '_%05d' % ep_ix + "/"
theta_ij = np.random.rand(params["n_other"], 1) * params["max_svo"]
if args.svo_file is not None:
with open(args.svo_file, 'rb') as f:
svo_list = pickle.load(f)
svo_ix = svo_list[ep_ix % len(svo_list)]
theta_ij = np.array(svo_ix)
all_vehicle_trajectories = run_simulation(log_dir, params, theta_ij)
V_i_list = -(all_vehicle_trajectories[:, 0, -1] - all_vehicle_trajectories[:, 0, 0])
ego_theta = 0.0 #current default in sims
theta_ij = np.insert(theta_ij, 0, ego_theta)
# Train a network to learn a function V(\theta_ij)
history.append((theta_ij, V_i_list))
with open(history_file, "wb") as f:
pickle.dump(history, f)
| [
"os.path.expanduser",
"json.dump",
"pickle.dump",
"os.getpid",
"os.makedirs",
"tqdm.trange",
"numpy.floor",
"src.traffic_world.TrafficWorld",
"random.choice",
"src.utils.solver_helper.initialize_cars_from_positions",
"src.utils.ibr_argument_parser.IBRParser",
"numpy.insert",
"pickle.load",
... | [((917, 959), 'src.traffic_world.TrafficWorld', 'TrafficWorld', (["params['n_lanes']", '(0)', '(999999)'], {}), "(params['n_lanes'], 0, 999999)\n", (929, 959), False, 'from src.traffic_world import TrafficWorld\n'), ((1650, 1760), 'src.utils.solver_helper.initialize_cars_from_positions', 'helper.initialize_cars_from_positions', (["params['N']", "params['dt']", 'world', '(True)', 'position_list', 'list_of_svo'], {}), "(params['N'], params['dt'], world, \n True, position_list, list_of_svo)\n", (1687, 1760), True, 'import src.utils.solver_helper as helper\n'), ((1847, 1882), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (1858, 1882), False, 'import os, pickle, datetime, string, random\n'), ((1887, 1923), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (1898, 1923), False, 'import os, pickle, datetime, string, random\n'), ((2274, 2285), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2283, 2285), False, 'import os, pickle, datetime, string, random\n'), ((2527, 2655), 'src.iterative_best_response.run_iterative_best_response', 'run_iterative_best_response', (['params', 'log_dir', '(False)', 'i_mpc_start', 'amb_x0', 'all_other_x0', 'ambulance', 'all_other_vehicles', 'world'], {}), '(params, log_dir, False, i_mpc_start, amb_x0,\n all_other_x0, ambulance, all_other_vehicles, world)\n', (2554, 2655), False, 'from src.iterative_best_response import run_iterative_best_response\n'), ((2809, 2835), 'numpy.array', 'np.array', (['all_trajectories'], {}), '(all_trajectories)\n', (2817, 2835), True, 'import numpy as np\n'), ((2979, 2990), 'src.utils.ibr_argument_parser.IBRParser', 'IBRParser', ([], {}), '()\n', (2988, 2990), False, 'from src.utils.ibr_argument_parser import IBRParser\n'), ((4076, 4100), 'tqdm.trange', 'trange', (["params['n_sims']"], {}), "(params['n_sims'])\n", (4082, 4100), False, 'from tqdm import trange\n'), ((2460, 2491), 'json.dump', 'json.dump', 
(['params', 'fp'], {'indent': '(2)'}), '(params, fp, indent=2)\n', (2469, 2491), False, 'import json\n'), ((4734, 4767), 'numpy.insert', 'np.insert', (['theta_ij', '(0)', 'ego_theta'], {}), '(theta_ij, 0, ego_theta)\n', (4743, 4767), True, 'import numpy as np\n'), ((576, 599), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (597, 599), False, 'import os, pickle, datetime, string, random\n'), ((818, 858), 'numpy.floor', 'np.floor', (["(params['N'] * params['p_exec'])"], {}), "(params['N'] * params['p_exec'])\n", (826, 858), True, 'import numpy as np\n'), ((3855, 3878), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (3873, 3878), False, 'import os, pickle, datetime, string, random\n'), ((4205, 4241), 'numpy.random.rand', 'np.random.rand', (["params['n_other']", '(1)'], {}), "(params['n_other'], 1)\n", (4219, 4241), True, 'import numpy as np\n'), ((4929, 4952), 'pickle.dump', 'pickle.dump', (['history', 'f'], {}), '(history, f)\n', (4940, 4952), False, 'import os, pickle, datetime, string, random\n'), ((3781, 3804), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3802, 3804), False, 'import os, pickle, datetime, string, random\n'), ((4377, 4391), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4388, 4391), False, 'import os, pickle, datetime, string, random\n'), ((4476, 4492), 'numpy.array', 'np.array', (['svo_ix'], {}), '(svo_ix)\n', (4484, 4492), True, 'import numpy as np\n'), ((3704, 3728), 'random.choice', 'random.choice', (['alpha_num'], {}), '(alpha_num)\n', (3717, 3728), False, 'import os, pickle, datetime, string, random\n'), ((3611, 3635), 'random.choice', 'random.choice', (['alpha_num'], {}), '(alpha_num)\n', (3624, 3635), False, 'import os, pickle, datetime, string, random\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import seaborn as sns
import argparse
parser = argparse.ArgumentParser('QQ Plot for histograms of two variables')
parser.add_argument('file', type=str,
metavar='DF',
help='Location where pkl file saved')
parser.add_argument('--fig-size', type=float, default=6,
help='Figure size (inches)')
parser.add_argument('--font-size',type=float, default=20)
parser.add_argument('--dpi', type=int, default=80)
parser.add_argument('--nbins', type=int, default=10)
parser.add_argument('--xvar', type=str, default='loss')
parser.add_argument('--yvar', type=str, default='model_entropy')
parser.add_argument('--no-show', action='store_false', dest='show')
parser.add_argument('--show', action='store_true', dest='show')
parser.add_argument('--save', action='store_true', dest='save')
parser.add_argument('--no-save', action='store_false',dest='save')
parser.add_argument('--name', type=str, default='qqplot.pdf')
parser.add_argument('--xlim', type=float, default=None, nargs='*')
parser.add_argument('--ylim', type=float, default=None, nargs='*')
parser.add_argument('--filter', type=str, choices=['all','top1','top5','wrong'],default='all')
parser.set_defaults(show=True)
parser.set_defaults(save=False)
args = parser.parse_args()
from common import labdict
sns.set_palette(palette='colorblind')
colors = sns.color_palette()
fsz = args.font_size
figsz = (args.fig_size, args.fig_size)
plt.rc('font', size=fsz)
plt.rc('axes', titlesize=fsz)
plt.rc('axes', labelsize=fsz)
plt.rc('xtick', labelsize=fsz)
plt.rc('ytick', labelsize=fsz)
plt.rc('legend', fontsize=.75*fsz)
plt.rc('figure', titlesize=fsz)
dpi = args.dpi
show=args.show
df = pd.read_pickle(args.file)
Nsamples = len(df)
plt.close('all')
fig, ax = plt.subplots(1, figsize=figsz)
if args.filter in ['top1','top5']:
b = df[args.filter]
elif args.filter=='wrong':
b = np.logical_not(df['top5'])
else:
b = np.arange(len(df))
X = df[args.xvar][b]
Y = df[args.yvar][b]
X = X[X>0]
Y = Y[Y>0]
X = np.log(X)
Y = np.log(Y)
Nbins = args.nbins
Yc, Ybins = pd.qcut(Y,Nbins,retbins=True)
Xc, Xbins = pd.qcut(X,Nbins,retbins=True)
Yvc = Yc.value_counts(sort=False)
Xvc = Xc.value_counts(sort=False)
fit, residuals, rank, sv, rcond = np.polyfit(Xbins[1:], Ybins[1:],1, full=True)
Ypred= np.poly1d(fit)(Xbins[1:])
m = np.mean(Ybins[1:])
SStot = np.sum((Ybins[1:] - m)**2)
Rsq = 1-residuals[0]/SStot
print('QQ-Plot R^2: %.4g'%Rsq)
#ax.plot(np.exp(Xbins[1:]), np.exp(Ypred), color='k')
ax.plot(np.exp(Xbins[1:]), np.exp(Xbins[1:]), color='k')
ax.scatter(np.exp(Xbins[1:]), np.exp(Ybins[1:]))
ax.grid()
ax.set_axisbelow(True)
if args.xlim is None:
pass
#ax.set_xlim((np.exp(0.5*Xbins[1]),np.exp(1.5*Xbins[-1])))
else:
ax.set_xlim(*args.xlim)
if args.ylim is None:
pass
#ax.set_ylim(np.exp(0.5*Ybins[1]),np.exp(1.5*Ybins[-1]))
else:
ax.set_ylim(*args.ylim)
ax.set_xscale('log',nonposx='clip')
ax.set_yscale('log',nonposy='clip')
extra = []
extra.append(ax.set_xlabel(labdict[args.xvar]+' quantiles'))
extra.append(ax.set_ylabel(labdict[args.yvar]+' quantiles'))
fig.tight_layout()
#
if show:
plt.show()
#
if args.save:
pth = os.path.split(args.file)[0]
fig.savefig(os.path.join(pth,args.name),
format='pdf',bbox_inches='tight',dpi=dpi)
| [
"numpy.poly1d",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.log",
"numpy.polyfit",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"os.path.join",
"numpy.logical_not",
"numpy.mean",
"matplotlib.pyplot.rc",
"seaborn.color_palette",
"numpy.exp",
"pandas.read_pickle",
"pandas.qcut",... | [((130, 196), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""QQ Plot for histograms of two variables"""'], {}), "('QQ Plot for histograms of two variables')\n", (153, 196), False, 'import argparse\n'), ((1357, 1394), 'seaborn.set_palette', 'sns.set_palette', ([], {'palette': '"""colorblind"""'}), "(palette='colorblind')\n", (1372, 1394), True, 'import seaborn as sns\n'), ((1404, 1423), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (1421, 1423), True, 'import seaborn as sns\n'), ((1484, 1508), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'fsz'}), "('font', size=fsz)\n", (1490, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1509, 1538), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'fsz'}), "('axes', titlesize=fsz)\n", (1515, 1538), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1568), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'fsz'}), "('axes', labelsize=fsz)\n", (1545, 1568), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1599), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'fsz'}), "('xtick', labelsize=fsz)\n", (1575, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1600, 1630), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'fsz'}), "('ytick', labelsize=fsz)\n", (1606, 1630), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1668), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(0.75 * fsz)'}), "('legend', fontsize=0.75 * fsz)\n", (1637, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1666, 1697), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'fsz'}), "('figure', titlesize=fsz)\n", (1672, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1760), 'pandas.read_pickle', 'pd.read_pickle', (['args.file'], {}), '(args.file)\n', (1749, 1760), True, 'import pandas as pd\n'), ((1781, 1797), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), 
"('all')\n", (1790, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1839), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsz'}), '(1, figsize=figsz)\n', (1821, 1839), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2072), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (2069, 2072), True, 'import numpy as np\n'), ((2077, 2086), 'numpy.log', 'np.log', (['Y'], {}), '(Y)\n', (2083, 2086), True, 'import numpy as np\n'), ((2119, 2150), 'pandas.qcut', 'pd.qcut', (['Y', 'Nbins'], {'retbins': '(True)'}), '(Y, Nbins, retbins=True)\n', (2126, 2150), True, 'import pandas as pd\n'), ((2161, 2192), 'pandas.qcut', 'pd.qcut', (['X', 'Nbins'], {'retbins': '(True)'}), '(X, Nbins, retbins=True)\n', (2168, 2192), True, 'import pandas as pd\n'), ((2294, 2340), 'numpy.polyfit', 'np.polyfit', (['Xbins[1:]', 'Ybins[1:]', '(1)'], {'full': '(True)'}), '(Xbins[1:], Ybins[1:], 1, full=True)\n', (2304, 2340), True, 'import numpy as np\n'), ((2378, 2396), 'numpy.mean', 'np.mean', (['Ybins[1:]'], {}), '(Ybins[1:])\n', (2385, 2396), True, 'import numpy as np\n'), ((2405, 2433), 'numpy.sum', 'np.sum', (['((Ybins[1:] - m) ** 2)'], {}), '((Ybins[1:] - m) ** 2)\n', (2411, 2433), True, 'import numpy as np\n'), ((2347, 2361), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (2356, 2361), True, 'import numpy as np\n'), ((2554, 2571), 'numpy.exp', 'np.exp', (['Xbins[1:]'], {}), '(Xbins[1:])\n', (2560, 2571), True, 'import numpy as np\n'), ((2573, 2590), 'numpy.exp', 'np.exp', (['Xbins[1:]'], {}), '(Xbins[1:])\n', (2579, 2590), True, 'import numpy as np\n'), ((2614, 2631), 'numpy.exp', 'np.exp', (['Xbins[1:]'], {}), '(Xbins[1:])\n', (2620, 2631), True, 'import numpy as np\n'), ((2633, 2650), 'numpy.exp', 'np.exp', (['Ybins[1:]'], {}), '(Ybins[1:])\n', (2639, 2650), True, 'import numpy as np\n'), ((3182, 3192), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3190, 3192), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1961), 'numpy.logical_not', 
'np.logical_not', (["df['top5']"], {}), "(df['top5'])\n", (1949, 1961), True, 'import numpy as np\n'), ((3223, 3247), 'os.path.split', 'os.path.split', (['args.file'], {}), '(args.file)\n', (3236, 3247), False, 'import os\n'), ((3267, 3295), 'os.path.join', 'os.path.join', (['pth', 'args.name'], {}), '(pth, args.name)\n', (3279, 3295), False, 'import os\n')] |
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import numpy as np
from ..utilities import Vec3, Output
class FlowData():
"""
Generate a FlowData object to handle data I/O
"""
#TODO handle none case, maybe defaul values apply like 0 origin and auto determine spacing and dimensions
def __init__(self,
x,
y,
z,
u,
v,
w,
spacing=None,
dimensions=None,
origin=None):
"""
Initialize FlowData object with coordinates, velocity fields,
and meta data.
Args:
x (np.array): Cartesian coordinate data.
y (np.array): Cartesian coordinate data.
z (np.array): Cartesian coordinate data.
u (np.array): x-component of velocity.
v (np.array): y-component of velocity.
w (np.array): z-component of velocity.
spacing (float, optional): Spatial resolution.
Defaults to None.
dimensions (iterable, optional): named dimensions
(e.g. x1, x2, x3). Defaults to None.
origin (iterable, optional): Coordinates of origin.
Defaults to None.
"""
self.x = x
self.y = y
self.z = z
self.u = u
self.v = v
self.w = w
#TODO Make these VEC3?
self.spacing = spacing
self.dimensions = dimensions
self.origin = origin
# Technically resolution is a restating of above, but it is useful to have
self.resolution = Vec3(len(np.unique(x)), len(np.unique(y)),
len(np.unique(z)))
def save_as_vtk(self, filename):
"""
Save FlowData Object to vtk
Args:
filename (str): Write-to path for vtk file
"""
n_points = self.dimensions.x1 * self.dimensions.x2 * self.dimensions.x3
vtk_file = Output(filename)
vtk_file.write_line('# vtk DataFile Version 3.0')
vtk_file.write_line('array.mean0D')
vtk_file.write_line('ASCII')
vtk_file.write_line('DATASET STRUCTURED_POINTS')
vtk_file.write_line('DIMENSIONS {}'.format(self.dimensions))
vtk_file.write_line('ORIGIN {}'.format(self.origin))
vtk_file.write_line('SPACING {}'.format(self.spacing))
vtk_file.write_line('POINT_DATA {}'.format(n_points))
vtk_file.write_line('FIELD attributes 1')
vtk_file.write_line('UAvg 3 {} float'.format(n_points))
for u, v, w in zip(self.u, self.v, self.w):
vtk_file.write_line('{}'.format(Vec3(u, v, w)))
@staticmethod
def crop(ff, x_bnds, y_bnds, z_bnds):
"""
Crop FlowData object to within stated bounds.
Args:
ff (:py:class:`floris.tools.flow_data.FlowData`):
FlowData object.
x_bnds (iterable): min and max of x-coordinate.
y_bnds (iterable): min and max of y-coordinate.
z_bnds (iterable): min and max of z-coordinate.
Returns:
(:py:class:`floris.tools.flow_data.FlowData`):
cropped FlowData object.
"""
map_values = (ff.x > x_bnds[0]) & (ff.x < x_bnds[1]) & (
ff.y > y_bnds[0]) & (ff.y < y_bnds[1]) & (ff.z > z_bnds[0]) & (
ff.z < z_bnds[1])
x = ff.x[map_values]
y = ff.y[map_values]
z = ff.z[map_values]
# Work out new dimensions
dimensions = (len(np.unique(x)), len(np.unique(y)), len(np.unique(z)))
# Work out origin
origin = (
ff.origin.x1 + np.min(x),
ff.origin.x2 + np.min(y),
ff.origin.x3 + np.min(z),
)
return FlowData(
x - np.min(x),
y - np.min(y),
z - np.min(z),
ff.u[map_values],
ff.v[map_values],
ff.w[map_values],
spacing=ff.spacing, # doesn't change
dimensions=dimensions,
origin=origin)
| [
"numpy.min",
"numpy.unique"
] | [((2172, 2184), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (2181, 2184), True, 'import numpy as np\n'), ((2191, 2203), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2200, 2203), True, 'import numpy as np\n'), ((2241, 2253), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (2250, 2253), True, 'import numpy as np\n'), ((4086, 4098), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (4095, 4098), True, 'import numpy as np\n'), ((4105, 4117), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4114, 4117), True, 'import numpy as np\n'), ((4124, 4136), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (4133, 4136), True, 'import numpy as np\n'), ((4212, 4221), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4218, 4221), True, 'import numpy as np\n'), ((4250, 4259), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (4256, 4259), True, 'import numpy as np\n'), ((4288, 4297), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (4294, 4297), True, 'import numpy as np\n'), ((4351, 4360), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4357, 4360), True, 'import numpy as np\n'), ((4378, 4387), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (4384, 4387), True, 'import numpy as np\n'), ((4405, 4414), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (4411, 4414), True, 'import numpy as np\n')] |
r""":mod:`mirgecom.diffusion` computes the diffusion operator.
.. autofunction:: gradient_flux
.. autofunction:: diffusion_flux
.. autofunction:: diffusion_operator
.. autoclass:: DiffusionBoundary
.. autoclass:: DirichletDiffusionBoundary
.. autoclass:: NeumannDiffusionBoundary
"""
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import abc
import numpy as np
import numpy.linalg as la # noqa
from pytools.obj_array import make_obj_array, obj_array_vectorize_n_args
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from meshmode.dof_array import thaw
from grudge.dof_desc import DOFDesc, as_dofdesc
from grudge.eager import interior_trace_pair, cross_rank_trace_pairs
from grudge.trace_pair import TracePair
def gradient_flux(discr, quad_tag, u_tpair):
r"""Compute the numerical flux for $\nabla u$."""
actx = u_tpair.int.array_context
dd = u_tpair.dd
dd_quad = dd.with_discr_tag(quad_tag)
dd_allfaces_quad = dd_quad.with_dtag("all_faces")
normal_quad = thaw(actx, discr.normal(dd_quad))
def to_quad(a):
return discr.project(dd, dd_quad, a)
def flux(u, normal):
return -u * normal
return discr.project(dd_quad, dd_allfaces_quad, flux(
to_quad(u_tpair.avg), normal_quad))
def diffusion_flux(discr, quad_tag, alpha_tpair, grad_u_tpair):
r"""Compute the numerical flux for $\nabla \cdot (\alpha \nabla u)$."""
actx = grad_u_tpair.int[0].array_context
dd = grad_u_tpair.dd
dd_quad = dd.with_discr_tag(quad_tag)
dd_allfaces_quad = dd_quad.with_dtag("all_faces")
normal_quad = thaw(actx, discr.normal(dd_quad))
def to_quad(a):
return discr.project(dd, dd_quad, a)
def flux(alpha, grad_u, normal):
return -alpha * np.dot(grad_u, normal)
flux_tpair = TracePair(dd_quad,
interior=flux(
to_quad(alpha_tpair.int), to_quad(grad_u_tpair.int), normal_quad),
exterior=flux(
to_quad(alpha_tpair.ext), to_quad(grad_u_tpair.ext), normal_quad)
)
return discr.project(dd_quad, dd_allfaces_quad, flux_tpair.avg)
class DiffusionBoundary(metaclass=abc.ABCMeta):
"""
Diffusion boundary base class.
.. automethod:: get_gradient_flux
.. automethod:: get_diffusion_flux
"""
@abc.abstractmethod
def get_gradient_flux(self, discr, quad_tag, dd, alpha, u):
"""Compute the flux for grad(u) on the boundary corresponding to *dd*."""
raise NotImplementedError
@abc.abstractmethod
def get_diffusion_flux(self, discr, quad_tag, dd, alpha, grad_u):
"""Compute the flux for diff(u) on the boundary corresponding to *dd*."""
raise NotImplementedError
class DirichletDiffusionBoundary(DiffusionBoundary):
    r"""
    Dirichlet boundary condition for the diffusion operator.
    For the boundary condition $u|_\Gamma = f$, uses external data
    .. math::
        u^+ &= 2 f - u^-
        (\nabla u)^+ &= (\nabla u)^-
    to compute boundary fluxes as shown in [Hesthaven_2008]_, Section 7.1.
    .. automethod:: __init__
    """
    def __init__(self, value):
        """
        Initialize the boundary condition.
        Parameters
        ----------
        value: float or meshmode.dof_array.DOFArray
            the value(s) of $f$ along the boundary
        """
        self.value = value
    def get_gradient_flux(self, discr, quad_tag, dd, alpha, u):  # noqa: D102
        u_minus = discr.project("vol", dd, u)
        # Mirror condition u+ = 2*f - u-, so avg(u) equals f on the boundary
        mirror_pair = TracePair(dd, interior=u_minus, exterior=2*self.value-u_minus)
        return gradient_flux(discr, quad_tag, mirror_pair)
    def get_diffusion_flux(self, discr, quad_tag, dd, alpha, grad_u):  # noqa: D102
        # Both alpha and grad(u) are extended unchanged across the boundary
        alpha_minus = discr.project("vol", dd, alpha)
        grad_u_minus = discr.project("vol", dd, grad_u)
        return diffusion_flux(
            discr, quad_tag,
            TracePair(dd, interior=alpha_minus, exterior=alpha_minus),
            TracePair(dd, interior=grad_u_minus, exterior=grad_u_minus))
class NeumannDiffusionBoundary(DiffusionBoundary):
    r"""
    Neumann boundary condition for the diffusion operator.
    For the boundary condition $(\nabla u \cdot \mathbf{\hat{n}})|_\Gamma = g$, uses
    external data
    .. math::
        u^+ = u^-
    when computing the boundary fluxes for $\nabla u$, and uses
    .. math::
        (-\alpha \nabla u\cdot\mathbf{\hat{n}})|_\Gamma &=
            -\alpha^- (\nabla u\cdot\mathbf{\hat{n}})|_\Gamma
        &= -\alpha^- g
    when computing the boundary fluxes for $\nabla \cdot (\alpha \nabla u)$.
    .. automethod:: __init__
    """
    def __init__(self, value):
        """
        Initialize the boundary condition.
        Parameters
        ----------
        value: float or meshmode.dof_array.DOFArray
            the value(s) of $g$ along the boundary
        """
        self.value = value
    def get_gradient_flux(self, discr, quad_tag, dd, alpha, u):  # noqa: D102
        # u is extended unchanged across the boundary (u+ = u-)
        u_minus = discr.project("vol", dd, u)
        same_pair = TracePair(dd, interior=u_minus, exterior=u_minus)
        return gradient_flux(discr, quad_tag, same_pair)
    def get_diffusion_flux(self, discr, quad_tag, dd, alpha, grad_u):  # noqa: D102
        dd_quad = dd.with_discr_tag(quad_tag)
        dd_allfaces_quad = dd_quad.with_dtag("all_faces")
        # Compute the flux directly rather than constructing an external grad_u
        # value (and the associated TracePair); this is simpler when alpha
        # varies in space (the alternative would yield a grad_u_tpair living in
        # the quadrature discretization, which diffusion_flux would need to be
        # taught to accept).
        alpha_minus_quad = discr.project("vol", dd_quad, alpha)
        g_quad = discr.project(dd, dd_quad, self.value)
        return discr.project(dd_quad, dd_allfaces_quad, -alpha_minus_quad*g_quad)
def diffusion_operator(discr, quad_tag, alpha, boundaries, u, return_grad_u=False):
    r"""
    Compute the diffusion operator.
    The diffusion operator is defined as
    $\nabla\cdot(\alpha\nabla u)$, where $\alpha$ is the diffusivity and
    $u$ is a scalar field.
    Uses unstabilized central numerical fluxes.
    Parameters
    ----------
    discr: grudge.eager.EagerDGDiscretization
        the discretization to use
    quad_tag:
        quadrature tag indicating which discretization in *discr* to use for
        overintegration
    alpha: numbers.Number or meshmode.dof_array.DOFArray
        the diffusivity value(s)
    boundaries:
        dictionary (or list of dictionaries) mapping boundary tags to
        :class:`DiffusionBoundary` instances
    u: meshmode.dof_array.DOFArray or numpy.ndarray
        the DOF array (or object array of DOF arrays) to which the operator should be
        applied
    return_grad_u: bool
        an optional flag indicating whether $\nabla u$ should also be returned
    Returns
    -------
    diff_u: meshmode.dof_array.DOFArray or numpy.ndarray
        the diffusion operator applied to *u*
    grad_u: numpy.ndarray
        the gradient of *u*; only returned if *return_grad_u* is True
    """
    if isinstance(u, np.ndarray):
        if not isinstance(boundaries, list):
            raise TypeError("boundaries must be a list if u is an object array")
        if len(boundaries) != len(u):
            raise TypeError("boundaries must be the same length as u")
        # Object-array input: apply the operator component-wise, pairing each
        # component of u with its own boundary dictionary
        return obj_array_vectorize_n_args(lambda boundaries_i, u_i:
            diffusion_operator(discr, quad_tag, alpha, boundaries_i, u_i,
                return_grad_u=return_grad_u), make_obj_array(boundaries), u)
    # Validate boundary objects up front so errors surface before any DG work
    for btag, bdry in boundaries.items():
        if not isinstance(bdry, DiffusionBoundary):
            raise TypeError(f"Unrecognized boundary type for tag {btag}. "
                "Must be an instance of DiffusionBoundary.")
    dd_quad = DOFDesc("vol", quad_tag)
    dd_allfaces_quad = DOFDesc("all_faces", quad_tag)
    # First pass: weak gradient of u with central interior fluxes, boundary
    # fluxes supplied by the DiffusionBoundary objects, and cross-rank fluxes
    # for distributed-memory runs
    grad_u = discr.inverse_mass(
        discr.weak_grad(-u)
        - # noqa: W504
        discr.face_mass(
            dd_allfaces_quad,
            gradient_flux(discr, quad_tag, interior_trace_pair(discr, u))
            + sum(
                bdry.get_gradient_flux(discr, quad_tag, as_dofdesc(btag), alpha, u)
                for btag, bdry in boundaries.items())
            + sum(
                gradient_flux(discr, quad_tag, u_tpair)
                for u_tpair in cross_rank_trace_pairs(discr, u))
        )
    )
    # Project diffusivity and gradient onto the (overintegration) quadrature
    # discretization before forming the divergence
    alpha_quad = discr.project("vol", dd_quad, alpha)
    grad_u_quad = discr.project("vol", dd_quad, grad_u)
    # Second pass: weak divergence of alpha*grad(u), again with interior,
    # boundary, and cross-rank flux contributions
    diff_u = discr.inverse_mass(
        discr.weak_div(dd_quad, -alpha_quad*grad_u_quad)
        - # noqa: W504
        discr.face_mass(
            dd_allfaces_quad,
            diffusion_flux(discr, quad_tag, interior_trace_pair(discr, alpha),
                interior_trace_pair(discr, grad_u))
            + sum(
                bdry.get_diffusion_flux(discr, quad_tag, as_dofdesc(btag), alpha,
                    grad_u) for btag, bdry in boundaries.items())
            + sum(
                diffusion_flux(discr, quad_tag, alpha_tpair, grad_u_tpair)
                for alpha_tpair, grad_u_tpair in zip(
                    cross_rank_trace_pairs(discr, alpha),
                    cross_rank_trace_pairs(discr, grad_u)))
        )
    )
    if return_grad_u:
        return diff_u, grad_u
    else:
        return diff_u
| [
"grudge.dof_desc.as_dofdesc",
"grudge.eager.cross_rank_trace_pairs",
"grudge.dof_desc.DOFDesc",
"pytools.obj_array.make_obj_array",
"grudge.eager.interior_trace_pair",
"grudge.trace_pair.TracePair",
"numpy.dot"
] | [((9047, 9071), 'grudge.dof_desc.DOFDesc', 'DOFDesc', (['"""vol"""', 'quad_tag'], {}), "('vol', quad_tag)\n", (9054, 9071), False, 'from grudge.dof_desc import DOFDesc, as_dofdesc\n'), ((9095, 9125), 'grudge.dof_desc.DOFDesc', 'DOFDesc', (['"""all_faces"""', 'quad_tag'], {}), "('all_faces', quad_tag)\n", (9102, 9125), False, 'from grudge.dof_desc import DOFDesc, as_dofdesc\n'), ((4562, 4624), 'grudge.trace_pair.TracePair', 'TracePair', (['dd'], {'interior': 'u_int', 'exterior': '(2 * self.value - u_int)'}), '(dd, interior=u_int, exterior=2 * self.value - u_int)\n', (4571, 4624), False, 'from grudge.trace_pair import TracePair\n'), ((4835, 4888), 'grudge.trace_pair.TracePair', 'TracePair', (['dd'], {'interior': 'alpha_int', 'exterior': 'alpha_int'}), '(dd, interior=alpha_int, exterior=alpha_int)\n', (4844, 4888), False, 'from grudge.trace_pair import TracePair\n'), ((4966, 5021), 'grudge.trace_pair.TracePair', 'TracePair', (['dd'], {'interior': 'grad_u_int', 'exterior': 'grad_u_int'}), '(dd, interior=grad_u_int, exterior=grad_u_int)\n', (4975, 5021), False, 'from grudge.trace_pair import TracePair\n'), ((6154, 6199), 'grudge.trace_pair.TracePair', 'TracePair', (['dd'], {'interior': 'u_int', 'exterior': 'u_int'}), '(dd, interior=u_int, exterior=u_int)\n', (6163, 6199), False, 'from grudge.trace_pair import TracePair\n'), ((2821, 2843), 'numpy.dot', 'np.dot', (['grad_u', 'normal'], {}), '(grad_u, normal)\n', (2827, 2843), True, 'import numpy as np\n'), ((8770, 8796), 'pytools.obj_array.make_obj_array', 'make_obj_array', (['boundaries'], {}), '(boundaries)\n', (8784, 8796), False, 'from pytools.obj_array import make_obj_array, obj_array_vectorize_n_args\n'), ((9310, 9339), 'grudge.eager.interior_trace_pair', 'interior_trace_pair', (['discr', 'u'], {}), '(discr, u)\n', (9329, 9339), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n'), ((9987, 10020), 'grudge.eager.interior_trace_pair', 'interior_trace_pair', (['discr', 'alpha'], {}), 
'(discr, alpha)\n', (10006, 10020), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n'), ((10038, 10072), 'grudge.eager.interior_trace_pair', 'interior_trace_pair', (['discr', 'grad_u'], {}), '(discr, grad_u)\n', (10057, 10072), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n'), ((9604, 9636), 'grudge.eager.cross_rank_trace_pairs', 'cross_rank_trace_pairs', (['discr', 'u'], {}), '(discr, u)\n', (9626, 9636), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n'), ((9416, 9432), 'grudge.dof_desc.as_dofdesc', 'as_dofdesc', (['btag'], {}), '(btag)\n', (9426, 9432), False, 'from grudge.dof_desc import DOFDesc, as_dofdesc\n'), ((10150, 10166), 'grudge.dof_desc.as_dofdesc', 'as_dofdesc', (['btag'], {}), '(btag)\n', (10160, 10166), False, 'from grudge.dof_desc import DOFDesc, as_dofdesc\n'), ((10409, 10445), 'grudge.eager.cross_rank_trace_pairs', 'cross_rank_trace_pairs', (['discr', 'alpha'], {}), '(discr, alpha)\n', (10431, 10445), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n'), ((10467, 10504), 'grudge.eager.cross_rank_trace_pairs', 'cross_rank_trace_pairs', (['discr', 'grad_u'], {}), '(discr, grad_u)\n', (10489, 10504), False, 'from grudge.eager import interior_trace_pair, cross_rank_trace_pairs\n')] |
# test_eng.py
# Copyright (c) 2013-2016 <NAME>
# See LICENSE for details
# pylint: disable=C0103,C0111,C0302,E0611,R0913,R0915,W0108,W0212
# Standard library imports
import functools
import sys
import pytest
from numpy import array, ndarray
# Putil imports
import putil.eng
from putil.test import AE, AI, CS
###
# Global variables
###
DFLT = 'def'  # sentinel meaning "use the function's default argument value"
PY2 = bool(sys.hexversion < 0x03000000)

###
# Helper functions
###
# Fix: the helpers below were lambdas assigned to names (PEP 8 / flake8 E731);
# they are now proper defs with the same names and behavior
def isdflt(obj):
    """Return True if *obj* is the default-argument sentinel DFLT."""
    return bool(obj == DFLT)

def h(num):
    """Return '100.' followed by *num* zeros."""
    return '100.'+('0'*num)

def o(num):
    """Return '1.' followed by *num* zeros."""
    return '1.'+('0'*num)

def pv(py2arg, py3arg):
    """Select the Python 2 or Python 3 variant of an expected value."""
    return py2arg if PY2 else py3arg

def sarg(msg):
    """Return the standard 'argument is not valid' message for *msg*."""
    return 'Argument `{0}` is not valid'.format(msg)

def t(num):
    """Return '10.' followed by *num* zeros."""
    return '10.'+('0'*num)
def to_sci_string(number):
    """
    Returns a string with the number formatted in scientific notation. This
    function does not have all the configurability of the public function
    to_scientific_string, it is a convenience function to test _to_eng_tuple
    """
    mant, exp = putil.eng._to_eng_tuple(number)
    exp_sign = '-' if exp < 0 else '+'
    return '{mant}E{exp_sign}{exp}'.format(
        mant=mant, exp_sign=exp_sign, exp=abs(exp)
    )
###
# Test functions
###
@pytest.mark.parametrize(
    'text, sep, num, lstrip, rstrip, ref', [
        ('a, b, c, d', ',', 1, DFLT, DFLT, ('a', ' b', ' c', ' d')),
        ('a , b , c , d ', ',', 1, DFLT, DFLT, ('a ', ' b ', ' c ', ' d ')),
        ('a , b , c , d ', ',', 1, True, DFLT, ('a ', 'b ', 'c ', 'd ')),
        ('a , b , c , d ', ',', 1, DFLT, True, ('a', ' b', ' c', ' d')),
        ('a , b , c , d ', ',', 1, True, True, ('a', 'b', 'c', 'd')),
        ('a, b, c, d', ',', 2, DFLT, DFLT, ('a, b', ' c, d')),
        ('a, b, c, d', ',', 3, DFLT, DFLT, ('a, b, c', ' d')),
        ('a, b, c, d', ',', 4, DFLT, DFLT, ('a, b, c, d',)),
        ('a, b, c, d', ',', 5, DFLT, DFLT, ('a, b, c, d',)),
    ]
)
def test_split_every(text, sep, num, lstrip, rstrip, ref):
    """ Test _split_every function behavior """
    # DFLT in lstrip or rstrip means default argument values should be used;
    # pass only the overridden keywords through
    kwargs = {}
    if not isdflt(lstrip):
        kwargs['lstrip'] = lstrip
    if not isdflt(rstrip):
        kwargs['rstrip'] = rstrip
    assert putil.eng._split_every(text, sep, num, **kwargs) == ref
# Exhaustive sweep of _to_eng_tuple (via the to_sci_string helper): one pair
# of cases per engineering decade from 1E-24 to 1E+24, positive and negative,
# plus string inputs at the end; pv() selects the expected literal for the
# running Python major version (float repr precision differs between 2 and 3)
@pytest.mark.parametrize(
    'num, ref', [
        (0.000000000000000000000001001234567890, '1.00123456789E-24'),
        (0.000000000000000000000001, '1E-24'),
        (0.00000000000000000000001001234567890, '10.0123456789E-24'),
        (0.00000000000000000000001, '10E-24'),
        (0.0000000000000000000001001234567890, '100.123456789E-24'),
        (0.0000000000000000000001, '100E-24'),
        (0.000000000000000000001001234567890, '1.00123456789E-21'),
        (0.000000000000000000001, '1E-21'),
        (0.00000000000000000001001234567890, '10.0123456789E-21'),
        (0.00000000000000000001, '10E-21'),
        (0.0000000000000000001001234567890, '100.123456789E-21'),
        (0.0000000000000000001, '100E-21'),
        (0.000000000000000001001234567890, '1.00123456789E-18'),
        (0.000000000000000001, '1E-18'),
        (0.00000000000000001001234567890, '10.0123456789E-18'),
        (0.00000000000000001, '10E-18'),
        (0.0000000000000001001234567890, '100.123456789E-18'),
        (0.0000000000000001, '100E-18'),
        (0.000000000000001001234567890, '1.00123456789E-15'),
        (0.000000000000001, '1E-15'),
        (0.00000000000001001234567890, '10.0123456789E-15'),
        (0.00000000000001, '10E-15'),
        (0.0000000000001001234567890, '100.123456789E-15'),
        (0.0000000000001, '100E-15'),
        (0.000000000001001234567890, '1.00123456789E-12'),
        (0.000000000001, '1E-12'),
        (0.00000000001001234567890, '10.0123456789E-12'),
        (0.00000000001, '10E-12'),
        (0.0000000001001234567890, '100.123456789E-12'),
        (0.0000000001, '100E-12'),
        (0.000000001001234567890, '1.00123456789E-9'),
        (0.000000001, '1E-9'),
        (0.00000001001234567890, '10.0123456789E-9'),
        (0.00000001, '10E-9'),
        (0.0000001001234567890, '100.123456789E-9'),
        (0.0000001, '100E-9'),
        (0.000001001234567890, '1.00123456789E-6'),
        (0.000001, '1E-6'),
        (0.00001001234567890, '10.0123456789E-6'),
        (0.00001, '10E-6'),
        (0.0001001234567890, '100.123456789E-6'),
        (0.0001, '100E-6'),
        (0.001001234567890, '1.00123456789E-3'),
        (0.001, '1E-3'),
        (0.01001234567890, '10.0123456789E-3'),
        (0.01, '10E-3'),
        (0.1001234567890, '100.123456789E-3'),
        (0.1, '100E-3'),
        (0, '0E+0'),
        (1, '1E+0'),
        (1.1234567890, '1.123456789E+0'),
        (10, '10E+0'),
        (10.1234567890, '10.123456789E+0'),
        (100, '100E+0'),
        (100.1234567890, '100.123456789E+0'),
        (1000, '1E+3'),
        (1000.1234567890, pv('1.00012345679E+3', '1.000123456789E+3')),
        (10000, '10E+3'),
        (10000.1234567890, pv('10.0001234568E+3', '10.000123456789E+3')),
        (100000, '100E+3'),
        (100000.1234567890, pv('100.000123457E+3', '100.000123456789E+3')),
        (1000000, '1E+6'),
        (1000000.1234567890, pv('1.00000012346E+6', '1.000000123456789E+6')),
        (10000000, '10E+6'),
        (10000000.1234567890, pv('10.0000001235E+6', '10.00000012345679E+6')),
        (100000000, '100E+6'),
        (100000000.1234567890, pv('100.000000123E+6', '100.00000012345679E+6')),
        (1000000000, '1E+9'),
        (1000000000.1234567890, pv('1.00000000012E+9', '1.0000000001234568E+9')),
        (10000000000, '10E+9'),
        (10000000000.1234567890, pv(t(9)+'1E+9', '10.000000000123457E+9')),
        (100000000000, '100E+9'),
        (100000000000.1234567890, pv('100E+9', '100.00000000012346E+9')),
        (1000000000000, '1E+12'),
        (1000000000000.1234567890, pv('1E+12', '1.0000000000001234E+12')),
        (10000000000000, '10E+12'),
        (10000000000000.1234567890, pv('10E+12', '10.000000000000123E+12')),
        (100000000000000, '100E+12'),
        (100000000000000.1234567890, pv('100E+12', '100.00000000000012E+12')),
        (1000000000000000, '1E+15'),
        (1000000000000000.1234567890, pv('1E+15', '1.0000000000000001E+15')),
        (10000000000000000, '10E+15'),
        (10000000000000000.1234567890, '10E+15'),
        (100000000000000000, '100E+15'),
        (100000000000000000.1234567890, '100E+15'),
        (1000000000000000000, '1E+18'),
        (1000000000000000000.1234567890, '1E+18'),
        (10000000000000000000, '10E+18'),
        (10000000000000000000.1234567890, '10E+18'),
        (100000000000000000000, '100E+18'),
        (100000000000000000000.1234567890, '100E+18'),
        (1000000000000000000000, '1E+21'),
        (1000000000000000000000.1234567890, '1E+21'),
        (10000000000000000000000, '10E+21'),
        (10000000000000000000000.1234567890, '10E+21'),
        (100000000000000000000000, '100E+21'),
        (100000000000000000000000.1234567890, pv('100E+21', h(13)+'1E+21')),
        (1000000000000000000000000, '1E+24'),
        (1000000000000000000000000.1234567890, '1E+24'),
        (10000000000000000000000000, '10E+24'),
        (10000000000000000000000000.1234567890, '10E+24'),
        (100000000000000000000000000, '100E+24'),
        (100000000000000000000000000.1234567890, '100E+24'),
        (-0.000000000000000000000001001234567890, '-1.00123456789E-24'),
        (-0.000000000000000000000001, '-1E-24'),
        (-0.00000000000000000000001001234567890, '-10.0123456789E-24'),
        (-0.00000000000000000000001, '-10E-24'),
        (-0.0000000000000000000001001234567890, '-100.123456789E-24'),
        (-0.0000000000000000000001, '-100E-24'),
        (-0.000000000000000000001001234567890, '-1.00123456789E-21'),
        (-0.000000000000000000001, '-1E-21'),
        (-0.00000000000000000001001234567890, '-10.0123456789E-21'),
        (-0.00000000000000000001, '-10E-21'),
        (-0.0000000000000000001001234567890, '-100.123456789E-21'),
        (-0.0000000000000000001, '-100E-21'),
        (-0.000000000000000001001234567890, '-1.00123456789E-18'),
        (-0.000000000000000001, '-1E-18'),
        (-0.00000000000000001001234567890, '-10.0123456789E-18'),
        (-0.00000000000000001, '-10E-18'),
        (-0.0000000000000001001234567890, '-100.123456789E-18'),
        (-0.0000000000000001, '-100E-18'),
        (-0.000000000000001001234567890, '-1.00123456789E-15'),
        (-0.000000000000001, '-1E-15'),
        (-0.00000000000001001234567890, '-10.0123456789E-15'),
        (-0.00000000000001, '-10E-15'),
        (-0.0000000000001001234567890, '-100.123456789E-15'),
        (-0.0000000000001, '-100E-15'),
        (-0.000000000001001234567890, '-1.00123456789E-12'),
        (-0.000000000001, '-1E-12'),
        (-0.00000000001001234567890, '-10.0123456789E-12'),
        (-0.00000000001, '-10E-12'),
        (-0.0000000001001234567890, '-100.123456789E-12'),
        (-0.0000000001, '-100E-12'),
        (-0.000000001001234567890, '-1.00123456789E-9'),
        (-0.000000001, '-1E-9'),
        (-0.00000001001234567890, '-10.0123456789E-9'),
        (-0.00000001, '-10E-9'),
        (-0.0000001001234567890, '-100.123456789E-9'),
        (-0.0000001, '-100E-9'),
        (-0.000001001234567890, '-1.00123456789E-6'),
        (-0.000001, '-1E-6'),
        (-0.00001001234567890, '-10.0123456789E-6'),
        (-0.00001, '-10E-6'),
        (-0.0001001234567890, '-100.123456789E-6'),
        (-0.0001, '-100E-6'),
        (-0.001001234567890, '-1.00123456789E-3'),
        (-0.001, '-1E-3'),
        (-0.01001234567890, '-10.0123456789E-3'),
        (-0.01, '-10E-3'),
        (-0.1001234567890, '-100.123456789E-3'),
        (-0.1, '-100E-3'),
        (-1, '-1E+0'),
        (-1.1234567890, '-1.123456789E+0'),
        (-10, '-10E+0'),
        (-10.1234567890, '-10.123456789E+0'),
        (-100, '-100E+0'),
        (-100.1234567890, '-100.123456789E+0'),
        (-1000, '-1E+3'),
        (-1000.1234567890, pv('-1.00012345679E+3', '-1.000123456789E+3')),
        (-10000, '-10E+3'),
        (-10000.1234567890, pv('-10.0001234568E+3', '-10.000123456789E+3')),
        (-100000, '-100E+3'),
        (-100000.1234567890, pv('-100.000123457E+3', '-100.000123456789E+3')),
        (-1000000, '-1E+6'),
        (-1000000.1234567890, pv('-1.00000012346E+6', '-1.000000123456789E+6')),
        (-10000000, '-10E+6'),
        (-10000000.1234567890, pv('-10.0000001235E+6', '-10.00000012345679E+6')),
        (-100000000, '-100E+6'),
        (-100000000.1234567890, pv('-'+h(6)+'123E+6', '-100.00000012345679E+6')),
        (-1000000000, '-1E+9'),
        (-1000000000.1234567890, pv('-'+o(9)+'12E+9', '-1.0000000001234568E+9')),
        (-10000000000, '-10E+9'),
        (-10000000000.1234567890, pv('-'+t(9)+'1E+9', '-'+t(9)+'123457E+9')),
        (-100000000000, '-100E+9'),
        (-100000000000.1234567890, pv('-100E+9', '-100.00000000012346E+9')),
        (-1000000000000, '-1E+12'),
        (-1000000000000.1234567890, pv('-1E+12', '-1.0000000000001234E+12')),
        (-10000000000000, '-10E+12'),
        (-10000000000000.1234567890, pv('-10E+12', '-10.000000000000123E+12')),
        (-100000000000000, '-100E+12'),
        (-100000000000000.1234567890, pv('-100E+12', '-100.00000000000012E+12')),
        (-1000000000000000, '-1E+15'),
        (-1000000000000000.1234567890, pv('-1E+15', '-1.0000000000000001E+15')),
        (-10000000000000000, '-10E+15'),
        (-10000000000000000.1234567890, '-10E+15'),
        (-100000000000000000, '-100E+15'),
        (-100000000000000000.1234567890, '-100E+15'),
        (-1000000000000000000, '-1E+18'),
        (-1000000000000000000.1234567890, '-1E+18'),
        (-10000000000000000000, '-10E+18'),
        (-10000000000000000000.1234567890, '-10E+18'),
        (-100000000000000000000, '-100E+18'),
        (-100000000000000000000.1234567890, '-100E+18'),
        (-1000000000000000000000, '-1E+21'),
        (-1000000000000000000000.1234567890, '-1E+21'),
        (-10000000000000000000000, '-10E+21'),
        (-10000000000000000000000.1234567890, '-10E+21'),
        (-100000000000000000000000, '-100E+21'),
        (-100000000000000000000000.1234567890, pv('-100E+21', '-'+h(13)+'1E+21')),
        (-1000000000000000000000000, '-1E+24'),
        (-1000000000000000000000000.1234567890, '-1E+24'),
        (-10000000000000000000000000, '-10E+24'),
        (-10000000000000000000000000.1234567890, '-10E+24'),
        (-100000000000000000000000000, '-100E+24'),
        (-100000000000000000000000000.1234567890, '-100E+24'),
        ('100000.1234567890', '100.000123456789E+3'),
        ('-100000.1234567890', '-100.000123456789E+3'),
    ]
)
def test_to_sci_string(num, ref):
    """ Test _to_eng_string function behavior """
    assert to_sci_string(num) == ref
@pytest.mark.parametrize(
    'num, ref', [
        (0, '0'),
        (0.0, '0.0'),
        (4, '4'),
        (4.0, '4.0'),
        (45, '45'),
        (450, '450'),
        (1234567, '1234567'),
        (4.5, '4.5'),
        (4.1234, '4.1234'),
        (4123.4E4, '41234000'),
        (0.1, '0.1'),
        (1.43E-2, '0.0143'),
        (100000000.0, '100000000.0'),
        (1000000, '1000000'),
        (1e3, '1000.0'),
    ]
)
def test_no_exp(num, ref):
    """ Test no_exp function behavior """
    result = putil.eng.no_exp(num)
    assert result == ref
@pytest.mark.eng
def test_no_ex_exceptions():
    """ Test no_exp function exceptions """
    # NOTE(review): the function name looks like a typo for
    # `test_no_exp_exceptions`; kept as-is because renaming would change the
    # collected pytest test ID
    AI(putil.eng.no_exp, 'number', number='a')
@pytest.mark.eng
@pytest.mark.parametrize(
    'args, name', [
        ({'number': ['5'], 'frac_length': 3, 'rjust': True}, 'number'),
        ({'number': 5, 'frac_length': 3.5, 'rjust': True}, 'frac_length'),
        ({'number': 5, 'frac_length': -2, 'rjust': True}, 'frac_length'),
        ({'number': 5, 'frac_length': 3, 'rjust': 'a'}, 'rjust')
    ]
)
def test_peng_exceptions(args, name):
    """ Test peng function exceptions """
    # Each case passes one invalid keyword; `name` is the argument expected
    # to be reported as invalid
    AI(putil.eng.peng, name, **args)
# peng() sweep: one case per engineering decade (suffixes y through Y) for
# positive and negative values, plus rounding/carry edge cases (e.g.
# 999.999k -> 1.0M) and saturation at 1e+27; rjust=DFLT rows exercise the
# function's default justification behavior
@pytest.mark.parametrize(
    'num, mant, rjust, ref', [
        (3.0333333333, 1, False, '3.0'),
        (0, 3, True, '   0.000 '),
        (0, 3, False, '0.000'),
        (125.5, 0, False, '126'),
        (1e-25, 3, True, '   1.000y'),
        (1e-24, 3, True, '   1.000y'),
        (1e-23, 3, True, '  10.000y'),
        (1e-22, 3, True, ' 100.000y'),
        (1e-21, 3, True, '   1.000z'),
        (1e-20, 3, True, '  10.000z'),
        (1e-19, 3, True, ' 100.000z'),
        (1e-18, 3, True, '   1.000a'),
        (1e-17, 3, True, '  10.000a'),
        (1e-16, 3, True, ' 100.000a'),
        (1e-15, 3, True, '   1.000f'),
        (1e-14, 3, True, '  10.000f'),
        (1e-13, 3, True, ' 100.000f'),
        (1e-12, 3, True, '   1.000p'),
        (1e-11, 3, True, '  10.000p'),
        (1e-10, 3, True, ' 100.000p'),
        (1e-9, 3, True, '   1.000n'),
        (1e-8, 3, True, '  10.000n'),
        (1e-7, 3, True, ' 100.000n'),
        (1e-6, 3, True, '   1.000u'),
        (1e-5, 3, True, '  10.000u'),
        (1e-4, 3, True, ' 100.000u'),
        (1e-3, 3, True, '   1.000m'),
        (1e-2, 3, True, '  10.000m'),
        (1e-1, 3, True, ' 100.000m'),
        (1e-0, 3, True, '   1.000 '),
        (1e+1, 3, True, '  10.000 '),
        (1e+2, 3, True, ' 100.000 '),
        (1e+3, 3, True, '   1.000k'),
        (1e+4, 3, True, '  10.000k'),
        (1e+5, 3, True, ' 100.000k'),
        (1e+6, 3, True, '   1.000M'),
        (1e+7, 3, True, '  10.000M'),
        (1e+8, 3, True, ' 100.000M'),
        (1e+9, 3, True, '   1.000G'),
        (1e+10, 3, True, '  10.000G'),
        (1e+11, 3, True, ' 100.000G'),
        (1e+12, 3, True, '   1.000T'),
        (1e+13, 3, True, '  10.000T'),
        (1e+14, 3, True, ' 100.000T'),
        (1e+15, 3, True, '   1.000P'),
        (1e+16, 3, True, '  10.000P'),
        (1e+17, 3, True, ' 100.000P'),
        (1e+18, 3, True, '   1.000E'),
        (1e+19, 3, True, '  10.000E'),
        (1e+20, 3, True, ' 100.000E'),
        (1e+21, 3, True, '   1.000Z'),
        (1e+22, 3, True, '  10.000Z'),
        (1e+23, 3, True, ' 100.000Z'),
        (1e+24, 3, True, '   1.000Y'),
        (1e+25, 3, True, '  10.000Y'),
        (1e+26, 3, True, ' 100.000Y'),
        (1e+27, 3, True, ' 999.999Y'),
        (12.45, 1, True, '  12.5 '),
        (998.999e3, 1, True, ' 999.0k'),
        (998.999e3, 1, False, '999.0k'),
        (999.999e3, 1, True, '   1.0M'),
        (999.999e3, 1, DFLT, '   1.0M'),
        (999.999e3, 1, False, '1.0M'),
        (0.995, 0, False, '995m'),
        (0.9999, 0, False, '1'),
        (1.9999, 0, False, '2'),
        (999.99, 0, False, '1k'),
        (9.99, 1, False, '10.0'),
        (5.25e3, 1, True, '   5.3k'),
        (1.05e3, 0, True, '   1k'),
        (-1e-25, 3, True, '  -1.000y'),
        (-1e-24, 3, True, '  -1.000y'),
        (-1e-23, 3, True, ' -10.000y'),
        (-1e-22, 3, True, '-100.000y'),
        (-1e-21, 3, True, '  -1.000z'),
        (-1e-20, 3, True, ' -10.000z'),
        (-1e-19, 3, True, '-100.000z'),
        (-1e-18, 3, True, '  -1.000a'),
        (-1e-17, 3, True, ' -10.000a'),
        (-1e-16, 3, True, '-100.000a'),
        (-1e-15, 3, True, '  -1.000f'),
        (-1e-14, 3, True, ' -10.000f'),
        (-1e-13, 3, True, '-100.000f'),
        (-1e-12, 3, True, '  -1.000p'),
        (-1e-11, 3, True, ' -10.000p'),
        (-1e-10, 3, True, '-100.000p'),
        (-1e-9, 3, True, '  -1.000n'),
        (-1e-8, 3, True, ' -10.000n'),
        (-1e-7, 3, True, '-100.000n'),
        (-1e-6, 3, True, '  -1.000u'),
        (-1e-5, 3, True, ' -10.000u'),
        (-1e-4, 3, True, '-100.000u'),
        (-1e-3, 3, True, '  -1.000m'),
        (-1e-2, 3, True, ' -10.000m'),
        (-1e-1, 3, True, '-100.000m'),
        (-1e-0, 3, True, '  -1.000 '),
        (-1e+1, 3, True, ' -10.000 '),
        (-1e+2, 3, True, '-100.000 '),
        (-1e+3, 3, True, '  -1.000k'),
        (-1e+4, 3, True, ' -10.000k'),
        (-1e+5, 3, True, '-100.000k'),
        (-1e+6, 3, True, '  -1.000M'),
        (-1e+7, 3, True, ' -10.000M'),
        (-1e+8, 3, True, '-100.000M'),
        (-1e+9, 3, True, '  -1.000G'),
        (-1e+10, 3, True, ' -10.000G'),
        (-1e+11, 3, True, '-100.000G'),
        (-1e+12, 3, True, '  -1.000T'),
        (-1e+13, 3, True, ' -10.000T'),
        (-1e+14, 3, True, '-100.000T'),
        (-1e+15, 3, True, '  -1.000P'),
        (-1e+16, 3, True, ' -10.000P'),
        (-1e+17, 3, True, '-100.000P'),
        (-1e+18, 3, True, '  -1.000E'),
        (-1e+19, 3, True, ' -10.000E'),
        (-1e+20, 3, True, '-100.000E'),
        (-1e+21, 3, True, '  -1.000Z'),
        (-1e+22, 3, True, ' -10.000Z'),
        (-1e+23, 3, True, '-100.000Z'),
        (-1e+24, 3, True, '  -1.000Y'),
        (-1e+25, 3, True, ' -10.000Y'),
        (-1e+26, 3, True, '-100.000Y'),
        (-1e+27, 3, True, '-999.999Y'),
        (-12.45, 1, True, ' -12.5 '),
        (-998.999e3, 1, True, '-999.0k'),
        (-998.999e3, 1, False, '-999.0k'),
        (-999.999e3, 1, True, '  -1.0M'),
        (-999.999e3, 1, DFLT, '  -1.0M'),
        (-999.999e3, 1, False, '-1.0M'),
        (-0.995, 0, False, '-995m'),
        (-0.9999, 0, False, '-1'),
        (-1.9999, 0, False, '-2'),
        (-999.99, 0, False, '-1k'),
        (-9.99, 1, False, '-10.0'),
        (-5.25e3, 1, True, '  -5.3k'),
        (-1.05e3, 0, True, '  -1k')
    ]
)
def test_peng(num, mant, rjust, ref):
    """ Test peng function behavior """
    # rjust == DFLT means the default argument value should be used
    obj = putil.eng.peng
    obj = obj if isdflt(rjust) else functools.partial(obj, rjust=rjust)
    assert obj(num, mant) == ref
@pytest.mark.eng
@pytest.mark.parametrize('arg', [None, 5, '', ' 5x', 'a5M', '- - a5M'])
@pytest.mark.parametrize(
    'func', [
        putil.eng.peng_float,
        putil.eng.peng_frac,
        putil.eng.peng_int,
        putil.eng.peng_mant,
        putil.eng.peng_power,
        putil.eng.peng_suffix,
    ]
)
def test_peng_snum_exceptions(func, arg):
    """
    Test exceptions of functions that receive a string representing
    a number in engineering notation
    """
    # Every parser should reject the malformed `snum` argument
    AI(func, 'snum', snum=arg)
@pytest.mark.parametrize(
    'arg, ref', [
        (putil.eng.peng(5234.567, 3, True), 5.235e3),
        (' 5.235k ', 5.235e3),
        (' -5.235k ', -5.235e3),
    ]
)
def test_peng_float(arg, ref):
    """ Test peng_float function behavior """
    result = putil.eng.peng_float(arg)
    assert result == ref
@pytest.mark.parametrize(
    'arg, ref', [
        (putil.eng.peng(5234.567, 6, True), 234567),
        (putil.eng.peng(5234, 0, True), 0)
    ]
)
def test_peng_frac(arg, ref):
    """ Test peng_frac function behavior """
    result = putil.eng.peng_frac(arg)
    assert result == ref
def test_peng_int():
    """ Test peng_int function behavior """
    snum = putil.eng.peng(5234.567, 6, True)
    assert putil.eng.peng_int(snum) == 5
def test_peng_mant():
    """ Test peng_mant function behavior """
    snum = putil.eng.peng(5234.567, 3, True)
    assert putil.eng.peng_mant(snum) == 5.235
def test_peng_power():
    """ Test peng_power function behavior """
    suffix, factor = putil.eng.peng_power(putil.eng.peng(1234.567, 3, True))
    assert (suffix, factor) == ('k', 1000.0)
    # The scale factor must be a float, not an int
    assert isinstance(factor, float)
@pytest.mark.parametrize(
    'arg, ref', [
        (putil.eng.peng(1, 3, True), ' '),
        (putil.eng.peng(-10.5e-6, 3, False), 'u')
    ]
)
def test_peng_suffix(arg, ref):
    """ Test peng_suffix function behavior """
    result = putil.eng.peng_suffix(arg)
    assert result == ref
@pytest.mark.eng
@pytest.mark.parametrize(
    'args, extype, name', [
        (dict(suffix='X', offset=-1), RuntimeError, 'suffix'),
        (dict(suffix='M', offset='a'), RuntimeError, 'offset'),
        (dict(suffix='M', offset=20), ValueError, 'offset'),
    ]
)
def test_peng_suffix_math_exceptions(args, extype, name):
    """
    Test peng_suffix_math function exceptions.
    Fix: the original applied ``@pytest.mark.eng`` twice (once above and once
    below the parametrize decorator); the redundant application is removed.
    """
    AE(putil.eng.peng_suffix_math, extype, sarg(name), **args)
@pytest.mark.parametrize('args, ref', [((' ', 3), 'G'), (('u', -2), 'p')])
def test_peng_suffix_math(args, ref):
    """ Test peng_suffix_math function behavior """
    result = putil.eng.peng_suffix_math(*args)
    assert result == ref
@pytest.mark.parametrize(
    'num, frac_length, exp_length, sign_always, ref', [
        ('5.35E+3', DFLT, DFLT, DFLT, '5.35E+3'),
        (0, DFLT, DFLT, DFLT, '0E+0'),
        (0.1, DFLT, DFLT, DFLT, '1E-1'),
        (0.01, DFLT, DFLT, DFLT, '1E-2'),
        (0.001, DFLT, DFLT, DFLT, '1E-3'),
        (0.00101, DFLT, DFLT, DFLT, '1.01E-3'),
        (0.123456789012, DFLT, DFLT, DFLT, '1.23456789012E-1'),
        (1234567.89012, DFLT, DFLT, DFLT, '1.23456789012E+6'),
        (1, DFLT, DFLT, DFLT, '1E+0'),
        (20, DFLT, DFLT, DFLT, '2E+1'),
        (100, DFLT, DFLT, DFLT, '1E+2'),
        (200, DFLT, DFLT, DFLT, '2E+2'),
        (333, DFLT, DFLT, DFLT, '3.33E+2'),
        (4567, DFLT, DFLT, DFLT, '4.567E+3'),
        (4567.890, DFLT, DFLT, DFLT, '4.56789E+3'),
        (500, 3, DFLT, DFLT, '5.000E+2'),
        (4567.890, 8, DFLT, DFLT, '4.56789000E+3'),
        (99.999, 1, DFLT, DFLT, '1.0E+2'),
        (4567.890, DFLT, DFLT, True, '+4.56789E+3'),
        (500, 3, DFLT, True, '+5.000E+2'),
        (4567.890, 8, DFLT, True, '+4.56789000E+3'),
        (99.999, 1, DFLT, True, '+1.0E+2'),
        (500, 3, 2, True, '+5.000E+02'),
        (4567.890, 8, 3, True, '+4.56789000E+003'),
        (9999999999.999, 1, 1, True, '+1.0E+10'),
        (-0.1, DFLT, DFLT, DFLT, '-1E-1'),
        (-0.01, DFLT, DFLT, DFLT, '-1E-2'),
        (-0.001, DFLT, DFLT, DFLT, '-1E-3'),
        (-0.00101, DFLT, DFLT, DFLT, '-1.01E-3'),
        (-0.123456789012, DFLT, DFLT, DFLT, '-1.23456789012E-1'),
        (-1234567.89012, DFLT, DFLT, DFLT, '-1.23456789012E+6'),
        (-1, DFLT, DFLT, DFLT, '-1E+0'),
        (-20, DFLT, DFLT, DFLT, '-2E+1'),
        (-100, DFLT, DFLT, DFLT, '-1E+2'),
        (-200, DFLT, DFLT, DFLT, '-2E+2'),
        (-333, DFLT, DFLT, DFLT, '-3.33E+2'),
        (-4567, DFLT, DFLT, DFLT, '-4.567E+3'),
        (-4567.890, DFLT, DFLT, DFLT, '-4.56789E+3'),
        (-500, 3, DFLT, DFLT, '-5.000E+2'),
        (-4567.890, 8, DFLT, DFLT, '-4.56789000E+3'),
        (-99.999, 1, DFLT, DFLT, '-1.0E+2'),
        (-4567.890, DFLT, DFLT, True, '-4.56789E+3'),
        (-500, 3, DFLT, True, '-5.000E+2'),
        (-4567.890, 8, DFLT, True, '-4.56789000E+3'),
        (-99.999, 1, DFLT, True, '-1.0E+2'),
        (-500, 3, 2, True, '-5.000E+02'),
        (-4567.890, 8, 3, True, '-4.56789000E+003'),
        (-9999999999.999, 1, 1, True, '-1.0E+10'),
    ]
)
def test_to_scientific_string(num, frac_length, exp_length, sign_always, ref):
    """ Test _to_scientific function behavior """
    # DFLT means the corresponding keyword keeps its default value; pass only
    # the overridden keywords through
    kwargs = {}
    if not isdflt(frac_length):
        kwargs['frac_length'] = frac_length
    if not isdflt(exp_length):
        kwargs['exp_length'] = exp_length
    if not isdflt(sign_always):
        kwargs['sign_always'] = sign_always
    assert putil.eng.to_scientific_string(num, **kwargs) == ref
# Complex-valued sample vector shared by the vector-formatting parametrize
# cases that follow
CVECTOR = [-1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j]
@pytest.mark.parametrize(
'vector, args, ref, header', [
(
None,
DFLT,
'None',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
DFLT,
'[ 1, 2, 3, 4, 5, 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(indent=20),
'[ 1, 2, 3, 4, 5, 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(indent=20),
'[ 1, 2, 3, 4, 5, 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(limit=True),
'[ 1, 2, 3, ..., 6, 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(limit=True, indent=20),
'[ 1, 2, 3, ..., 6, 7, 8 ]',
''
),
# Float and integer item #ref = (
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True),
'[ 1.000m, 20.000u, 300.000M, 4.000p,'
' 5.250k, -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, indent=20),
'[ 1.000m, 20.000u, 300.000M, 4.000p,'
' 5.250k, -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True),
'[ 1.000m, 20.000u, 300.000M,'
' ...,'
' -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True, indent=20),
'[ 1.000m, 20.000u, 300.000M,'
' ...,'
' -6.000n, 700.000 , 800.000m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, 4.0p,'
' 5.3k, -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(eng=True, frac_length=1, indent=20),
'[ 1.0m, 20.0u, 300.0M, 4.0p,'
' 5.3k, -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, ..., -6.0n, 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(limit=True, indent=20, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M, ..., -6.0n, 700.0 , 800.0m ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(width=8),
#12345678
'[ 1, 2,\n'
' 3, 4,\n'
' 5, 6,\n'
' 7, 8 ]',
''
),
(
[1, 2, 3, 4, 5, 6, 7, 8],
dict(width=10),
'[ 1, 2, 3,\n'
' 4, 5, 6,\n'
' 7, 8 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=20, eng=True, frac_length=0),
'[ 1m, 20u,\n'
' 300M, 4p,\n'
' 5k, -6n,\n'
' 700 , 8 ,\n'
' 9 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(width=30, eng=True, frac_length=1),
'[ 1.0m, 20.0u, 300.0M,\n'
' 4.0p, 5.3k, -6.0n,\n'
' 700.0 , 800.0m ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=20, eng=True, frac_length=0, limit=True),
'[ 1m,\n'
' 20u,\n'
' 300M,\n'
' ...\n'
' 700 ,\n'
' 8 ,\n'
' 9 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=30, eng=True, frac_length=1, limit=True),
'[ 1.0m, 20.0u, 300.0M,\n'
' ...\n'
' 700.0 , 8.0 , 9.0 ]',
''
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
dict(width=30, eng=True, frac_length=1, limit=True, indent=8),
'Vector: [ 1.0m, 20.0u, 300.0M,\n'
' ...\n'
' 700.0 , 8.0 , 9.0 ]',
'Vector: '
),
(
[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 0.8],
dict(width=30, eng=True, frac_length=1, indent=8),
'Vector: [ 1.0m, 20.0u, 300.0M,\n'
' 4.0p, 5.3k, -6.0n,\n'
' 700.0 , 800.0m ]',
'Vector: '
),
(
[
1.23456789, 2.45678901, 3.45678901, 4.56789012,
5.67890123, 6.78901234, 7.89012345
],
dict(limit=True, width=80-22, indent=22),
'Independent variable: [ 1.23456789, 2.45678901, 3.45678901,\n'
' ...\n'
' 5.67890123, 6.78901234, 7.89012345 ]',
'Independent variable: '
),
(
[
1.23456789, 2.45678901, 3.45678901, 4.56789012,
5.67890123, 6.78901234, 7.89012345
],
dict(width=49, indent=17),
'Independent var: [ 1.23456789, 2.45678901, 3.45678901, '
'4.56789012,\n'
' 5.67890123, 6.78901234, 7.89012345 ]',
'Independent var: '
),
# Complex items
(
CVECTOR,
DFLT,
'[ -1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(indent=20),
'[ -1+2j, 3+4j, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(limit=True),
'[ -1+2j, 3+4j, 5+6j, ..., 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(limit=True, indent=20),
'[ -1+2j, 3+4j, 5+6j, ..., 11+12j, -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(eng=True),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' 7.000 + 8.000 j, 9.000 - 10.000 j,'
' 11.000 + 12.000 j,'
' -13.000 + 14.000 j, 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(eng=True, indent=20),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' 7.000 + 8.000 j, 9.000 - 10.000 j,'
' 11.000 + 12.000 j,'
' -13.000 + 14.000 j, 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' ..., 11.000 + 12.000 j, -13.000 + 14.000 j,'
' 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, indent=20),
'[ -1.000 + 2.000 j, 3.000 + 4.000 j,'
' 5.000 + 6.000 j,'
' ..., 11.000 + 12.000 j, -13.000 + 14.000 j,'
' 15.678k- 16.000 j ]',
''
),
(
CVECTOR,
dict(eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' 7.0 + 8.0 j, 9.0 - 10.0 j, 11.0 + 12.0 j,'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(eng=True, frac_length=1, indent=20),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' 7.0 + 8.0 j, 9.0 - 10.0 j, 11.0 + 12.0 j,'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' ..., 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(limit=True, eng=True, frac_length=1, indent=20),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,'
' ..., 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=22),
'[ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j, 11+12j,\n'
' -13+14j, 15678-16j ]',
''
),
(
CVECTOR,
dict(width=20),
'[ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j,\n'
' 11+12j, -13+14j,\n'
' 15678-16j ]',
''
),
(
CVECTOR,
dict(width=29, eng=True, frac_length=0),
'[ -1 + 2 j, 3 + 4 j,\n'
' 5 + 6 j, 7 + 8 j,\n'
' 9 - 10 j, 11 + 12 j,\n'
' -13 + 14 j, 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=37, eng=True, frac_length=1),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j,\n'
' 5.0 + 6.0 j, 7.0 + 8.0 j,\n'
' 9.0 - 10.0 j, 11.0 + 12.0 j,\n'
' -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=16, eng=True, frac_length=0),
'[ -1 + 2 j,\n'
' 3 + 4 j,\n'
' 5 + 6 j,\n'
' 7 + 8 j,\n'
' 9 - 10 j,\n'
' 11 + 12 j,\n'
' -13 + 14 j,\n'
' 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=16, eng=True, frac_length=0, limit=True),
'[ -1 + 2 j,\n'
' 3 + 4 j,\n'
' 5 + 6 j,\n'
' ...\n'
' 11 + 12 j,\n'
' -13 + 14 j,\n'
' 16k- 16 j ]',
''
),
(
CVECTOR,
dict(width=56, eng=True, frac_length=1, limit=True),
'[ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,\n'
' ...\n'
' 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
''
),
(
CVECTOR,
dict(width=64, eng=True, frac_length=1, limit=True, indent=8),
'Vector: [ -1.0 + 2.0 j, 3.0 + 4.0 j, 5.0 + 6.0 j,\n'
' ...\n'
' 11.0 + 12.0 j, -13.0 + 14.0 j, 15.7k- 16.0 j ]',
'Vector: '
),
(
CVECTOR,
dict(width=20, indent=8),
'Vector: [ -1+2j, 3+4j, 5+6j,\n'
' 7+8j, 9-10j,\n'
' 11+12j, -13+14j,\n'
' 15678-16j ]',
'Vector: '
),
(
CVECTOR,
dict(width=30, indent=8, limit=True),
'Vector: [ -1+2j, 3+4j, 5+6j,\n'
' ...\n'
' 11+12j, -13+14j, 15678-16j ]',
'Vector: '
),
(
CVECTOR,
dict(width=20, indent=8, limit=True),
'Vector: [ -1+2j,\n'
' 3+4j,\n'
' 5+6j,\n'
' ...\n'
' 11+12j,\n'
' -13+14j,\n'
' 15678-16j ]',
'Vector: '
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j
]
),
DFLT,
'[ -0.100816750273-0.0691051714274j, '
'0.0187542291856+0.0171427835609j, 18j ]',
''
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j
]
),
dict(width=60, limit=True, indent=20),
'Dependent variable: [ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j, 18j ]',
'Dependent variable: '
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j,
0.118754229185649937+0.117142783560861786j,
0.218754229185649937+0.217142783560861786j,
0+28j,
10+2j,
]
),
dict(width=60),
'[ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j, 18j,\n'
' 0.118754229186+0.117142783561j,\n'
' 0.218754229186+0.217142783561j, 28j, 10+2j ]',
''
),
(
array(
[
-0.10081675027325637-0.06910517142735251j,
0.018754229185649937+0.017142783560861786j,
0+18j,
0.118754229185649937+0.117142783560861786j,
0.218754229185649937+0.217142783560861786j,
0+28j,
10+2j,
]
),
dict(width=60, limit=True),
'[ -0.100816750273-0.0691051714274j,\n'
' 0.0187542291856+0.0171427835609j,\n'
' 18j,\n'
' ...\n'
' 0.218754229186+0.217142783561j,\n'
' 28j,\n'
' 10+2j ]',
''
),
]
)
def test_pprint_vector(vector, args, ref, header):
""" Test pprint_vector function behavior """
obj = putil.eng.pprint_vector
obj = obj if isdflt(args) else functools.partial(obj, **args)
CS(header+obj(vector), ref)
@pytest.mark.parametrize(
'args', [
dict(
vector=[1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9],
width=5, eng=True, frac_length=1, limit=True
),
dict(
vector=[-1+2j, 3, 5+6j, 7+8j, 9-10j, 11+12j, -13+14j, 15678-16j],
width=8, limit=True
)
]
)
@pytest.mark.eng
def test_pprint_vector_exceptions(args):
""" Test pprint_vector function exceptions """
msg = 'Argument `width` is too small'
AE(putil.eng.pprint_vector, ValueError, msg, **args)
@pytest.mark.parametrize(
'num, dec, ref', [
(None, DFLT, None),
(1.3333, 2, 1.33),
(1.5555E-12, 2, 1.56E-12),
(3, 2, 3),
(array([1.3333, 2.666666]), 2, array([1.33, 2.67])),
(array([1.3333E-12, 2.666666E-12]), 2, array([1.33E-12, 2.67E-12])),
(array([1, 3]), 2, array([1, 3])),
]
)
def test_round_mantissa(num, dec, ref):
""" Test round_mantissa function behavior """
obj = putil.eng.round_mantissa
obj = obj if isdflt(dec) else functools.partial(obj, decimals=dec)
test = obj(num) == ref
assert test.all() if isinstance(num, ndarray) else test
| [
"functools.partial",
"numpy.array",
"pytest.mark.parametrize",
"putil.test.AE",
"putil.test.AI"
] | [((1132, 1778), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text, sep, num, lstrip, rstrip, ref"""', "[('a, b, c, d', ',', 1, DFLT, DFLT, ('a', ' b', ' c', ' d')), (\n 'a , b , c , d ', ',', 1, DFLT, DFLT, ('a ', ' b ', ' c ', ' d ')), (\n 'a , b , c , d ', ',', 1, True, DFLT, ('a ', 'b ', 'c ', 'd ')), (\n 'a , b , c , d ', ',', 1, DFLT, True, ('a', ' b', ' c', ' d')), (\n 'a , b , c , d ', ',', 1, True, True, ('a', 'b', 'c', 'd')), (\n 'a, b, c, d', ',', 2, DFLT, DFLT, ('a, b', ' c, d')), ('a, b, c, d',\n ',', 3, DFLT, DFLT, ('a, b, c', ' d')), ('a, b, c, d', ',', 4, DFLT,\n DFLT, ('a, b, c, d',)), ('a, b, c, d', ',', 5, DFLT, DFLT, ('a, b, c, d',))\n ]"], {}), "('text, sep, num, lstrip, rstrip, ref', [(\n 'a, b, c, d', ',', 1, DFLT, DFLT, ('a', ' b', ' c', ' d')), (\n 'a , b , c , d ', ',', 1, DFLT, DFLT, ('a ', ' b ', ' c ', ' d ')), (\n 'a , b , c , d ', ',', 1, True, DFLT, ('a ', 'b ', 'c ', 'd ')), (\n 'a , b , c , d ', ',', 1, DFLT, True, ('a', ' b', ' c', ' d')), (\n 'a , b , c , d ', ',', 1, True, True, ('a', 'b', 'c', 'd')), (\n 'a, b, c, d', ',', 2, DFLT, DFLT, ('a, b', ' c, d')), ('a, b, c, d',\n ',', 3, DFLT, DFLT, ('a, b, c', ' d')), ('a, b, c, d', ',', 4, DFLT,\n DFLT, ('a, b, c, d',)), ('a, b, c, d', ',', 5, DFLT, DFLT, (\n 'a, b, c, d',))])\n", (1155, 1778), False, 'import pytest\n'), ((11893, 12209), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num, ref"""', "[(0, '0'), (0.0, '0.0'), (4, '4'), (4.0, '4.0'), (45, '45'), (450, '450'),\n (1234567, '1234567'), (4.5, '4.5'), (4.1234, '4.1234'), (41234000.0,\n '41234000'), (0.1, '0.1'), (0.0143, '0.0143'), (100000000.0,\n '100000000.0'), (1000000, '1000000'), (1000.0, '1000.0')]"], {}), "('num, ref', [(0, '0'), (0.0, '0.0'), (4, '4'), (4.0,\n '4.0'), (45, '45'), (450, '450'), (1234567, '1234567'), (4.5, '4.5'), (\n 4.1234, '4.1234'), (41234000.0, '41234000'), (0.1, '0.1'), (0.0143,\n '0.0143'), (100000000.0, '100000000.0'), (1000000, '1000000'), (1000.0,\n 
'1000.0')])\n", (11916, 12209), False, 'import pytest\n'), ((12969, 17688), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num, mant, rjust, ref"""', "[(3.0333333333, 1, False, '3.0'), (0, 3, True, ' 0.000 '), (0, 3, False,\n '0.000'), (125.5, 0, False, '126'), (1e-25, 3, True, ' 1.000y'), (\n 1e-24, 3, True, ' 1.000y'), (1e-23, 3, True, ' 10.000y'), (1e-22, 3,\n True, ' 100.000y'), (1e-21, 3, True, ' 1.000z'), (1e-20, 3, True,\n ' 10.000z'), (1e-19, 3, True, ' 100.000z'), (1e-18, 3, True,\n ' 1.000a'), (1e-17, 3, True, ' 10.000a'), (1e-16, 3, True,\n ' 100.000a'), (1e-15, 3, True, ' 1.000f'), (1e-14, 3, True,\n ' 10.000f'), (1e-13, 3, True, ' 100.000f'), (1e-12, 3, True,\n ' 1.000p'), (1e-11, 3, True, ' 10.000p'), (1e-10, 3, True,\n ' 100.000p'), (1e-09, 3, True, ' 1.000n'), (1e-08, 3, True,\n ' 10.000n'), (1e-07, 3, True, ' 100.000n'), (1e-06, 3, True,\n ' 1.000u'), (1e-05, 3, True, ' 10.000u'), (0.0001, 3, True,\n ' 100.000u'), (0.001, 3, True, ' 1.000m'), (0.01, 3, True,\n ' 10.000m'), (0.1, 3, True, ' 100.000m'), (1.0, 3, True, ' 1.000 '),\n (10.0, 3, True, ' 10.000 '), (100.0, 3, True, ' 100.000 '), (1000.0, 3,\n True, ' 1.000k'), (10000.0, 3, True, ' 10.000k'), (100000.0, 3, True,\n ' 100.000k'), (1000000.0, 3, True, ' 1.000M'), (10000000.0, 3, True,\n ' 10.000M'), (100000000.0, 3, True, ' 100.000M'), (1000000000.0, 3, \n True, ' 1.000G'), (10000000000.0, 3, True, ' 10.000G'), (\n 100000000000.0, 3, True, ' 100.000G'), (1000000000000.0, 3, True,\n ' 1.000T'), (10000000000000.0, 3, True, ' 10.000T'), (\n 100000000000000.0, 3, True, ' 100.000T'), (1000000000000000.0, 3, True,\n ' 1.000P'), (1e+16, 3, True, ' 10.000P'), (1e+17, 3, True,\n ' 100.000P'), (1e+18, 3, True, ' 1.000E'), (1e+19, 3, True,\n ' 10.000E'), (1e+20, 3, True, ' 100.000E'), (1e+21, 3, True,\n ' 1.000Z'), (1e+22, 3, True, ' 10.000Z'), (1e+23, 3, True,\n ' 100.000Z'), (1e+24, 3, True, ' 1.000Y'), (1e+25, 3, True,\n ' 10.000Y'), (1e+26, 3, True, ' 100.000Y'), (1e+27, 3, True,\n ' 
999.999Y'), (12.45, 1, True, ' 12.5 '), (998999.0, 1, True,\n ' 999.0k'), (998999.0, 1, False, '999.0k'), (999999.0, 1, True,\n ' 1.0M'), (999999.0, 1, DFLT, ' 1.0M'), (999999.0, 1, False, '1.0M'\n ), (0.995, 0, False, '995m'), (0.9999, 0, False, '1'), (1.9999, 0, \n False, '2'), (999.99, 0, False, '1k'), (9.99, 1, False, '10.0'), (\n 5250.0, 1, True, ' 5.3k'), (1050.0, 0, True, ' 1k'), (-1e-25, 3, \n True, ' -1.000y'), (-1e-24, 3, True, ' -1.000y'), (-1e-23, 3, True,\n ' -10.000y'), (-1e-22, 3, True, '-100.000y'), (-1e-21, 3, True,\n ' -1.000z'), (-1e-20, 3, True, ' -10.000z'), (-1e-19, 3, True,\n '-100.000z'), (-1e-18, 3, True, ' -1.000a'), (-1e-17, 3, True,\n ' -10.000a'), (-1e-16, 3, True, '-100.000a'), (-1e-15, 3, True,\n ' -1.000f'), (-1e-14, 3, True, ' -10.000f'), (-1e-13, 3, True,\n '-100.000f'), (-1e-12, 3, True, ' -1.000p'), (-1e-11, 3, True,\n ' -10.000p'), (-1e-10, 3, True, '-100.000p'), (-1e-09, 3, True,\n ' -1.000n'), (-1e-08, 3, True, ' -10.000n'), (-1e-07, 3, True,\n '-100.000n'), (-1e-06, 3, True, ' -1.000u'), (-1e-05, 3, True,\n ' -10.000u'), (-0.0001, 3, True, '-100.000u'), (-0.001, 3, True,\n ' -1.000m'), (-0.01, 3, True, ' -10.000m'), (-0.1, 3, True,\n '-100.000m'), (-1.0, 3, True, ' -1.000 '), (-10.0, 3, True,\n ' -10.000 '), (-100.0, 3, True, '-100.000 '), (-1000.0, 3, True,\n ' -1.000k'), (-10000.0, 3, True, ' -10.000k'), (-100000.0, 3, True,\n '-100.000k'), (-1000000.0, 3, True, ' -1.000M'), (-10000000.0, 3, True,\n ' -10.000M'), (-100000000.0, 3, True, '-100.000M'), (-1000000000.0, 3, \n True, ' -1.000G'), (-10000000000.0, 3, True, ' -10.000G'), (-\n 100000000000.0, 3, True, '-100.000G'), (-1000000000000.0, 3, True,\n ' -1.000T'), (-10000000000000.0, 3, True, ' -10.000T'), (-\n 100000000000000.0, 3, True, '-100.000T'), (-1000000000000000.0, 3, True,\n ' -1.000P'), (-1e+16, 3, True, ' -10.000P'), (-1e+17, 3, True,\n '-100.000P'), (-1e+18, 3, True, ' -1.000E'), (-1e+19, 3, True,\n ' -10.000E'), (-1e+20, 3, True, '-100.000E'), (-1e+21, 3, 
True,\n ' -1.000Z'), (-1e+22, 3, True, ' -10.000Z'), (-1e+23, 3, True,\n '-100.000Z'), (-1e+24, 3, True, ' -1.000Y'), (-1e+25, 3, True,\n ' -10.000Y'), (-1e+26, 3, True, '-100.000Y'), (-1e+27, 3, True,\n '-999.999Y'), (-12.45, 1, True, ' -12.5 '), (-998999.0, 1, True,\n '-999.0k'), (-998999.0, 1, False, '-999.0k'), (-999999.0, 1, True,\n ' -1.0M'), (-999999.0, 1, DFLT, ' -1.0M'), (-999999.0, 1, False,\n '-1.0M'), (-0.995, 0, False, '-995m'), (-0.9999, 0, False, '-1'), (-\n 1.9999, 0, False, '-2'), (-999.99, 0, False, '-1k'), (-9.99, 1, False,\n '-10.0'), (-5250.0, 1, True, ' -5.3k'), (-1050.0, 0, True, ' -1k')]"], {}), "('num, mant, rjust, ref', [(3.0333333333, 1, False,\n '3.0'), (0, 3, True, ' 0.000 '), (0, 3, False, '0.000'), (125.5, 0, \n False, '126'), (1e-25, 3, True, ' 1.000y'), (1e-24, 3, True,\n ' 1.000y'), (1e-23, 3, True, ' 10.000y'), (1e-22, 3, True,\n ' 100.000y'), (1e-21, 3, True, ' 1.000z'), (1e-20, 3, True,\n ' 10.000z'), (1e-19, 3, True, ' 100.000z'), (1e-18, 3, True,\n ' 1.000a'), (1e-17, 3, True, ' 10.000a'), (1e-16, 3, True,\n ' 100.000a'), (1e-15, 3, True, ' 1.000f'), (1e-14, 3, True,\n ' 10.000f'), (1e-13, 3, True, ' 100.000f'), (1e-12, 3, True,\n ' 1.000p'), (1e-11, 3, True, ' 10.000p'), (1e-10, 3, True,\n ' 100.000p'), (1e-09, 3, True, ' 1.000n'), (1e-08, 3, True,\n ' 10.000n'), (1e-07, 3, True, ' 100.000n'), (1e-06, 3, True,\n ' 1.000u'), (1e-05, 3, True, ' 10.000u'), (0.0001, 3, True,\n ' 100.000u'), (0.001, 3, True, ' 1.000m'), (0.01, 3, True,\n ' 10.000m'), (0.1, 3, True, ' 100.000m'), (1.0, 3, True, ' 1.000 '),\n (10.0, 3, True, ' 10.000 '), (100.0, 3, True, ' 100.000 '), (1000.0, 3,\n True, ' 1.000k'), (10000.0, 3, True, ' 10.000k'), (100000.0, 3, True,\n ' 100.000k'), (1000000.0, 3, True, ' 1.000M'), (10000000.0, 3, True,\n ' 10.000M'), (100000000.0, 3, True, ' 100.000M'), (1000000000.0, 3, \n True, ' 1.000G'), (10000000000.0, 3, True, ' 10.000G'), (\n 100000000000.0, 3, True, ' 100.000G'), (1000000000000.0, 3, True,\n ' 1.000T'), 
(10000000000000.0, 3, True, ' 10.000T'), (\n 100000000000000.0, 3, True, ' 100.000T'), (1000000000000000.0, 3, True,\n ' 1.000P'), (1e+16, 3, True, ' 10.000P'), (1e+17, 3, True,\n ' 100.000P'), (1e+18, 3, True, ' 1.000E'), (1e+19, 3, True,\n ' 10.000E'), (1e+20, 3, True, ' 100.000E'), (1e+21, 3, True,\n ' 1.000Z'), (1e+22, 3, True, ' 10.000Z'), (1e+23, 3, True,\n ' 100.000Z'), (1e+24, 3, True, ' 1.000Y'), (1e+25, 3, True,\n ' 10.000Y'), (1e+26, 3, True, ' 100.000Y'), (1e+27, 3, True,\n ' 999.999Y'), (12.45, 1, True, ' 12.5 '), (998999.0, 1, True,\n ' 999.0k'), (998999.0, 1, False, '999.0k'), (999999.0, 1, True,\n ' 1.0M'), (999999.0, 1, DFLT, ' 1.0M'), (999999.0, 1, False, '1.0M'\n ), (0.995, 0, False, '995m'), (0.9999, 0, False, '1'), (1.9999, 0, \n False, '2'), (999.99, 0, False, '1k'), (9.99, 1, False, '10.0'), (\n 5250.0, 1, True, ' 5.3k'), (1050.0, 0, True, ' 1k'), (-1e-25, 3, \n True, ' -1.000y'), (-1e-24, 3, True, ' -1.000y'), (-1e-23, 3, True,\n ' -10.000y'), (-1e-22, 3, True, '-100.000y'), (-1e-21, 3, True,\n ' -1.000z'), (-1e-20, 3, True, ' -10.000z'), (-1e-19, 3, True,\n '-100.000z'), (-1e-18, 3, True, ' -1.000a'), (-1e-17, 3, True,\n ' -10.000a'), (-1e-16, 3, True, '-100.000a'), (-1e-15, 3, True,\n ' -1.000f'), (-1e-14, 3, True, ' -10.000f'), (-1e-13, 3, True,\n '-100.000f'), (-1e-12, 3, True, ' -1.000p'), (-1e-11, 3, True,\n ' -10.000p'), (-1e-10, 3, True, '-100.000p'), (-1e-09, 3, True,\n ' -1.000n'), (-1e-08, 3, True, ' -10.000n'), (-1e-07, 3, True,\n '-100.000n'), (-1e-06, 3, True, ' -1.000u'), (-1e-05, 3, True,\n ' -10.000u'), (-0.0001, 3, True, '-100.000u'), (-0.001, 3, True,\n ' -1.000m'), (-0.01, 3, True, ' -10.000m'), (-0.1, 3, True,\n '-100.000m'), (-1.0, 3, True, ' -1.000 '), (-10.0, 3, True,\n ' -10.000 '), (-100.0, 3, True, '-100.000 '), (-1000.0, 3, True,\n ' -1.000k'), (-10000.0, 3, True, ' -10.000k'), (-100000.0, 3, True,\n '-100.000k'), (-1000000.0, 3, True, ' -1.000M'), (-10000000.0, 3, True,\n ' -10.000M'), (-100000000.0, 3, True, 
'-100.000M'), (-1000000000.0, 3, \n True, ' -1.000G'), (-10000000000.0, 3, True, ' -10.000G'), (-\n 100000000000.0, 3, True, '-100.000G'), (-1000000000000.0, 3, True,\n ' -1.000T'), (-10000000000000.0, 3, True, ' -10.000T'), (-\n 100000000000000.0, 3, True, '-100.000T'), (-1000000000000000.0, 3, True,\n ' -1.000P'), (-1e+16, 3, True, ' -10.000P'), (-1e+17, 3, True,\n '-100.000P'), (-1e+18, 3, True, ' -1.000E'), (-1e+19, 3, True,\n ' -10.000E'), (-1e+20, 3, True, '-100.000E'), (-1e+21, 3, True,\n ' -1.000Z'), (-1e+22, 3, True, ' -10.000Z'), (-1e+23, 3, True,\n '-100.000Z'), (-1e+24, 3, True, ' -1.000Y'), (-1e+25, 3, True,\n ' -10.000Y'), (-1e+26, 3, True, '-100.000Y'), (-1e+27, 3, True,\n '-999.999Y'), (-12.45, 1, True, ' -12.5 '), (-998999.0, 1, True,\n '-999.0k'), (-998999.0, 1, False, '-999.0k'), (-999999.0, 1, True,\n ' -1.0M'), (-999999.0, 1, DFLT, ' -1.0M'), (-999999.0, 1, False,\n '-1.0M'), (-0.995, 0, False, '-995m'), (-0.9999, 0, False, '-1'), (-\n 1.9999, 0, False, '-2'), (-999.99, 0, False, '-1k'), (-9.99, 1, False,\n '-10.0'), (-5250.0, 1, True, ' -5.3k'), (-1050.0, 0, True, ' -1k')])\n", (12992, 17688), False, 'import pytest\n'), ((18537, 18607), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arg"""', "[None, 5, '', ' 5x', 'a5M', '- - a5M']"], {}), "('arg', [None, 5, '', ' 5x', 'a5M', '- - a5M'])\n", (18560, 18607), False, 'import pytest\n'), ((18609, 18780), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[putil.eng.peng_float, putil.eng.peng_frac, putil.eng.peng_int, putil.eng.\n peng_mant, putil.eng.peng_power, putil.eng.peng_suffix]'], {}), "('func', [putil.eng.peng_float, putil.eng.peng_frac,\n putil.eng.peng_int, putil.eng.peng_mant, putil.eng.peng_power, putil.\n eng.peng_suffix])\n", (18632, 18780), False, 'import pytest\n'), ((20831, 20904), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""args, ref"""', "[((' ', 3), 'G'), (('u', -2), 'p')]"], {}), "('args, ref', [((' ', 3), 'G'), (('u', -2), 
'p')])\n", (20854, 20904), False, 'import pytest\n'), ((21050, 23136), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""num, frac_length, exp_length, sign_always, ref"""', "[('5.35E+3', DFLT, DFLT, DFLT, '5.35E+3'), (0, DFLT, DFLT, DFLT, '0E+0'), (\n 0.1, DFLT, DFLT, DFLT, '1E-1'), (0.01, DFLT, DFLT, DFLT, '1E-2'), (\n 0.001, DFLT, DFLT, DFLT, '1E-3'), (0.00101, DFLT, DFLT, DFLT, '1.01E-3'\n ), (0.123456789012, DFLT, DFLT, DFLT, '1.23456789012E-1'), (\n 1234567.89012, DFLT, DFLT, DFLT, '1.23456789012E+6'), (1, DFLT, DFLT,\n DFLT, '1E+0'), (20, DFLT, DFLT, DFLT, '2E+1'), (100, DFLT, DFLT, DFLT,\n '1E+2'), (200, DFLT, DFLT, DFLT, '2E+2'), (333, DFLT, DFLT, DFLT,\n '3.33E+2'), (4567, DFLT, DFLT, DFLT, '4.567E+3'), (4567.89, DFLT, DFLT,\n DFLT, '4.56789E+3'), (500, 3, DFLT, DFLT, '5.000E+2'), (4567.89, 8,\n DFLT, DFLT, '4.56789000E+3'), (99.999, 1, DFLT, DFLT, '1.0E+2'), (\n 4567.89, DFLT, DFLT, True, '+4.56789E+3'), (500, 3, DFLT, True,\n '+5.000E+2'), (4567.89, 8, DFLT, True, '+4.56789000E+3'), (99.999, 1,\n DFLT, True, '+1.0E+2'), (500, 3, 2, True, '+5.000E+02'), (4567.89, 8, 3,\n True, '+4.56789000E+003'), (9999999999.999, 1, 1, True, '+1.0E+10'), (-\n 0.1, DFLT, DFLT, DFLT, '-1E-1'), (-0.01, DFLT, DFLT, DFLT, '-1E-2'), (-\n 0.001, DFLT, DFLT, DFLT, '-1E-3'), (-0.00101, DFLT, DFLT, DFLT,\n '-1.01E-3'), (-0.123456789012, DFLT, DFLT, DFLT, '-1.23456789012E-1'),\n (-1234567.89012, DFLT, DFLT, DFLT, '-1.23456789012E+6'), (-1, DFLT,\n DFLT, DFLT, '-1E+0'), (-20, DFLT, DFLT, DFLT, '-2E+1'), (-100, DFLT,\n DFLT, DFLT, '-1E+2'), (-200, DFLT, DFLT, DFLT, '-2E+2'), (-333, DFLT,\n DFLT, DFLT, '-3.33E+2'), (-4567, DFLT, DFLT, DFLT, '-4.567E+3'), (-\n 4567.89, DFLT, DFLT, DFLT, '-4.56789E+3'), (-500, 3, DFLT, DFLT,\n '-5.000E+2'), (-4567.89, 8, DFLT, DFLT, '-4.56789000E+3'), (-99.999, 1,\n DFLT, DFLT, '-1.0E+2'), (-4567.89, DFLT, DFLT, True, '-4.56789E+3'), (-\n 500, 3, DFLT, True, '-5.000E+2'), (-4567.89, 8, DFLT, True,\n '-4.56789000E+3'), (-99.999, 1, DFLT, True, 
'-1.0E+2'), (-500, 3, 2, \n True, '-5.000E+02'), (-4567.89, 8, 3, True, '-4.56789000E+003'), (-\n 9999999999.999, 1, 1, True, '-1.0E+10')]"], {}), "('num, frac_length, exp_length, sign_always, ref', [\n ('5.35E+3', DFLT, DFLT, DFLT, '5.35E+3'), (0, DFLT, DFLT, DFLT, '0E+0'),\n (0.1, DFLT, DFLT, DFLT, '1E-1'), (0.01, DFLT, DFLT, DFLT, '1E-2'), (\n 0.001, DFLT, DFLT, DFLT, '1E-3'), (0.00101, DFLT, DFLT, DFLT, '1.01E-3'\n ), (0.123456789012, DFLT, DFLT, DFLT, '1.23456789012E-1'), (\n 1234567.89012, DFLT, DFLT, DFLT, '1.23456789012E+6'), (1, DFLT, DFLT,\n DFLT, '1E+0'), (20, DFLT, DFLT, DFLT, '2E+1'), (100, DFLT, DFLT, DFLT,\n '1E+2'), (200, DFLT, DFLT, DFLT, '2E+2'), (333, DFLT, DFLT, DFLT,\n '3.33E+2'), (4567, DFLT, DFLT, DFLT, '4.567E+3'), (4567.89, DFLT, DFLT,\n DFLT, '4.56789E+3'), (500, 3, DFLT, DFLT, '5.000E+2'), (4567.89, 8,\n DFLT, DFLT, '4.56789000E+3'), (99.999, 1, DFLT, DFLT, '1.0E+2'), (\n 4567.89, DFLT, DFLT, True, '+4.56789E+3'), (500, 3, DFLT, True,\n '+5.000E+2'), (4567.89, 8, DFLT, True, '+4.56789000E+3'), (99.999, 1,\n DFLT, True, '+1.0E+2'), (500, 3, 2, True, '+5.000E+02'), (4567.89, 8, 3,\n True, '+4.56789000E+003'), (9999999999.999, 1, 1, True, '+1.0E+10'), (-\n 0.1, DFLT, DFLT, DFLT, '-1E-1'), (-0.01, DFLT, DFLT, DFLT, '-1E-2'), (-\n 0.001, DFLT, DFLT, DFLT, '-1E-3'), (-0.00101, DFLT, DFLT, DFLT,\n '-1.01E-3'), (-0.123456789012, DFLT, DFLT, DFLT, '-1.23456789012E-1'),\n (-1234567.89012, DFLT, DFLT, DFLT, '-1.23456789012E+6'), (-1, DFLT,\n DFLT, DFLT, '-1E+0'), (-20, DFLT, DFLT, DFLT, '-2E+1'), (-100, DFLT,\n DFLT, DFLT, '-1E+2'), (-200, DFLT, DFLT, DFLT, '-2E+2'), (-333, DFLT,\n DFLT, DFLT, '-3.33E+2'), (-4567, DFLT, DFLT, DFLT, '-4.567E+3'), (-\n 4567.89, DFLT, DFLT, DFLT, '-4.56789E+3'), (-500, 3, DFLT, DFLT,\n '-5.000E+2'), (-4567.89, 8, DFLT, DFLT, '-4.56789000E+3'), (-99.999, 1,\n DFLT, DFLT, '-1.0E+2'), (-4567.89, DFLT, DFLT, True, '-4.56789E+3'), (-\n 500, 3, DFLT, True, '-5.000E+2'), (-4567.89, 8, DFLT, True,\n '-4.56789000E+3'), 
(-99.999, 1, DFLT, True, '-1.0E+2'), (-500, 3, 2, \n True, '-5.000E+02'), (-4567.89, 8, 3, True, '-4.56789000E+003'), (-\n 9999999999.999, 1, 1, True, '-1.0E+10')])\n", (21073, 23136), False, 'import pytest\n'), ((12467, 12509), 'putil.test.AI', 'AI', (['putil.eng.no_exp', '"""number"""'], {'number': '"""a"""'}), "(putil.eng.no_exp, 'number', number='a')\n", (12469, 12509), False, 'from putil.test import AE, AI, CS\n'), ((12933, 12965), 'putil.test.AI', 'AI', (['putil.eng.peng', 'name'], {}), '(putil.eng.peng, name, **args)\n', (12935, 12965), False, 'from putil.test import AE, AI, CS\n'), ((39456, 39508), 'putil.test.AE', 'AE', (['putil.eng.pprint_vector', 'ValueError', 'msg'], {}), '(putil.eng.pprint_vector, ValueError, msg, **args)\n', (39458, 39508), False, 'from putil.test import AE, AI, CS\n'), ((2038, 2075), 'functools.partial', 'functools.partial', (['obj'], {'lstrip': 'lstrip'}), '(obj, lstrip=lstrip)\n', (2055, 2075), False, 'import functools\n'), ((2113, 2150), 'functools.partial', 'functools.partial', (['obj'], {'rstrip': 'rstrip'}), '(obj, rstrip=rstrip)\n', (2130, 2150), False, 'import functools\n'), ((18448, 18483), 'functools.partial', 'functools.partial', (['obj'], {'rjust': 'rjust'}), '(obj, rjust=rjust)\n', (18465, 18483), False, 'import functools\n'), ((38897, 38927), 'functools.partial', 'functools.partial', (['obj'], {}), '(obj, **args)\n', (38914, 38927), False, 'import functools\n'), ((40017, 40053), 'functools.partial', 'functools.partial', (['obj'], {'decimals': 'dec'}), '(obj, decimals=dec)\n', (40034, 40053), False, 'import functools\n'), ((36445, 36559), 'numpy.array', 'array', (['[-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j]'], {}), '([-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j])\n', (36450, 36559), False, 'from numpy import array, ndarray\n'), ((36828, 36942), 'numpy.array', 'array', (['[-0.10081675027325637 - 
0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j]'], {}), '([-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j])\n', (36833, 36942), False, 'from numpy import array, ndarray\n'), ((37307, 37541), 'numpy.array', 'array', (['[-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j, 0.11875422918564994 + \n 0.11714278356086179j, 0.21875422918564993 + 0.21714278356086178j, 0 + \n 28.0j, 10 + 2.0j]'], {}), '([-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j, 0.11875422918564994 + \n 0.11714278356086179j, 0.21875422918564993 + 0.21714278356086178j, 0 + \n 28.0j, 10 + 2.0j])\n', (37312, 37541), False, 'from numpy import array, ndarray\n'), ((37998, 38232), 'numpy.array', 'array', (['[-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j, 0.11875422918564994 + \n 0.11714278356086179j, 0.21875422918564993 + 0.21714278356086178j, 0 + \n 28.0j, 10 + 2.0j]'], {}), '([-0.10081675027325637 - 0.06910517142735251j, 0.018754229185649937 + \n 0.017142783560861786j, 0 + 18.0j, 0.11875422918564994 + \n 0.11714278356086179j, 0.21875422918564993 + 0.21714278356086178j, 0 + \n 28.0j, 10 + 2.0j])\n', (38003, 38232), False, 'from numpy import array, ndarray\n'), ((39678, 39703), 'numpy.array', 'array', (['[1.3333, 2.666666]'], {}), '([1.3333, 2.666666])\n', (39683, 39703), False, 'from numpy import array, ndarray\n'), ((39708, 39727), 'numpy.array', 'array', (['[1.33, 2.67]'], {}), '([1.33, 2.67])\n', (39713, 39727), False, 'from numpy import array, ndarray\n'), ((39739, 39772), 'numpy.array', 'array', (['[1.3333e-12, 2.666666e-12]'], {}), '([1.3333e-12, 2.666666e-12])\n', (39744, 39772), False, 'from numpy import array, ndarray\n'), ((39777, 39804), 'numpy.array', 'array', (['[1.33e-12, 2.67e-12]'], {}), '([1.33e-12, 2.67e-12])\n', (39782, 39804), False, 
'from numpy import array, ndarray\n'), ((39816, 39829), 'numpy.array', 'array', (['[1, 3]'], {}), '([1, 3])\n', (39821, 39829), False, 'from numpy import array, ndarray\n'), ((39834, 39847), 'numpy.array', 'array', (['[1, 3]'], {}), '([1, 3])\n', (39839, 39847), False, 'from numpy import array, ndarray\n')] |
import torch
import numpy as np
import rlkit.torch.pytorch_util as ptu
def GaussianKernel(x, y, bs):
sigma = 1
I = torch.ones([bs, 1])
x_tmp = torch.sum(torch.square(x), dim=1)
y_tmp = torch.sum(torch.square(y), dim=1)
x2 = torch.matmul(I, x_tmp.T)
y2 = torch.matmul(I, y_tmp.T)
normTmp = x2 - 2 * torch.matmul(y, x.T) + y2.T
norm = torch.sum(normTmp, dim=1)
K = torch.exp(-1 * norm / sigma)
return K
def MMD(x, y, bs):
Kxx = GaussianKernel(x, x, bs) / (bs ** 2)
Kxy = GaussianKernel(x, y, bs) / (bs ** 2)
Kyy = GaussianKernel(y, y, bs) / (bs ** 2)
MMD = Kxx - 2 * Kxy + Kyy
return MMD.sum()
class MMDEstimator(object):
def __init__(self, input_dim, D=512, gamma=1):
self.gamma = gamma
self.D = D
self.W = ptu.from_numpy(np.random.randn(input_dim, D))
self.b = ptu.from_numpy(np.random.uniform(size=(1, D), low=0, high=2 * np.pi))
def _forward(self, x, y):
psi_x = np.sqrt(2.0 / self.D) * torch.cos(np.sqrt(2 / self.gamma) *
torch.matmul(x, self.W) + self.b)
psi_y = np.sqrt(2.0 / self.D) * torch.cos(np.sqrt(2 / self.gamma) *
torch.matmul(y, self.W) + self.b)
MMD = torch.mean(psi_x, dim=0) - torch.mean(psi_y, dim=0)
return MMD.norm(2)
def forward(self, latents):
loss = 0.
n = len(latents)
for i in range(n):
x, y = latents[i], latents[(i+1) % n]
loss += self._forward(x, y)
return loss / n
| [
"torch.mean",
"torch.ones",
"numpy.random.uniform",
"numpy.random.randn",
"torch.exp",
"torch.matmul",
"torch.sum",
"torch.square",
"numpy.sqrt"
] | [((126, 145), 'torch.ones', 'torch.ones', (['[bs, 1]'], {}), '([bs, 1])\n', (136, 145), False, 'import torch\n'), ((247, 271), 'torch.matmul', 'torch.matmul', (['I', 'x_tmp.T'], {}), '(I, x_tmp.T)\n', (259, 271), False, 'import torch\n'), ((281, 305), 'torch.matmul', 'torch.matmul', (['I', 'y_tmp.T'], {}), '(I, y_tmp.T)\n', (293, 305), False, 'import torch\n'), ((368, 393), 'torch.sum', 'torch.sum', (['normTmp'], {'dim': '(1)'}), '(normTmp, dim=1)\n', (377, 393), False, 'import torch\n'), ((402, 430), 'torch.exp', 'torch.exp', (['(-1 * norm / sigma)'], {}), '(-1 * norm / sigma)\n', (411, 430), False, 'import torch\n'), ((168, 183), 'torch.square', 'torch.square', (['x'], {}), '(x)\n', (180, 183), False, 'import torch\n'), ((214, 229), 'torch.square', 'torch.square', (['y'], {}), '(y)\n', (226, 229), False, 'import torch\n'), ((819, 848), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'D'], {}), '(input_dim, D)\n', (834, 848), True, 'import numpy as np\n'), ((882, 935), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, D)', 'low': '(0)', 'high': '(2 * np.pi)'}), '(size=(1, D), low=0, high=2 * np.pi)\n', (899, 935), True, 'import numpy as np\n'), ((984, 1005), 'numpy.sqrt', 'np.sqrt', (['(2.0 / self.D)'], {}), '(2.0 / self.D)\n', (991, 1005), True, 'import numpy as np\n'), ((1144, 1165), 'numpy.sqrt', 'np.sqrt', (['(2.0 / self.D)'], {}), '(2.0 / self.D)\n', (1151, 1165), True, 'import numpy as np\n'), ((1302, 1326), 'torch.mean', 'torch.mean', (['psi_x'], {'dim': '(0)'}), '(psi_x, dim=0)\n', (1312, 1326), False, 'import torch\n'), ((1329, 1353), 'torch.mean', 'torch.mean', (['psi_y'], {'dim': '(0)'}), '(psi_y, dim=0)\n', (1339, 1353), False, 'import torch\n'), ((329, 349), 'torch.matmul', 'torch.matmul', (['y', 'x.T'], {}), '(y, x.T)\n', (341, 349), False, 'import torch\n'), ((1018, 1041), 'numpy.sqrt', 'np.sqrt', (['(2 / self.gamma)'], {}), '(2 / self.gamma)\n', (1025, 1041), True, 'import numpy as np\n'), ((1094, 1117), 'torch.matmul', 
'torch.matmul', (['x', 'self.W'], {}), '(x, self.W)\n', (1106, 1117), False, 'import torch\n'), ((1178, 1201), 'numpy.sqrt', 'np.sqrt', (['(2 / self.gamma)'], {}), '(2 / self.gamma)\n', (1185, 1201), True, 'import numpy as np\n'), ((1254, 1277), 'torch.matmul', 'torch.matmul', (['y', 'self.W'], {}), '(y, self.W)\n', (1266, 1277), False, 'import torch\n')] |
"""
This is the main script for running the baseline algorithms reported in the paper.
It writes per-run metadata to an output folder, along with a summary of results for each algorithm.
"""
import json
import os
import sys
import numpy as np
import pandas as pd
import pathlib2
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import StratifiedKFold
from linear import LogisticAdaBoost, AdaBoostOnes, AdaBoostNormal
from utils import path_to_dataframe
from reporter import BaselineReporter
__author__ = '<NAME>'
def test_baselines(datasets_path, output_path, params_path):
"""
Runs baselines.
:type datasets_path: str
:param datasets_path: path to a folder where datasets will be used for tests.
:type output_path: str
:param output_path: path to output metadata regarding evolutionary process.
:type params_path: str
:param params_path: path to parameters file.
"""
n_runs = 10
params = json.load(open(params_path, 'r'))
datasets = [str(xx).split('/')[-1] for xx in pathlib2.Path(datasets_path).iterdir() if xx.is_file()]
algorithms = [LogisticAdaBoost, AdaBoostClassifier, AdaBoostOnes, AdaBoostNormal]
for dataset in datasets:
dataset_name = dataset.split('/')[-1].split('.')[-2]
print('testing %s dataset' % dataset_name)
full_df = path_to_dataframe(os.path.join(datasets_path, dataset))
y_name = full_df.columns[-1]
full_df[y_name] = pd.Categorical(full_df[y_name])
full_df[y_name] = full_df[y_name].cat.codes
X = full_df[full_df.columns[:-1]]
y = full_df[full_df.columns[-1]]
skf = StratifiedKFold(n_splits=params['n_folds'], shuffle=True, random_state=params['random_state'])
for n_fold, (train_index, test_index) in enumerate(skf.split(X, y)):
X_train = X.iloc[train_index]
X_test = X.iloc[test_index]
y_train = y.iloc[train_index]
y_test = y.iloc[test_index]
n_classes = len(np.unique(y_train))
for n_run in range(n_runs):
for algorithm in algorithms:
print('# --- dataset: %r n_fold: %r n_run: %r algorithm: %r --- #' % (
dataset_name, n_fold, n_run, algorithm.__name__
))
reporter = BaselineReporter(
Xs=[X_train, X_test],
ys=[y_train, y_test],
set_names=['train', 'test'],
output_path=output_path,
dataset_name=dataset_name,
n_fold=n_fold,
n_run=n_run,
n_classifiers=params['n_base_classifiers'],
n_classes=n_classes,
algorithm=algorithm
)
if algorithm.__name__ == 'AdaBoostClassifier':
inst = algorithm(n_estimators=params['n_base_classifiers'], algorithm='SAMME').fit(
X_train, y_train
)
else:
inst = algorithm(n_estimators=params['n_base_classifiers']).fit(X_train, y_train)
reporter.save_baseline(inst)
if __name__ == '__main__':
if len(sys.argv) != 5:
print('usage:')
print('\tpython test_baselines.py <path_datasets> <path_metadata> <path_params> <path_results>')
print('example:')
print('\tpython test_baselines.py \"/home/user/datasets\" \"/home/user/metadata\"' + \
'\"/home/user/params.json\" \"/home/user/results.csv\"')
else:
__dataset_path, __output_path, __params_path, __results_path = sys.argv[1:]
test_baselines(__dataset_path, __output_path, __params_path)
BaselineReporter.generate_summary(__output_path, __results_path)
| [
"sklearn.model_selection.StratifiedKFold",
"pandas.Categorical",
"reporter.BaselineReporter.generate_summary",
"pathlib2.Path",
"os.path.join",
"numpy.unique",
"reporter.BaselineReporter"
] | [((1477, 1508), 'pandas.Categorical', 'pd.Categorical', (['full_df[y_name]'], {}), '(full_df[y_name])\n', (1491, 1508), True, 'import pandas as pd\n'), ((1660, 1759), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': "params['n_folds']", 'shuffle': '(True)', 'random_state': "params['random_state']"}), "(n_splits=params['n_folds'], shuffle=True, random_state=\n params['random_state'])\n", (1675, 1759), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3847, 3911), 'reporter.BaselineReporter.generate_summary', 'BaselineReporter.generate_summary', (['__output_path', '__results_path'], {}), '(__output_path, __results_path)\n', (3880, 3911), False, 'from reporter import BaselineReporter\n'), ((1374, 1410), 'os.path.join', 'os.path.join', (['datasets_path', 'dataset'], {}), '(datasets_path, dataset)\n', (1386, 1410), False, 'import os\n'), ((2028, 2046), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2037, 2046), True, 'import numpy as np\n'), ((1052, 1080), 'pathlib2.Path', 'pathlib2.Path', (['datasets_path'], {}), '(datasets_path)\n', (1065, 1080), False, 'import pathlib2\n'), ((2352, 2620), 'reporter.BaselineReporter', 'BaselineReporter', ([], {'Xs': '[X_train, X_test]', 'ys': '[y_train, y_test]', 'set_names': "['train', 'test']", 'output_path': 'output_path', 'dataset_name': 'dataset_name', 'n_fold': 'n_fold', 'n_run': 'n_run', 'n_classifiers': "params['n_base_classifiers']", 'n_classes': 'n_classes', 'algorithm': 'algorithm'}), "(Xs=[X_train, X_test], ys=[y_train, y_test], set_names=[\n 'train', 'test'], output_path=output_path, dataset_name=dataset_name,\n n_fold=n_fold, n_run=n_run, n_classifiers=params['n_base_classifiers'],\n n_classes=n_classes, algorithm=algorithm)\n", (2368, 2620), False, 'from reporter import BaselineReporter\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
from bp import parse_newick
import numpy as np
import pandas as pd
from scipy.spatial.distance import euclidean
SUPPORT_FILES = pkg_resources.resource_filename('empress', 'support_files')
TEMPLATES = os.path.join(SUPPORT_FILES, 'templates')
def get_bp(newickfmt):
"""Loads a bp.BP tree from a QIIME 2 NewickFormat object.
This function, along with save_viz(), was moved here from _plot.py so it
could be reused between different Empress commands.
Parameters
----------
newickfmt : q2_types.tree.NewickFormat
Returns
-------
bp.BP
"""
with open(str(newickfmt)) as treefile:
# The file will still be closed even though we return from within the
# with block: see https://stackoverflow.com/a/9885287/10730311.
return parse_newick(treefile.readline())
def save_viz(viz, output_dir, q2=True):
"""Saves an Empress visualization to a filepath.
Parameters
----------
viz : empress.Empress
output_dir : str
q2 : bool
"""
with open(os.path.join(output_dir, 'empress.html'), 'w') as htmlfile:
htmlfile.write(str(viz))
viz.copy_support_files(output_dir)
if q2:
import q2templates
index = os.path.join(TEMPLATES, 'index.html')
q2templates.render(index, output_dir)
def prepare_pcoa(pcoa, number_of_features):
"""Selects top N biplot features by magnitude (coped from q2-emperor).
Parameters
----------
pcoa : skbio.stats.ordination.OrdinationResults
number_of_features : int
Returns
-------
skbio.stats.ordination.OrdinationResults
"""
feats = pcoa.features.copy()
# in cases where the axes are all zero there might be all-NA
# columns
feats.fillna(0, inplace=True)
origin = np.zeros_like(feats.columns)
feats['importance'] = feats.apply(euclidean, axis=1, args=(origin,))
feats.sort_values('importance', inplace=True, ascending=False)
feats.drop(['importance'], inplace=True, axis=1)
pcoa.features = feats[:number_of_features].copy()
return pcoa
def check_and_process_files(output_dir, tree_file, feature_metadata):
"""Initial checks and processing of files for standalone CLI plotting.
Parameters
----------
output_dir : str
tree_file : str
fm_file : str
Returns
-------
bp.Tree
pd.DataFrame
"""
if os.path.isdir(output_dir):
raise OSError("Output directory already exists!")
with open(str(tree_file), "r") as f:
tree_newick = parse_newick(f.readline())
if feature_metadata is not None:
feature_metadata = pd.read_csv(feature_metadata, sep="\t", index_col=0)
return tree_newick, feature_metadata
| [
"numpy.zeros_like",
"os.path.isdir",
"pandas.read_csv",
"pkg_resources.resource_filename",
"q2templates.render",
"os.path.join"
] | [((512, 571), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""empress"""', '"""support_files"""'], {}), "('empress', 'support_files')\n", (543, 571), False, 'import pkg_resources\n'), ((584, 624), 'os.path.join', 'os.path.join', (['SUPPORT_FILES', '"""templates"""'], {}), "(SUPPORT_FILES, 'templates')\n", (596, 624), False, 'import os\n'), ((2156, 2184), 'numpy.zeros_like', 'np.zeros_like', (['feats.columns'], {}), '(feats.columns)\n', (2169, 2184), True, 'import numpy as np\n'), ((2754, 2779), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (2767, 2779), False, 'import os\n'), ((1602, 1639), 'os.path.join', 'os.path.join', (['TEMPLATES', '"""index.html"""'], {}), "(TEMPLATES, 'index.html')\n", (1614, 1639), False, 'import os\n'), ((1648, 1685), 'q2templates.render', 'q2templates.render', (['index', 'output_dir'], {}), '(index, output_dir)\n', (1666, 1685), False, 'import q2templates\n'), ((2993, 3045), 'pandas.read_csv', 'pd.read_csv', (['feature_metadata'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(feature_metadata, sep='\\t', index_col=0)\n", (3004, 3045), True, 'import pandas as pd\n'), ((1414, 1454), 'os.path.join', 'os.path.join', (['output_dir', '"""empress.html"""'], {}), "(output_dir, 'empress.html')\n", (1426, 1454), False, 'import os\n')] |
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim.lr_scheduler import ReduceLROnPlateau
from scipy.stats.mstats import mquantiles
from tqdm import tqdm
import argparse
import torch
import os
def TuebingenDataset(root):
"""
Reads the Tuebingen cause-effect dataset
https://webdav.tuebingen.mpg.de/cause-effect/
"""
import numpy as np
meta = np.genfromtxt(os.path.join(root, 'pairmeta.txt'), dtype=np.str)
samples = []
weights = []
names = []
for i in range(meta.shape[0]):
fname = 'pair' + meta[i][0] + '.txt'
data = np.genfromtxt(os.path.join(root, fname))
x = data[:, 0]
y = data[:, 1]
# remove binary pairs
if (len(np.unique(x)) > 2) and (len(np.unique(y)) > 2):
if((meta[i][1] == '1') and
(meta[i][2] == '1') and
(meta[i][3] == '2') and
(meta[i][4] == '2')):
d = torch.Tensor(np.vstack((x, y)).T)
w = float(meta[i][5])
samples.append(d)
weights.append(w)
names.append(fname)
if((meta[i][1] == '2') and
(meta[i][2] == '2') and
(meta[i][3] == '1') and
(meta[i][4] == '1')):
d = torch.Tensor(np.vstack((y, x)).T)
w = float(meta[i][5])
samples.append(d)
weights.append(w)
names.append(fname)
return samples, weights, names
def train_test_split(x, y, p_tr=0.5):
"""
Splits X, Y into training and testing splits
"""
perm = torch.randperm(x.size(0))
n_tr = int(x.size(0) * p_tr)
x_tr = x[perm[:n_tr]]
y_tr = y[perm[:n_tr]]
x_te = x[perm[n_tr:]]
y_te = y[perm[n_tr:]]
return x_tr, y_tr, x_te, y_te
def qnorm_scale(x):
"""
transform variable to standard Gaussian
"""
from scipy import stats
import pandas
# rank elements with method = "random" when resolving ties
x = pandas.Series(x.numpy())
x_rank = x.sample(frac=1).rank(method='first').reindex_like(x)/(x.size + 1)
x = stats.norm.ppf(x_rank.values)
return torch.Tensor(x).view(-1, 1)
class PinballLoss(torch.nn.Module):
"""
Quantile regression loss
"""
def __init__(self):
super(PinballLoss, self).__init__()
def forward(self, yhat, y, tau=0.5):
diff = yhat - y
mask = diff.ge(0).float() - tau
return (mask * diff).mean()
def causal_score(net, x, y):
"""
S(x \to y) = CL(P(X)) + CL(P(Y|X)) ~ QS(X) + QS(Y|X)
https://arxiv.org/abs/1801.10579 [Theorem 3]
"""
loss = PinballLoss()
scores = []
for tau, tau_w in zip(TAUS, wTAUS):
x_marg = torch.Tensor(mquantiles(x.numpy(), prob=tau, alphap=1, betap=1))
x_marg_qs = loss(x_marg, x, tau).item() # QS(X)
y_cond_qs = loss(net(x, tau), y, tau).item() # QS(Y|X)
scores.append((x_marg_qs + y_cond_qs) * tau_w)
return scores
class TauNet(torch.nn.Module):
def __init__(self, nh=128):
super(TauNet, self).__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(2, nh),
torch.nn.ReLU(),
torch.nn.Linear(nh, 1))
def forward(self, x, tau):
if type(tau) == float or type(tau) == int or tau.dim() == 0:
tau = torch.zeros(x.size(0), 1).fill_(tau)
return self.net(torch.cat((x, tau), 1))
def train_net(x_tr,
y_tr,
x_te,
y_te,
n_epochs=10000,
nh=64,
lr=1e-4,
wd=1e-4):
net = TauNet(nh)
opt = torch.optim.Adam(net.parameters(),
lr=lr,
weight_decay=wd)
scheduler = ReduceLROnPlateau(opt,
patience=2,
factor=0.99,
threshold=1e-10,
min_lr=1e-10,
threshold_mode='abs')
loss = PinballLoss()
for epoch in tqdm(range(n_epochs)):
taus = torch.randn(x_tr.size(0), 1).uniform_()
opt.zero_grad()
loss(net(x_tr, taus), y_tr, taus).backward()
opt.step()
test_loss = 0
for tau in TAUS:
test_loss += loss(net(x_te, tau), y_te, tau).item()
scheduler.step(test_loss)
return net
def plot_results(net, x_all, y_all, fname):
order = x_all.sort(0)[1]
x_all = x_all[order].view(-1, 1)
y_all = y_all[order].view(-1, 1)
plt.figure(figsize=(10, 6))
plt.plot(x_all.numpy(), y_all.numpy(), '.')
for tau in TAUS:
lw = 5 if tau == 0.5 else 2
plt.plot(x_all.numpy(),
net(x_all, tau).detach().numpy(),
label="$\\tau = " + str(tau) + "$",
lw=lw)
plt.legend(ncol=3)
plt.tight_layout(pad=0)
plt.savefig(fname)
plt.close('all')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Quantile regression')
parser.add_argument('--root', type=str, default='../data/pairs_tuebingen')
parser.add_argument('--pair', type=int, default=0)
parser.add_argument('--plot', type=int, default=1)
parser.add_argument('--repetitions', type=int, default=5)
parser.add_argument('--n_epochs', type=int, default=3000)
parser.add_argument('--n_hidden_units', type=int, default=50)
parser.add_argument('--quad_int', type=int, default=3)
parser.add_argument('--unif_int', type=int, default=0)
args = parser.parse_args()
global TAUS
global wTAUS
samples, weights, names = TuebingenDataset(args.root)
print("Number of pairs = ", len(samples))
x_all = qnorm_scale(samples[args.pair][:, 0])
y_all = qnorm_scale(samples[args.pair][:, 1])
# if the pair has less than 200 samples, casual score only from the median
if args.quad_int == 1 or args.unif_int == 1 or x_all.shape[0] <= 200:
TAUS = [0.5]
wTAUS = [1]
# Gaussian quadrature integration
elif args.quad_int == 3:
TAUS = [0.12, 0.5, 0.89]
wTAUS = [0.28, 0.44, 0.28]
elif args.quad_int == 5:
TAUS = [0.05, 0.23, 0.5, 0.77, 0.95]
wTAUS = [0.12, 0.24, 0.28, 0.24, 0.12]
elif args.unif_int != 0:
TAUS = torch.linspace(0, 1, args.unif_int)
wTAUS = torch.Tensor([1 / args.unif_int]).repeat(args.unif_int)
score_fw = 0
score_bw = 0
reps = 0
for repetition in range(args.repetitions):
x_tr, y_tr, x_te, y_te = train_test_split(x_all, y_all)
net_fw = train_net(x_tr, y_tr, x_te, y_te, n_epochs=args.n_epochs)
net_bw = train_net(y_tr, x_tr, x_te, y_te, n_epochs=args.n_epochs)
score_fw += sum(causal_score(net_fw, x_te, y_te))
score_bw += sum(causal_score(net_bw, y_te, x_te))
reps += 1
score_fw /= reps
score_bw /= reps
print(args.root,
names[args.pair],
weights[args.pair],
score_fw,
score_bw,
score_fw < score_bw)
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', size=16)
plot_results(net_fw, x_all, y_all, names[args.pair] + "_fw.pdf")
plot_results(net_bw, y_all, x_all, names[args.pair] + "_bw.pdf")
| [
"scipy.stats.norm.ppf",
"torch.nn.ReLU",
"argparse.ArgumentParser",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.unique",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.cat",
"matplotlib.pyplot.figure",
"matplotlib.use",
"torch.Tensor",
"matplotlib.pypl... | [((2278, 2307), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['x_rank.values'], {}), '(x_rank.values)\n', (2292, 2307), False, 'from scipy import stats\n'), ((3981, 4086), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['opt'], {'patience': '(2)', 'factor': '(0.99)', 'threshold': '(1e-10)', 'min_lr': '(1e-10)', 'threshold_mode': '"""abs"""'}), "(opt, patience=2, factor=0.99, threshold=1e-10, min_lr=\n 1e-10, threshold_mode='abs')\n", (3998, 4086), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((4786, 4813), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4796, 4813), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5102), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(3)'}), '(ncol=3)\n', (5094, 5102), True, 'import matplotlib.pyplot as plt\n'), ((5107, 5130), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (5123, 5130), True, 'import matplotlib.pyplot as plt\n'), ((5135, 5153), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (5146, 5153), True, 'import matplotlib.pyplot as plt\n'), ((5158, 5174), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5167, 5174), True, 'import matplotlib.pyplot as plt\n'), ((5217, 5275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quantile regression"""'}), "(description='Quantile regression')\n", (5240, 5275), False, 'import argparse\n'), ((535, 569), 'os.path.join', 'os.path.join', (['root', '"""pairmeta.txt"""'], {}), "(root, 'pairmeta.txt')\n", (547, 569), False, 'import os\n'), ((7367, 7388), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (7381, 7388), False, 'import matplotlib\n'), ((7438, 7465), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (7444, 7465), True, 'import matplotlib.pyplot as plt\n'), ((7474, 
7497), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (7480, 7497), True, 'import matplotlib.pyplot as plt\n'), ((744, 769), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (756, 769), False, 'import os\n'), ((2320, 2335), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (2332, 2335), False, 'import torch\n'), ((3322, 3344), 'torch.nn.Linear', 'torch.nn.Linear', (['(2)', 'nh'], {}), '(2, nh)\n', (3337, 3344), False, 'import torch\n'), ((3369, 3384), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (3382, 3384), False, 'import torch\n'), ((3409, 3431), 'torch.nn.Linear', 'torch.nn.Linear', (['nh', '(1)'], {}), '(nh, 1)\n', (3424, 3431), False, 'import torch\n'), ((3614, 3636), 'torch.cat', 'torch.cat', (['(x, tau)', '(1)'], {}), '((x, tau), 1)\n', (3623, 3636), False, 'import torch\n'), ((864, 876), 'numpy.unique', 'np.unique', (['x'], {}), '(x)\n', (873, 876), True, 'import numpy as np\n'), ((892, 904), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (901, 904), True, 'import numpy as np\n'), ((6541, 6576), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'args.unif_int'], {}), '(0, 1, args.unif_int)\n', (6555, 6576), False, 'import torch\n'), ((1100, 1117), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (1109, 1117), True, 'import numpy as np\n'), ((1451, 1468), 'numpy.vstack', 'np.vstack', (['(y, x)'], {}), '((y, x))\n', (1460, 1468), True, 'import numpy as np\n'), ((6593, 6626), 'torch.Tensor', 'torch.Tensor', (['[1 / args.unif_int]'], {}), '([1 / args.unif_int])\n', (6605, 6626), False, 'import torch\n')] |
"""
This module contains code to calculate QNM amplitude and phases given a set of
spherical waveform modes of a given azimuthal index m.
e.g. given h22, h32, h42 spherical modes, these routines will compute the
m = 2, l = 2,3,4 QNMs and m = -2, l = 2,3,4 mirror QNMS
"""
import os
import numpy as np
import pandas as pd
import qnm
def mulmlpnp(m,l,lprime,n,a):
"""
Returns spherical-spheroidal overlap mu_{m,l,l',n'}(a)
defined in Eq. (5) of Berti and Klein (2014)
"""
omega_s, a_val, _ = qnm.modes_cache(s=-2,l=lprime,m=m,n=n)(a=a)
i = np.where(qnm.angular.ells(-2,m,abs(m)+100) == l)[0][0]
overlap_s = qnm.angular.C_and_sep_const_closest(a_val,-2,a*omega_s,m,abs(m) + 100)[1][i]
return overlap_s.real + 1j*overlap_s.imag
def loadqnm(lprime,m,n,a):
"""Read in QNM frequencies using qnm package."""
omega_s,_,_ = qnm.modes_cache(s=-2,l=lprime,m=m,n=n)(a=a)
return omega_s
def lrradius(a):
"""Calculate light ring radius for equatorial prograde orbit."""
return 2*(1+np.cos((2./3.)*np.arccos(-np.abs(a))))
def get_lrange(m,k_ell,lmin=2):
"""
Return range of angular indices l_min <= l <= l_max
where l_min = max(2,|m|) and l_max = |m| + k_ell
"""
return np.arange(np.amax([lmin,np.abs(m)]),k_ell + np.abs(m) + 1)
def calculate_matrix_components(m, a, larray, lparray):
"""
Helper function to calculate matrix elements containing spherical-spheroidal
overlaps and qnm frequencies needed for Eq. (3.10) in
Lim, Khanna, Apte, and Hughes (2019)
Inputs:
- larray: list [[l,is_derivative],...] describing each
spherical mode and whether derivative was taken
- lparray: list of spheroidal modes
"""
alphasystem = np.zeros((len(larray),len(larray),2),dtype=complex)
for lindex, (l, is_derivative) in enumerate(larray):
for lpindex, (lprime,mprime) in enumerate(lparray):
overlap = mulmlpnp(mprime,l,np.abs(lprime),0,a)
omega_j = -1j * loadqnm(np.abs(lprime),mprime,0,a)
# Check if mirror mode
# m == 0, use sign(lp) to denote mirror modes
# m != 0, use sign(mp*m) to denote mirror modes
if (m != 0 and mprime*m < 0) or (m == 0 and lprime < 0):
overlap = np.conjugate(overlap) * (-1)**l
omega_j = np.conjugate(omega_j)
# If derivative, multiply mu by 1j*omega
if is_derivative is True:
overlap *= omega_j
alphasystem[lindex][lpindex] = [overlap,omega_j]
return alphasystem
def preparesystem(m,a,k_ell,cachedir=None,overwrite=False):
"""
Calculate matrix elements containing spherical-spheroidal
overlaps and qnm frequencies needed for Eq. (3.10) in
Lim, Khanna, Apte, and Hughes (2019)
Inputs:
- m: azimuthal index
- a: spin parameter
- k_ell: number of mixed modes to include beyond l == |m| [lmin,..,...,|m| + k_ell]
- cachedir: directory where computations are saved or loaded
Output:
- alphasystem[lp][l], matrix of coefficients
"""
if (cachedir is not None and overwrite is False and
os.path.exists(f'{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy')):
alphasystem = np.load(f'{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy')
else:
# Setup the system matrix to solve
larray = [[l,is_derivative]
for is_derivative in [False,True]
for l in get_lrange(m,k_ell)]
lparray = [[l,mprime]
for l in get_lrange(m,k_ell)
for mprime in [m,-m]]
# For m = 0, take into account degenerate mirror modes
# Label the pair modes as (|l|,0) and (-|l|,0)
if m == 0:
lparray = [[lprime*(-1)**lpindex,mprime] for
lpindex,(lprime,mprime) in enumerate(lparray)]
alphasystem = calculate_matrix_components(m, a, larray, lparray)
if cachedir is not None:
if os.path.exists(cachedir) == False:
os.makedirs(cachedir)
np.save(f"{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy",alphasystem)
return alphasystem
def get_linearsystem(alphasystem,t,t_0):
"""
Calculate LHS of matrix equation.
Consists of spherical-spheroidal overlaps and QNMs,
according to Eq. (3.10) in Lim,Khanna,Apte,Hughes (2019)
"""
musystem = alphasystem[:,:,0]
omegasystem = alphasystem[:,:,1]
return musystem * np.exp(omegasystem * (t - t_0))
def load_waveforms(wavefiles, spherical_modes, k_ell, t_cut=0):
"""
Load and cut waveform data
Assumes set of wavefiles have naming conventions:
- hm{m}_*.dat for m >= 0
- hmm{m}_*.dat for m < 0
Inputs:
- wavefiles: list of filepaths for each wavefile of index m
- spherical_modes: list of modes corresponding to wavefiles [[2,2],[3,2],...]
- k_ell: number of mixed modes to model beyond l == |m|
Ouputs:
- wavedatas (N)
contains N rows for each mode in spherical_modes
each row has wavedatas[i] = hlm_+ - 1j * hlm_x
"""
# Check consistency of inputs
for (l,m) in spherical_modes:
assert l in get_lrange(m,k_ell), f"Mode ({l},{m}) out of range"
# Find which modes are contained in each wavefile
filemodelist = []
for wavefile in wavefiles:
wavefile = wavefile.split("/")[-1]
# sign(m), check by examining fourth character in filename
# m > 0, e.g. hm1*.dat
if wavefile[3] == "_":
m = int(wavefile[2])
filemodelist.append([[l,m] for l in get_lrange(m,k_ell,lmin=m)])
# m < 0, e.g. hmm1*.dat
elif wavefile[3] != "_":
m = -int(wavefile[3])
filemodelist.append([[l,m] for l in get_lrange(m,k_ell,lmin=-m)])
# Read in waveforms in order of modelist
wavedatas = []
for l,m in spherical_modes:
# Find file containing desired mode
for j,wavefile in enumerate(wavefiles):
if [l,m] in filemodelist[j]:
data = pd.read_csv(wavefile,delim_whitespace=True,header=None,engine='python',
usecols=[0,1+2*(l-np.abs(m)),2+2*(l-np.abs(m))]).to_numpy()
time,h_plus,h_cross = data[data[:,0] >= t_cut].T
wavedatas.append(h_plus - 1j * h_cross)
return time, np.array(wavedatas)
def get_spheroidalmodes(spherical_modes):
"""
Return list of pairs of modeled spheroidal modes given
list of inputted spherical modes
For N spherical modes, will model N spheroidal mode (QNM) pairs
"""
spheroidal_modes = []
for l,m in spherical_modes:
spheroidal_modes.append([l,m,0])
spheroidal_modes.append([l,-m,1])
return spheroidal_modes
def get_sphericalcoefs(time,wavedatas):
"""
Calculate RHS of matrix equation.
Consists of spherical modes and their derivatives,
according to Eq. (3.10) in Lim,Khanna,Apte,Hughes (2019)
"""
delta_t = time[1] - time[0]
wavedatas_deriv = np.gradient(wavedatas,delta_t,axis=1)
return np.vstack((wavedatas,wavedatas_deriv)).T
def solve_system(m,k_ell,t_fiducial,wavefilepaths,alphasystem):
"""
Solve for QNM amplitudes at each point in time
Eq. (3.10) in Lim, Khanna, Apte, Hughes (2019)
Inputs:
- m: spherical index describing input waveform files
- a: spin parameter
- thinc: spin-orbit misalignment parameter describing input trajectory
- thf: plunge parameter describing input trajectory
- k_ell: number of mixed QNMs to model, lmax = k_ell + max(2,|m|)
- t_fiducial: fiducial time
Outputs:
- spherical modes: list of spherical modes used to find QNMs
- time: array of times at which spheroidal modes are calculated
- spheroidal coefs: solved spheroidal modes at each time
"""
spherical_modes = [[l,m] for l in get_lrange(m,k_ell)]
time, wavedatas = load_waveforms(wavefilepaths,spherical_modes,
k_ell,t_cut=t_fiducial-50)
spherical_coefs = get_sphericalcoefs(time,wavedatas)
spheroidal_coefs = np.zeros(spherical_coefs.shape,dtype=complex)
for t,time_i in enumerate(time):
alplm = get_linearsystem(alphasystem,time_i,t_fiducial)
spheroidal_coefs[t] = np.linalg.solve(alplm,spherical_coefs[t])
return spherical_modes, time, spheroidal_coefs
def mlabel(mval):
"""
Outputs m-index as string
if m >= 0, maps to f"m{m}"
if m < 0, maps to f"mm{m}"
"""
if mval < 0:
return f"mm{np.abs(mval)}"
return f"m{mval}"
def postprocess(time,spheroidal_coefs,t_start,t_end,t_window):
"""
Take average of spheroidal coefs over many fitting times
to extract QNM. Procedure described in Lim,Khanna,Apte,Hughes (2019)
"""
assert t_end - t_start >= t_window, "Need at least one averaging period"
c_amps = np.abs(spheroidal_coefs)
c_phases = np.unwrap(np.angle(spheroidal_coefs))
stdmin = 1e99
t_max = np.min([time[-1] - t_window,t_end])
i = np.argmin(np.abs(time - t_start))
c_amps_mean_best = None
while time[i] + t_window <= t_max:
mask = (time >= time[i]) & (time <= time[i] + t_window)
c_amps_mean = np.mean(c_amps[mask,:],axis=0)
stdtotal = np.sum(np.std(c_amps[mask,:],axis=0) / c_amps_mean)
if stdtotal < stdmin:
stdmin = stdtotal
c_amps_mean_best = c_amps_mean
c_phases_mean_best = np.mod(np.mean(c_phases[mask,:],axis=0),2*np.pi)
i += 1
assert c_amps_mean_best is not None, "Could not find QNM amplitude, terminating"
return c_amps_mean_best, c_phases_mean_best
| [
"numpy.load",
"numpy.save",
"numpy.abs",
"os.makedirs",
"numpy.std",
"numpy.angle",
"numpy.zeros",
"os.path.exists",
"qnm.modes_cache",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.exp",
"numpy.vstack",
"numpy.conjugate",
"numpy.linalg.solve",
"numpy.gradient"
] | [((7060, 7099), 'numpy.gradient', 'np.gradient', (['wavedatas', 'delta_t'], {'axis': '(1)'}), '(wavedatas, delta_t, axis=1)\n', (7071, 7099), True, 'import numpy as np\n'), ((8140, 8186), 'numpy.zeros', 'np.zeros', (['spherical_coefs.shape'], {'dtype': 'complex'}), '(spherical_coefs.shape, dtype=complex)\n', (8148, 8186), True, 'import numpy as np\n'), ((8916, 8940), 'numpy.abs', 'np.abs', (['spheroidal_coefs'], {}), '(spheroidal_coefs)\n', (8922, 8940), True, 'import numpy as np\n'), ((9026, 9062), 'numpy.min', 'np.min', (['[time[-1] - t_window, t_end]'], {}), '([time[-1] - t_window, t_end])\n', (9032, 9062), True, 'import numpy as np\n'), ((512, 553), 'qnm.modes_cache', 'qnm.modes_cache', ([], {'s': '(-2)', 'l': 'lprime', 'm': 'm', 'n': 'n'}), '(s=-2, l=lprime, m=m, n=n)\n', (527, 553), False, 'import qnm\n'), ((857, 898), 'qnm.modes_cache', 'qnm.modes_cache', ([], {'s': '(-2)', 'l': 'lprime', 'm': 'm', 'n': 'n'}), '(s=-2, l=lprime, m=m, n=n)\n', (872, 898), False, 'import qnm\n'), ((3148, 3216), 'os.path.exists', 'os.path.exists', (['f"""{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy"""'], {}), "(f'{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy')\n", (3162, 3216), False, 'import os\n'), ((3242, 3303), 'numpy.load', 'np.load', (['f"""{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy"""'], {}), "(f'{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy')\n", (3249, 3303), True, 'import numpy as np\n'), ((4495, 4526), 'numpy.exp', 'np.exp', (['(omegasystem * (t - t_0))'], {}), '(omegasystem * (t - t_0))\n', (4501, 4526), True, 'import numpy as np\n'), ((6383, 6402), 'numpy.array', 'np.array', (['wavedatas'], {}), '(wavedatas)\n', (6391, 6402), True, 'import numpy as np\n'), ((7109, 7148), 'numpy.vstack', 'np.vstack', (['(wavedatas, wavedatas_deriv)'], {}), '((wavedatas, wavedatas_deriv))\n', (7118, 7148), True, 'import numpy as np\n'), ((8317, 8359), 'numpy.linalg.solve', 'np.linalg.solve', (['alplm', 'spherical_coefs[t]'], {}), '(alplm, 
spherical_coefs[t])\n', (8332, 8359), True, 'import numpy as np\n'), ((8966, 8992), 'numpy.angle', 'np.angle', (['spheroidal_coefs'], {}), '(spheroidal_coefs)\n', (8974, 8992), True, 'import numpy as np\n'), ((9080, 9102), 'numpy.abs', 'np.abs', (['(time - t_start)'], {}), '(time - t_start)\n', (9086, 9102), True, 'import numpy as np\n'), ((9259, 9291), 'numpy.mean', 'np.mean', (['c_amps[mask, :]'], {'axis': '(0)'}), '(c_amps[mask, :], axis=0)\n', (9266, 9291), True, 'import numpy as np\n'), ((4092, 4166), 'numpy.save', 'np.save', (['f"""{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy"""', 'alphasystem'], {}), "(f'{cachedir}/prepare_system_{m}_{a:.4f}_{k_ell}.npy', alphasystem)\n", (4099, 4166), True, 'import numpy as np\n'), ((1255, 1264), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (1261, 1264), True, 'import numpy as np\n'), ((1275, 1284), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (1281, 1284), True, 'import numpy as np\n'), ((1946, 1960), 'numpy.abs', 'np.abs', (['lprime'], {}), '(lprime)\n', (1952, 1960), True, 'import numpy as np\n'), ((2336, 2357), 'numpy.conjugate', 'np.conjugate', (['omega_j'], {}), '(omega_j)\n', (2348, 2357), True, 'import numpy as np\n'), ((4007, 4031), 'os.path.exists', 'os.path.exists', (['cachedir'], {}), '(cachedir)\n', (4021, 4031), False, 'import os\n'), ((4058, 4079), 'os.makedirs', 'os.makedirs', (['cachedir'], {}), '(cachedir)\n', (4069, 4079), False, 'import os\n'), ((8575, 8587), 'numpy.abs', 'np.abs', (['mval'], {}), '(mval)\n', (8581, 8587), True, 'import numpy as np\n'), ((9316, 9347), 'numpy.std', 'np.std', (['c_amps[mask, :]'], {'axis': '(0)'}), '(c_amps[mask, :], axis=0)\n', (9322, 9347), True, 'import numpy as np\n'), ((9504, 9538), 'numpy.mean', 'np.mean', (['c_phases[mask, :]'], {'axis': '(0)'}), '(c_phases[mask, :], axis=0)\n', (9511, 9538), True, 'import numpy as np\n'), ((2002, 2016), 'numpy.abs', 'np.abs', (['lprime'], {}), '(lprime)\n', (2008, 2016), True, 'import numpy as np\n'), ((2278, 2299), 
'numpy.conjugate', 'np.conjugate', (['overlap'], {}), '(overlap)\n', (2290, 2299), True, 'import numpy as np\n'), ((1049, 1058), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (1055, 1058), True, 'import numpy as np\n'), ((6203, 6212), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (6209, 6212), True, 'import numpy as np\n'), ((6221, 6230), 'numpy.abs', 'np.abs', (['m'], {}), '(m)\n', (6227, 6230), True, 'import numpy as np\n')] |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import sys
import time
import logging
import itertools
import collections
from copy import copy, deepcopy
import numpy as np
from matplotlib import pyplot as plt
try:
import pygraphviz as pgv
except ImportError:
pass
from indra.statements import *
from indra.databases import uniprot_client
logger = logging.getLogger('preassembler')
from matplotlib import pyplot as plt
import numpy as np
class Preassembler(object):
    """De-duplicates statements and arranges them in a specificity hierarchy.
    Parameters
    ----------
    hierarchies : dict[:py:class:`indra.preassembler.hierarchy_manager`]
        A dictionary of hierarchies with keys such as 'entity' (hierarchy of
        entities, primarily specifying relationships between genes and their
        families) and 'modification' pointing to HierarchyManagers
    stmts : list of :py:class:`indra.statements.Statement` or None
        A set of statements to perform pre-assembly on. If None, statements
        should be added using the :py:meth:`add_statements` method.
    Attributes
    ----------
    stmts : list of :py:class:`indra.statements.Statement`
        Starting set of statements for preassembly.
    unique_stmts : list of :py:class:`indra.statements.Statement`
        Statements resulting from combining duplicates.
    related_stmts : list of :py:class:`indra.statements.Statement`
        Top-level statements after building the refinement hierarchy.
    hierarchies : dict[:py:class:`indra.preassembler.hierarchy_manager`]
        A dictionary of hierarchies with keys such as 'entity' and
        'modification' pointing to HierarchyManagers
    """
    def __init__(self, hierarchies, stmts=None):
        """Store the hierarchies and (deep copies of) the input statements."""
        self.hierarchies = hierarchies
        # Deep-copy so preassembly never mutates the caller's statements.
        if stmts:
            self.stmts = deepcopy(stmts)
        else:
            self.stmts = []
        self.unique_stmts = []
        self.related_stmts = []
    def add_statements(self, stmts):
        """Add to the current list of statements.
        Parameters
        ----------
        stmts : list of :py:class:`indra.statements.Statement`
            Statements to add to the current list.
        """
        self.stmts += deepcopy(stmts)
    def combine_duplicates(self):
        """Combine duplicates among `stmts` and save result in `unique_stmts`.
        A wrapper around the static method :py:meth:`combine_duplicate_stmts`.
        """
        self.unique_stmts = self.combine_duplicate_stmts(self.stmts)
        return self.unique_stmts
    @staticmethod
    def combine_duplicate_stmts(stmts):
        """Combine evidence from duplicate Statements.
        Statements are deemed to be duplicates if they have the same key
        returned by the `matches_key()` method of the Statement class. This
        generally means that statements must be identical in terms of their
        arguments and can differ only in their associated `Evidence` objects.
        This function keeps the first instance of each set of duplicate
        statements and merges the lists of Evidence from all of the other
        statements.
        Parameters
        ----------
        stmts : list of :py:class:`indra.statements.Statement`
            Set of statements to de-duplicate.
        Returns
        -------
        list of :py:class:`indra.statements.Statement`
            Unique statements with accumulated evidence across duplicates.
        Examples
        --------
        De-duplicate and combine evidence for two statements differing only
        in their evidence lists:
        >>> map2k1 = Agent('MAP2K1')
        >>> mapk1 = Agent('MAPK1')
        >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',
        ... evidence=[Evidence(text='evidence 1')])
        >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',
        ... evidence=[Evidence(text='evidence 2')])
        >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2])
        >>> uniq_stmts
        [Phosphorylation(MAP2K1(), MAPK1(), T, 185)]
        >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE
        ['evidence 1', 'evidence 2']
        """
        unique_stmts = []
        # Remove exact duplicates using a set() call, then make copies:
        st = list(deepcopy(set(stmts)))
        # Group statements according to whether they are matches (differing
        # only in their evidence).
        # Sort the statements in place by matches_key()
        st.sort(key=lambda x: x.matches_key())
        for key, duplicates in itertools.groupby(st,
                                               key=lambda x: x.matches_key()):
            # Get the first statement and add the evidence of all subsequent
            # Statements to it
            for stmt_ix, stmt in enumerate(duplicates):
                if stmt_ix == 0:
                    first_stmt = stmt
                else:
                    first_stmt.evidence += stmt.evidence
            # This should never be None or anything else
            assert isinstance(first_stmt, Statement)
            unique_stmts.append(first_stmt)
        return unique_stmts
    def combine_related(self, return_toplevel=True):
        """Connect related statements based on their refinement relationships.
        This function takes as a starting point the unique statements (with
        duplicates removed) and returns a modified flat list of statements
        containing only those statements which do not represent a refinement of
        other existing statements. In other words, the more general versions of
        a given statement do not appear at the top level, but instead are
        listed in the `supports` field of the top-level statements.
        If :py:attr:`unique_stmts` has not been initialized with the
        de-duplicated statements, :py:meth:`combine_duplicates` is called
        internally.
        After this function is called the attribute :py:attr:`related_stmts` is
        set as a side-effect.
        The procedure for combining statements in this way involves a series
        of steps:
            1. The statements are grouped by type (e.g., Phosphorylation) and
               each type is iterated over independently.
            2. Statements of the same type are then grouped according to their
               Agents' entity hierarchy component identifiers. For instance,
               ERK, MAPK1 and MAPK3 are all in the same connected component in
               the entity hierarchy and therefore all Statements of the same
               type referencing these entities will be grouped. This grouping
               assures that relations are only possible within Statement groups
               and not among groups. For two Statements to be in the same group
               at this step, the Statements must be the same type and the
               Agents at each position in the Agent lists must either be in the
               same hierarchy component, or if they are not in the hierarchy,
               must have identical entity_matches_keys. Statements with None in
               one of the Agent list positions are collected separately at this
               stage.
            3. Statements with None at either the first or second position are
               iterated over. For a statement with a None as the first Agent,
               the second Agent is examined; then the Statement with None is
               added to all Statement groups with a corresponding component or
               entity_matches_key in the second position. The same procedure is
               performed for Statements with None at the second Agent position.
            4. The statements within each group are then compared; if one
               statement represents a refinement of the other (as defined by
               the `refinement_of()` method implemented for the Statement),
               then the more refined statement is added to the `supports` field
               of the more general statement, and the more general statement is
               added to the `supported_by` field of the more refined statement.
            5. A new flat list of statements is created that contains only
               those statements that have no `supports` entries (statements
               containing such entries are not eliminated, because they will be
               retrievable from the `supported_by` fields of other statements).
               This list is returned to the caller.
        .. note:: Subfamily relationships must be consistent across arguments
            For now, we require that merges can only occur if the *isa*
            relationships are all in the *same direction for all the agents* in
            a Statement. For example, the two statement groups: `RAF_family ->
            MEK1` and `BRAF -> MEK_family` would not be merged, since BRAF
            *isa* RAF_family, but MEK_family is not a MEK1. In the future this
            restriction could be revisited.
        Parameters
        ----------
        return_toplevel : bool
            If True only the top level statements are returned.
            If False, all statements are returned. Default: True
        Returns
        -------
        list of :py:class:`indra.statement.Statement`
            The returned list contains Statements representing the more
            concrete/refined versions of the Statements involving particular
            entities. The attribute :py:attr:`related_stmts` is also set to
            this list. However, if return_toplevel is False then all
            statements are returned, irrespective of level of specificity.
            In this case the relationships between statements can
            be accessed via the supports/supported_by attributes.
        Examples
        --------
        A more general statement with no information about a Phosphorylation
        site is identified as supporting a more specific statement:
        >>> from indra.preassembler.hierarchy_manager import hierarchies
        >>> braf = Agent('BRAF')
        >>> map2k1 = Agent('MAP2K1')
        >>> st1 = Phosphorylation(braf, map2k1)
        >>> st2 = Phosphorylation(braf, map2k1, residue='S')
        >>> pa = Preassembler(hierarchies, [st1, st2])
        >>> combined_stmts = pa.combine_related() # doctest:+ELLIPSIS
        >>> combined_stmts
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        >>> combined_stmts[0].supported_by
        [Phosphorylation(BRAF(), MAP2K1())]
        >>> combined_stmts[0].supported_by[0].supports
        [Phosphorylation(BRAF(), MAP2K1(), S)]
        """
        # If unique_stmts is not initialized, call combine_duplicates.
        if not self.unique_stmts:
            self.combine_duplicates()
        unique_stmts = deepcopy(self.unique_stmts)
        eh = self.hierarchies['entity']
        # Make a list of Statement types
        stmts_by_type = collections.defaultdict(lambda: [])
        for stmt in unique_stmts:
            stmts_by_type[type(stmt)].append(stmt)
        group_sizes = []
        largest_group = None
        largest_group_size = 0
        num_stmts = len(unique_stmts)
        related_stmts = []
        # Each Statement type can be preassembled independently
        for stmt_type, stmts_this_type in stmts_by_type.items():
            logger.info('Preassembling %s (%s)' %
                        (stmt_type.__name__, len(stmts_this_type)))
            # Dict of stmt group key tuples, indexed by their first Agent
            stmt_by_first = collections.defaultdict(lambda: [])
            # Dict of stmt group key tuples, indexed by their second Agent
            stmt_by_second = collections.defaultdict(lambda: [])
            # Dict of statements with None first, with second Agent as keys
            none_first = collections.defaultdict(lambda: [])
            # Dict of statements with None second, with first Agent as keys
            none_second = collections.defaultdict(lambda: [])
            # The dict of all statement groups, with tuples of components
            # or entity_matches_keys as keys
            stmt_by_group = collections.defaultdict(lambda: [])
            # Iterate over the Statements and build the entity key tuples
            # (hierarchy graph components or entity_matches_keys)
            # used to group them
            for stmt in stmts_this_type:
                entities = []
                for i, a in enumerate(stmt.agent_list()):
                    # Entity is None: add the None to the entities list
                    if a is None and stmt_type != Complex:
                        entities.append(a)
                        continue
                    # Entity is not None, but could be ungrounded or not
                    # in a family
                    else:
                        a_ns, a_id = a.get_grounding()
                        # No grounding available--in this case, use the
                        # entity_matches_key
                        if a_ns is None or a_id is None:
                            entities.append(a.entity_matches_key())
                            continue
                        # We have grounding, now check for a component ID
                        uri = eh.get_uri(a_ns, a_id)
                        # This is the component ID corresponding to the agent
                        # in the entity hierarchy
                        component = eh.components.get(uri)
                        # If no component ID, use the entity_matches_key()
                        if component is None:
                            entities.append(a.entity_matches_key())
                        # Component ID, so this is in a family
                        else:
                            # We turn the component ID into a string so that
                            # we can sort it alphabetically along with
                            # entity_matches_keys for Complexes
                            entities.append(str(component))
                # At this point we have an entity list for the Statement.
                # If we're dealing with Complexes, sort the entities and use
                # the sorted list as the stmt_by_group dict key
                if stmt_type == Complex:
                    # There shouldn't be any statements of the type
                    # e.g., Complex([Foo, None, Bar])
                    assert None not in entities
                    assert len(entities) > 0
                    entities.sort()
                    key = tuple(entities)
                    if stmt not in stmt_by_group[key]:
                        stmt_by_group[key].append(stmt)
                # Now look at all other statement types
                # All other statements will have one or two entities
                elif len(entities) == 1:
                    # If only one entity, we only need the one key.
                    # It should not be None!
                    assert None not in entities
                    key = tuple(entities)
                    if stmt not in stmt_by_group[key]:
                        stmt_by_group[key].append(stmt)
                else:
                    # Make sure we only have two entities, and they are not both
                    # None
                    key = tuple(entities)
                    assert len(key) == 2
                    assert key != (None, None)
                    # First agent is None; add the statements to the
                    # none_first dict, indexed by the 2nd entity
                    if key[0] is None and stmt not in none_first[key[1]]:
                        none_first[key[1]].append(stmt)
                    # Second agent is None; add the the statements to the
                    # none_second dict, indexed by the 1st entity
                    elif key[1] is None and stmt not in none_second[key[0]]:
                        none_second[key[0]].append(stmt)
                    # Neither entity is None! Add the statement to the
                    # stmt_by_group dict, and add the key to the corresponding
                    # list of keys in the stmt_by_first and stmt_by_second
                    # lists.
                    elif None not in key:
                        if stmt not in stmt_by_group[key]:
                            stmt_by_group[key].append(stmt)
                        if key not in stmt_by_first[key[0]]:
                            stmt_by_first[key[0]].append(key)
                        if key not in stmt_by_second[key[1]]:
                            stmt_by_second[key[1]].append(key)
            # When we've gotten here, we should have stmt_by_group entries, and
            # we may or may not have stmt_by_first/second and none_first/second
            # dicts filled out (we'll only have them for Statement types that
            # are not Complex and that have two Agents as arguments.
            if none_first:
                # Get the keys associated with stmts having a None first
                # argument
                for second_arg, stmts in none_first.items():
                    # Look for any statement group keys having this second arg
                    second_arg_keys = stmt_by_second[second_arg]
                    # If there are no more specific statements matching this
                    # set of statements with a None first arg, then the
                    # statements with the None first arg deserve to be in
                    # their own group.
                    if not second_arg_keys:
                        stmt_by_group[(None, second_arg)] = stmts
                    # On the other hand, if there are statements with a matching
                    # second arg component, we need to add the None first
                    # statements to all groups with the matching second arg
                    for second_arg_key in second_arg_keys:
                        stmt_by_group[second_arg_key] += stmts
            # Now do the corresponding steps for the statements with None as the
            # second argument:
            if none_second:
                for first_arg, stmts in none_second.items():
                    first_arg_keys = stmt_by_first[first_arg]
                    if not first_arg_keys:
                        stmt_by_group[(first_arg, None)] = stmts
                    for first_arg_key in first_arg_keys:
                        stmt_by_group[first_arg_key] += stmts
            # Now, set supports/supported_by relationships!
            # Keep track of the largest group size for debugging purposes.
            logger.debug('Preassembling %d components' % (len(stmt_by_group)))
            for key, stmts in stmt_by_group.items():
                if len(stmts) > largest_group_size:
                    largest_group_size = len(stmts)
                    largest_group = (key, stmts[0:10])
                group_sizes.append(len(stmts))
                # Pairwise comparison within the group (quadratic in group
                # size) to establish refinement links.
                for stmt1, stmt2 in itertools.combinations(stmts, 2):
                    self._set_supports(stmt1, stmt2)
            # Collect top level statements
            toplevel_stmts = [st for st in stmts_this_type if not st.supports]
            logger.debug('%d top level' % len(toplevel_stmts))
            related_stmts += toplevel_stmts
        # Log some stats for debugging purposes
        total_comps = 0
        for g in group_sizes:
            total_comps += g ** 2
        logger.debug("Total comparisons: %s" % total_comps)
        if group_sizes:
            logger.debug("Max group size: %s" % np.max(group_sizes))
            logger.debug("(%.1f %% of all comparisons)" %
                         (100 * ((np.max(group_sizes) ** 2) / float(total_comps))))
        self.related_stmts = related_stmts
        if return_toplevel:
            return self.related_stmts
        else:
            return unique_stmts
    def _set_supports(self, stmt1, stmt2):
        """Link two statements if one is a refinement of the other.
        The more refined statement gets the more general one appended to its
        `supported_by` list and vice versa for `supports`. Only one direction
        is recorded per call; stmt1-refines-stmt2 is checked first.
        """
        if (stmt2 not in stmt1.supported_by) and \
            stmt1.refinement_of(stmt2, self.hierarchies):
            stmt1.supported_by.append(stmt2)
            stmt2.supports.append(stmt1)
        elif (stmt1 not in stmt2.supported_by) and \
            stmt2.refinement_of(stmt1, self.hierarchies):
            stmt2.supported_by.append(stmt1)
            stmt1.supports.append(stmt2)
def render_stmt_graph(statements, agent_style=None):
    """Render the statement hierarchy as a pygraphviz graph.
    Parameters
    ----------
    stmts : list of :py:class:`indra.statements.Statement`
        Top-level statements, with their supporting statements attached,
        as produced by :py:meth:`combine_related`.
    agent_style : dict or None
        Attributes controlling node appearance. When None, nodes are drawn
        as filled light-gray boxes in Arial::
            agent_style = {'color': 'lightgray', 'style': 'filled',
                           'fontname': 'arial'}
    Returns
    -------
    pygraphviz.AGraph
        Graph whose nodes are statements and whose edges point from
        supported statements to the statements supporting them, or None if
        pygraphviz is unavailable.
    Examples
    --------
    Pattern for getting statements and rendering as a Graphviz graph:
    >>> from indra.preassembler.hierarchy_manager import hierarchies
    >>> braf = Agent('BRAF')
    >>> map2k1 = Agent('MAP2K1')
    >>> st1 = Phosphorylation(braf, map2k1)
    >>> st2 = Phosphorylation(braf, map2k1, residue='S')
    >>> pa = Preassembler(hierarchies, [st1, st2])
    >>> pa.combine_related() # doctest:+ELLIPSIS
    [Phosphorylation(BRAF(), MAP2K1(), S)]
    >>> graph = render_stmt_graph(pa.related_stmts)
    >>> graph.write('example_graph.dot') # To make the DOT file
    >>> graph.draw('example_graph.png', prog='dot') # To make an image
    """
    # Fall back to the default node appearance when none was supplied.
    if agent_style is None:
        agent_style = {'color': 'lightgray', 'style': 'filled',
                       'fontname': 'arial'}
    nodes = set()
    edges = set()
    # Walk supported_by links with an explicit stack (instead of recursion);
    # the sets de-duplicate statements reachable along multiple paths.
    pending = list(statements)
    while pending:
        stmt = pending.pop()
        nodes.add(stmt)
        for supporting in stmt.supported_by:
            edges.add((str(stmt.matches_key()), str(supporting.matches_key())))
            pending.append(supporting)
    # Assemble the pygraphviz graph; degrade gracefully when the optional
    # pygraphviz import at module level failed.
    try:
        graph = pgv.AGraph(name='statements', directed=True, rankdir='LR')
    except NameError:
        logger.error('Cannot generate graph because '
                     'pygraphviz could not be imported.')
        return None
    for node in nodes:
        graph.add_node(str(node.matches_key()), label=str(node), **agent_style)
    graph.add_edges_from(edges)
    return graph
def flatten_stmts(stmts):
    """Return the full set of unique stms in a pre-assembled stmt graph.
    Collects every statement reachable through `supported_by` links, so the
    result can be compared to the original unique statements to verify that
    nothing was lost during preassembly.
    Parameters
    ----------
    stmts : list of :py:class:`indra.statements.Statement`
        Top-level statements with their supporting statements attached,
        as produced by :py:meth:`combine_related`.
    Returns
    -------
    stmts : list of :py:class:`indra.statements.Statement`
        All statements contained in the hierarchical statement graph.
    Examples
    --------
    Calling :py:meth:`combine_related` on two statements results in one
    top-level statement; calling :py:func:`flatten_stmts` recovers both:
    >>> from indra.preassembler.hierarchy_manager import hierarchies
    >>> braf = Agent('BRAF')
    >>> map2k1 = Agent('MAP2K1')
    >>> st1 = Phosphorylation(braf, map2k1)
    >>> st2 = Phosphorylation(braf, map2k1, residue='S')
    >>> pa = Preassembler(hierarchies, [st1, st2])
    >>> pa.combine_related() # doctest:+ELLIPSIS
    [Phosphorylation(BRAF(), MAP2K1(), S)]
    >>> flattened = flatten_stmts(pa.related_stmts)
    >>> flattened.sort(key=lambda x: x.matches_key())
    >>> flattened
    [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)]
    """
    collected = set(stmts)
    # Recurse into each statement's supporting statements and merge them in.
    for stmt in stmts:
        if stmt.supported_by:
            collected |= set(flatten_stmts(stmt.supported_by))
    return list(collected)
def _flatten_evidence_for_stmt(stmt):
total_evidence = set(stmt.evidence)
for supp_stmt in stmt.supported_by:
child_evidence = _flatten_evidence_for_stmt(supp_stmt)
total_evidence = total_evidence.union(child_evidence)
return list(total_evidence)
def flatten_evidence(stmts):
    """Add evidence from *supporting* stmts to evidence for *supported* stmts.
    Parameters
    ----------
    stmts : list of :py:class:`indra.statements.Statement`
        A list of top-level statements with associated supporting statements
        resulting from building a statement hierarchy with
        :py:meth:`combine_related`.
    Returns
    -------
    stmts : list of :py:class:`indra.statements.Statement`
        Statement hierarchy identical to the one passed, but with the
        evidence lists for each statement now containing all of the evidence
        associated with the statements they are supported by. The returned
        statements are deep copies; the input statements are left unmodified.
    Examples
    --------
    Flattening evidence adds the two pieces of evidence from the supporting
    statement to the evidence list of the top-level statement:
    >>> from indra.preassembler.hierarchy_manager import hierarchies
    >>> braf = Agent('BRAF')
    >>> map2k1 = Agent('MAP2K1')
    >>> st1 = Phosphorylation(braf, map2k1,
    ... evidence=[Evidence(text='foo'), Evidence(text='bar')])
    >>> st2 = Phosphorylation(braf, map2k1, residue='S',
    ... evidence=[Evidence(text='baz'), Evidence(text='bak')])
    >>> pa = Preassembler(hierarchies, [st1, st2])
    >>> pa.combine_related() # doctest:+ELLIPSIS
    [Phosphorylation(BRAF(), MAP2K1(), S)]
    >>> [e.text for e in pa.related_stmts[0].evidence] # doctest:+IGNORE_UNICODE
    ['baz', 'bak']
    >>> flattened = flatten_evidence(pa.related_stmts)
    >>> sorted([e.text for e in flattened[0].evidence]) # doctest:+IGNORE_UNICODE
    ['bak', 'bar', 'baz', 'foo']
    """
    # Deep-copy the statements so the caller's objects stay untouched.
    # BUGFIX: the previous version made this copy but then mutated (and
    # returned) the original `stmts`, leaving `copied_stmts` as dead code in
    # contradiction with its own comment. The copies are now the ones that
    # get their evidence lists updated and returned.
    copied_stmts = deepcopy(stmts)
    for stmt in copied_stmts:
        # Accumulate evidence from the full supported_by subtree.
        stmt.evidence = _flatten_evidence_for_stmt(stmt)
    return copied_stmts
| [
"copy.deepcopy",
"collections.defaultdict",
"pygraphviz.AGraph",
"itertools.combinations",
"numpy.max",
"builtins.str",
"logging.getLogger"
] | [((413, 446), 'logging.getLogger', 'logging.getLogger', (['"""preassembler"""'], {}), "('preassembler')\n", (430, 446), False, 'import logging\n'), ((26834, 26849), 'copy.deepcopy', 'deepcopy', (['stmts'], {}), '(stmts)\n', (26842, 26849), False, 'from copy import copy, deepcopy\n'), ((2276, 2291), 'copy.deepcopy', 'deepcopy', (['stmts'], {}), '(stmts)\n', (2284, 2291), False, 'from copy import copy, deepcopy\n'), ((10734, 10761), 'copy.deepcopy', 'deepcopy', (['self.unique_stmts'], {}), '(self.unique_stmts)\n', (10742, 10761), False, 'from copy import copy, deepcopy\n'), ((10867, 10903), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (10890, 10903), False, 'import collections\n'), ((22754, 22812), 'pygraphviz.AGraph', 'pgv.AGraph', ([], {'name': '"""statements"""', 'directed': '(True)', 'rankdir': '"""LR"""'}), "(name='statements', directed=True, rankdir='LR')\n", (22764, 22812), True, 'import pygraphviz as pgv\n'), ((1880, 1895), 'copy.deepcopy', 'deepcopy', (['stmts'], {}), '(stmts)\n', (1888, 1895), False, 'from copy import copy, deepcopy\n'), ((11488, 11524), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (11511, 11524), False, 'import collections\n'), ((11628, 11664), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (11651, 11664), False, 'import collections\n'), ((11765, 11801), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (11788, 11801), False, 'import collections\n'), ((11903, 11939), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (11926, 11939), False, 'import collections\n'), ((12086, 12122), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (12109, 12122), False, 'import collections\n'), ((18930, 18962), 'itertools.combinations', 
'itertools.combinations', (['stmts', '(2)'], {}), '(stmts, 2)\n', (18952, 18962), False, 'import itertools\n'), ((23044, 23053), 'builtins.str', 'str', (['node'], {}), '(node)\n', (23047, 23053), False, 'from builtins import dict, str\n'), ((19515, 19534), 'numpy.max', 'np.max', (['group_sizes'], {}), '(group_sizes)\n', (19521, 19534), True, 'import numpy as np\n'), ((13950, 13964), 'builtins.str', 'str', (['component'], {}), '(component)\n', (13953, 13964), False, 'from builtins import dict, str\n'), ((19621, 19640), 'numpy.max', 'np.max', (['group_sizes'], {}), '(group_sizes)\n', (19627, 19640), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
##############export checkpoint file into mindir model#################
python export.py
"""
import argparse
import os
import numpy as np
from mindspore import Tensor
from mindspore import export, load_checkpoint, load_param_into_net
from src.config import lstm_cfg as cfg
from src.lstm import SentimentNet
if __name__ == '__main__':
    # Parse the command line: where the preprocessed data lives and which
    # checkpoint to export.
    parser = argparse.ArgumentParser(description='MindSpore LSTM Exporter')
    parser.add_argument('--preprocess_path', type=str, default='./preprocess',
                        help='path where the pre-process data is stored.')
    parser.add_argument('--ckpt_file', type=str, required=True, help='lstm ckpt file.')
    args = parser.parse_args()
    # Pre-trained word embeddings produced by the preprocessing step.
    weight_file = os.path.join(args.preprocess_path, "weight.txt")
    embedding_table = np.loadtxt(weight_file).astype(np.float32)
    # Rebuild the sentiment network with the same hyper-parameters used
    # during training.
    network = SentimentNet(
        vocab_size=embedding_table.shape[0],
        embed_size=cfg.embed_size,
        num_hiddens=cfg.num_hiddens,
        num_layers=cfg.num_layers,
        bidirectional=cfg.bidirectional,
        num_classes=cfg.num_classes,
        weight=Tensor(embedding_table),
        batch_size=cfg.batch_size)
    # Restore the trained weights into the freshly built network.
    load_param_into_net(network, load_checkpoint(args.ckpt_file))
    # Dummy token-id batch matching the expected (batch, seq_len) input.
    dummy_input = Tensor(np.random.uniform(0.0, 1e5, size=[64, 500]).astype(np.int32))
    export(network, dummy_input, file_name="lstm", file_format="MINDIR")
| [
"numpy.random.uniform",
"mindspore.export",
"argparse.ArgumentParser",
"mindspore.load_param_into_net",
"mindspore.load_checkpoint",
"mindspore.Tensor",
"os.path.join"
] | [((1021, 1083), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MindSpore LSTM Exporter"""'}), "(description='MindSpore LSTM Exporter')\n", (1044, 1083), False, 'import argparse\n'), ((1935, 1966), 'mindspore.load_checkpoint', 'load_checkpoint', (['args.ckpt_file'], {}), '(args.ckpt_file)\n', (1950, 1966), False, 'from mindspore import export, load_checkpoint, load_param_into_net\n'), ((1971, 2011), 'mindspore.load_param_into_net', 'load_param_into_net', (['network', 'param_dict'], {}), '(network, param_dict)\n', (1990, 2011), False, 'from mindspore import export, load_checkpoint, load_param_into_net\n'), ((2102, 2168), 'mindspore.export', 'export', (['network', 'input_arr'], {'file_name': '"""lstm"""', 'file_format': '"""MINDIR"""'}), "(network, input_arr, file_name='lstm', file_format='MINDIR')\n", (2108, 2168), False, 'from mindspore import export, load_checkpoint, load_param_into_net\n'), ((1838, 1861), 'mindspore.Tensor', 'Tensor', (['embedding_table'], {}), '(embedding_table)\n', (1844, 1861), False, 'from mindspore import Tensor\n'), ((1391, 1439), 'os.path.join', 'os.path.join', (['args.preprocess_path', '"""weight.txt"""'], {}), "(args.preprocess_path, 'weight.txt')\n", (1403, 1439), False, 'import os\n'), ((2036, 2084), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(100000.0)'], {'size': '[64, 500]'}), '(0.0, 100000.0, size=[64, 500])\n', (2053, 2084), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Authors: <NAME>, <NAME>
Functionality implemented:
- Scraper that retrieves conversations from multiple online web sources
- Formats and outputs conversations in a Pandas table
"""
# Libraries and Dependencies
import demoji
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from headlines_scraper import create_array
from pathlib import Path
from selenium.webdriver.chrome.service import Service
import time
# Setup
# Download/refresh demoji's emoji code set once at import time; it is
# required before demoji.replace() is used in output().
demoji.download_codes()
def get_yahoo_conversations(stock):
    """Scrape the Yahoo Finance conversations page for the given ticker.
    Drives a headless Chrome instance, repeatedly clicks the "load more"
    button to expand the feed, then parses the page for conversation texts.
    :param stock: Stock ticker symbol.
    :return: Array of conversation texts found on the page.
    """
    # Yahoo uses '-' in place of '.' for tickers such as BRK.B.
    stock = stock.replace('.', '-')
    url = "https://finance.yahoo.com/quote/" + stock + "/community?p=" + stock
    # Headless Chrome so scraping runs without opening a browser window.
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('headless')
    driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)
    driver.get(url)
    ignored_exceptions = (NoSuchElementException, StaleElementReferenceException)
    load_more_xpath = '//*[@id="canvass-0-CanvassApplet"]/div/button'
    # Click "load more" up to five times to pull in additional conversations.
    for attempt in range(5):
        try:
            WebDriverWait(driver, 5, ignored_exceptions).until(
                EC.element_to_be_clickable((By.XPATH, load_more_xpath)))
            load_more = driver.find_element(By.XPATH, load_more_xpath)
            driver.execute_script("arguments[0].click();", load_more)
        except Exception as e:
            print("Error on iteration", attempt, "- Exception:", e)
    # Parse the fully expanded page and extract the conversation blocks.
    soup = BeautifulSoup(driver.page_source, 'lxml')
    driver.quit()
    return create_array(soup.find_all('div', class_='C($c-fuji-grey-l) Mb(2px) Fz(14px) Lh(20px) Pend(8px)'))
def get_all_conversations(stock):
    """Collect conversations about a stock from all supported web sources.
    Currently only Yahoo Finance is scraped; results are concatenated into a
    single flat list.
    :param stock: Name of stock ticker.
    :return: Flat list of conversation texts, or [] when nothing was found.
    """
    scraped = np.array(get_yahoo_conversations(stock))
    if len(scraped) == 0:
        return []
    # Flatten whatever nesting the scraper produced into one flat list.
    return list(np.concatenate(scraped, axis=None))
def output(overall_data, stock):
    """Write the de-duplicated headlines/conversations for a stock to CSV.
    :param overall_data: Texts gathered from the various web sources.
    :param stock: Name of the stock the data belongs to.
    :return None.
    """
    # set() keeps only unique entries; convert back to a list afterwards.
    unique_rows = list(set(overall_data))
    file_path = str(Path(__file__).resolve().parents[1]) + '/Conversations/' + stock.upper() + '_conversations.csv'
    if not unique_rows:
        print("Invalid ticker/company or no headlines/conversations available.")
        return
    column = 'Conversation'
    frame = pd.DataFrame(unique_rows, columns=[column])
    # Strip emoji before persisting, then write without the index column.
    frame[column] = frame[column].apply(demoji.replace)
    frame.to_csv(file_path, index=False)
def main():
    """Scrape and store conversations for every ticker in companies.csv."""
    stocks_df = pd.read_csv("../companies.csv")
    # Map each ticker symbol to its company name.
    stocks_dict = {row["Symbol"]: row["Company"] for _, row in stocks_df.iterrows()}
    for stock in list(stocks_dict.keys()):
        print("\n\n===================================================================")
        print("Getting conversations for:", stock)
        try:
            output(get_all_conversations(stock), stock)
        except RuntimeError as e:
            print(e, "was handled")
if __name__ == "__main__":
    main()
| [
"pandas.DataFrame",
"selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"pandas.read_csv",
"demoji.download_codes",
"pathlib.Path",
"selenium.webdriver.ChromeOptions",
"numpy.array",
"webdriver_manager.chrome.ChromeDriverManager",
"bs4.BeautifulSoup",
"selenium.webdriver.supp... | [((850, 873), 'demoji.download_codes', 'demoji.download_codes', ([], {}), '()\n', (871, 873), False, 'import demoji\n'), ((1277, 1302), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (1300, 1302), False, 'from selenium import webdriver\n'), ((2169, 2210), 'bs4.BeautifulSoup', 'BeautifulSoup', (['driver.page_source', '"""lxml"""'], {}), "(driver.page_source, 'lxml')\n", (2182, 2210), False, 'from bs4 import BeautifulSoup\n'), ((2763, 2792), 'numpy.array', 'np.array', (['yahoo_conversations'], {}), '(yahoo_conversations)\n', (2771, 2792), True, 'import numpy as np\n'), ((4032, 4063), 'pandas.read_csv', 'pd.read_csv', (['"""../companies.csv"""'], {}), "('../companies.csv')\n", (4043, 4063), True, 'import pandas as pd\n'), ((2867, 2913), 'numpy.concatenate', 'np.concatenate', (['yahoo_conversations'], {'axis': 'None'}), '(yahoo_conversations, axis=None)\n', (2881, 2913), True, 'import numpy as np\n'), ((3700, 3743), 'pandas.DataFrame', 'pd.DataFrame', (['overall_data'], {'columns': '[title]'}), '(overall_data, columns=[title])\n', (3712, 3743), True, 'import pandas as pd\n'), ((1734, 1825), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, \'//*[@id="canvass-0-CanvassApplet"]/div/button\')'], {}), '((By.XPATH,\n \'//*[@id="canvass-0-CanvassApplet"]/div/button\'))\n', (1760, 1825), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1666, 1710), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(5)', 'ignored_exceptions'], {}), '(driver, 5, ignored_exceptions)\n', (1679, 1710), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1417, 1438), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (1436, 1438), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((3426, 3440), 'pathlib.Path', 'Path', (['__file__'], {}), 
'(__file__)\n', (3430, 3440), False, 'from pathlib import Path\n')] |
import numpy as np
import cv2
import time
from skimage.measure import find_contours,approximate_polygon
import math
from intervaltree import Interval, IntervalTree # to select not overlapping edges
import numpy_opencv
def load_params():
    """Build the default configuration dictionary for the pipeline.

    Bundles the camera intrinsics ('Kalib') together with image size,
    label ids and every pre/post-processing threshold. The inverse
    intrinsics matrix is precomputed and stored under 'invK'.

    Returns
    -------
    dict: parameter name -> value mapping.
    """
    params = {
        # camera intrinsics (fx, fy, cx, cy baked into a 3x3 matrix)
        'Kalib': np.array([[570.34, 0, 319.5],
                           [0, 570.34, 239.5],
                           [0, 0, 1]]),
        # image dimensions
        'width': 640,
        'height': 512,
        'ceiling_height': 2.8,
        # semantic label ids
        'ground_label_id': 1,
        'wall_label_id': 0,
        'polyline_tolerance': 15,
        'downsample_popup': 0,
        'erosion_distance': 11,
        'dilation_distance': 11,
        'plot_ground_polys': 0,
        # pre-processing thresholds
        'pre_vertical_thre': 15,
        'pre_minium_len': 15,
        'pre_boundary_thre': 5,
        'pre_merge_angle_thre': 10,
        'pre_merge_dist_thre': 10,
        'pre_proj_angle_thre': 20,
        'pre_proj_cover_thre': 0.6,
        'pre_proj_cover_large_thre': 0.8,
        'pre_proj_dist_thre': 100,
        'pre_contour_close_thre': 50,
        'interval_overlap_thre': 20,
        # post-processing thresholds
        'post_short_thre': 30,
        'post_bind_dist_thre': 10,
        'post_merge_dist_thre': 20,
        'post_merge_angle_thre': 10,
        'post_extend_thre': 15,
        'dark_threshold': 90,
    }
    # cache the inverse intrinsics so callers never invert per frame
    params['invK'] = np.linalg.inv(params['Kalib'])
    return params
def get_intersect_len_tree(query, interval_tree):
    """Total overlap length between *query* and the segments in *interval_tree*.

    Parameters
    ----------
    query : array-like, shape (2,)
        [small, large] x-interval; must already be sorted ascending.
    interval_tree : intervaltree.IntervalTree
        Tree storing the x-ranges of all previously accepted segments.

    Returns
    -------
    Summed intersection length with every stored interval overlapping
    the query (0 when nothing overlaps).
    """
    all_intersect_length = 0
    # NOTE(review): search() is the legacy intervaltree API (overlap() in
    # newer releases) -- keep it for compatibility with the rest of the file.
    overlapping = sorted(interval_tree.search(query[0], query[1]))
    for interval in overlapping:
        # overlap of [query[0], query[1]] with [interval.begin, interval.end]
        overlap1 = query[1] - interval.begin
        overlap2 = interval.end - query[0]
        # BUGFIX: the original indexed all_intervals[0] inside the loop, so it
        # measured the first interval repeatedly instead of each one in turn.
        intersect_length = min(overlap1, overlap2,
                               abs(query[1] - query[0]), interval.length())
        all_intersect_length = all_intersect_length + intersect_length
    return all_intersect_length
def get_approxi_polys(label_map, polyline_tolerance):
    """Extract and simplify the ground polygon from a binary label map.

    ICRA 2016 helper: C++ calls this to run scikit-image's polygon
    approximation on a preprocessed (eroded/dilated) label map.

    Parameters
    ----------
    label_map : 2-D array where ground is 0 and wall is 1.
    polyline_tolerance : max deviation (pixels) allowed by the
        poly-line simplification.

    Returns
    -------
    (N, 2) array of [x, y] vertices of the simplified ground contour in
    reversed order, or None when no contour is found.
    """
    contours = find_contours(label_map, 0)  # each contour row is (row, col) == (y, x)
    if not contours:
        return None
    # keep the contour whose endpoints are farthest apart (the "longest")
    contour_lengths = np.zeros(len(contours))
    for contour_ind in range(len(contours)):   # range: was py2-only xrange
        bg_end_diff = contours[contour_ind][0, :] - contours[contour_ind][-1, :]
        contour_lengths[contour_ind] = np.linalg.norm(bg_end_diff)
    ground_contour_ind = np.argmax(contour_lengths)
    # swap columns so each row becomes (x, y)
    ground_contour = contours[ground_contour_ind].copy()
    ground_contour[:, 0] = contours[ground_contour_ind][:, 1]
    ground_contour[:, 1] = contours[ground_contour_ind][:, 0]
    # piecewise poly-line simplification; each row is a vertex (x, y)
    ground2d = approximate_polygon(ground_contour, polyline_tolerance)
    return ground2d[::-1, :]  # reverse the vertex order
def python_get_contours(label_map):
    """Return a subsampled ground contour from a binary label map.

    Parameters
    ----------
    label_map : 2-D array where ground is 0 and wall is 1.

    Returns
    -------
    Every 20th point of the longest 0-level contour as an (N, 2) array
    of [x, y] coordinates (closing point dropped), or None when no
    contour exists.
    """
    contours = find_contours(label_map, 0)  # each contour row is (row, col) == (y, x)
    if not contours:
        return None
    # pick the contour with the largest endpoint-to-endpoint distance
    contour_lengths = np.zeros(len(contours))
    for contour_ind in range(len(contours)):   # range: was py2-only xrange
        bg_end_diff = contours[contour_ind][0, :] - contours[contour_ind][-1, :]
        contour_lengths[contour_ind] = np.linalg.norm(bg_end_diff)
    ground_contour_ind = np.argmax(contour_lengths)
    # swap columns so each row becomes (x, y)
    ground_contour = contours[ground_contour_ind].copy()
    ground_contour[:, 0] = contours[ground_contour_ind][:, 1]
    ground_contour[:, 1] = contours[ground_contour_ind][:, 0]
    # drop the closing point and keep every 20th vertex
    # (the unused final_* accumulators from the original were removed)
    return ground_contour[0:-1:20]
def interval_tree_optimization(close_nonvertical_lines, params):
    ''' using optimization to select the best set of edges which maximize union in x direction while minimizing
    intersection in x direction. use a greedy selection method
    lines n*4 lines [x1 y1 x2 y2] output is m*4 remaining lines. there won't be any overlapping between edges
    '''
    optimized_line_segs=[]  # each row is x1,y1,x2,y2
    if (close_nonvertical_lines.shape[0]>0):
        # greedy selection: repeatedly take the longest remaining line whose
        # x-overlap with the already-covered ranges is below interval_overlap_thre
        closed_list=np.empty((0,1),int)  # already visited actual index
        open_list=np.arange(close_nonvertical_lines.shape[0])  # remaining to visit actual index
        # initially select the longest line TODO. may be a mix of length, x_duration
        close_nonvertical_lengths=np.linalg.norm(close_nonvertical_lines[:,2:4]-close_nonvertical_lines[:,0:2],axis=1)
        best_ind=np.argmax(close_nonvertical_lengths[open_list])
        current_node=open_list[best_ind]  # actual index of raw lines (close_nonvertical_lines)
        open_list=np.delete(open_list,best_ind)
        closed_list=np.append(closed_list,current_node)
        x_cover_range=np.array(close_nonvertical_lines[current_node][0:3:2])  # all the line x range covered n*2 array
        x_cover_range=x_cover_range.reshape(1,2)
        x_cover_tree=IntervalTree()
        # tree key is the segment's x-range [x1, x2); the payload is the full line
        x_cover_tree[close_nonvertical_lines[current_node][0]:close_nonvertical_lines[current_node][2]] = (close_nonvertical_lines[current_node,:]).tolist()
        # each row is a segment interval
        while (open_list.size!=0):
            # find all not overlapping or little overlapping lines. then select the longest
            potential_open_ind=np.empty((0,1),int)  # index of openlist
            for i in range(open_list.size):
                inter_length=get_intersect_len_tree(close_nonvertical_lines[open_list[i]][0:3:2],x_cover_tree)
                if (inter_length<params['interval_overlap_thre']):  # if very small overlapping
                    potential_open_ind=np.append(potential_open_ind,i)
            if potential_open_ind.size==0:
                break;  # cannot find potential list
            best_potentia_ind=np.argmax(close_nonvertical_lengths[open_list[potential_open_ind]])
            current_node=open_list[potential_open_ind[best_potentia_ind]]
            closed_list=np.append(closed_list, current_node)  # push into visited node
            open_list=np.delete(open_list,potential_open_ind[best_potentia_ind])  # delete from open list
            x_cover_tree[close_nonvertical_lines[current_node][0]:close_nonvertical_lines[current_node][2]] = (close_nonvertical_lines[current_node,:]).tolist()
        # handle all the overlapping areas. use the longer edge. or you can also manually choose the mean point
        # after this, there won't be any overlapping in x direction note!!! this will increase the number of edges
        raw_seg=len(x_cover_tree)
        x_cover_tree.split_overlaps()  # break into many small edges if they overlap
        needs_post_processing=(len(x_cover_tree)!=raw_seg)
        if (needs_post_processing):  # if there exist overlap
            all_intervals=sorted(x_cover_tree)
            to_delete_intervals=[]  # to remove short intervals
            for i in range(len(all_intervals)):
                for j in range(i+1, len(all_intervals)):
                    # identical [begin, end) spans after splitting: keep the piece
                    # coming from the longer original line, drop the other
                    if ( (all_intervals[i].begin==all_intervals[j].begin) & (all_intervals[i].end==all_intervals[j].end) ):
                        if ( (all_intervals[i].data[2]-all_intervals[i].data[0]) < (all_intervals[j].data[2]-all_intervals[j].data[0]) ):
                            to_delete_intervals.append(all_intervals[i])  # TODO or remove the longer lines
                        else:
                            to_delete_intervals.append(all_intervals[j])
            to_delete_intervals=list(set(to_delete_intervals))  # remove duplicate terms, avoid being deleted twice
            for k in range(len(to_delete_intervals)):
                x_cover_tree.remove(to_delete_intervals[k])
        # merge the intervals if there are originally in the same line (share the same interval data) and currently adjacent
        if (needs_post_processing):
            can_merge=1;
            counter=0;  # safety cap so the merge loop always terminates
            while ((can_merge==1)& (counter<100)):
                can_merge=0;
                counter=counter+1;
                all_intervals=sorted(x_cover_tree)
                for i in range(len(all_intervals)):
                    for j in range(i+1, len(all_intervals)):
                        # if there are ajacent, and original come from the same line, then, merge them
                        if ( ((all_intervals[i].begin==all_intervals[j].end) | (all_intervals[i].end==all_intervals[j].begin)) & (all_intervals[i].data==all_intervals[j].data) ):
                            merge_min=min(all_intervals[i].begin,all_intervals[j].begin);
                            merge_max=max(all_intervals[i].end,all_intervals[j].end);
                            merge_data=all_intervals[j].data
                            x_cover_tree.remove(all_intervals[i])
                            x_cover_tree.remove(all_intervals[j])
                            x_cover_tree.addi(merge_min,merge_max,merge_data)
                            can_merge=1;
                            break;
                    if (can_merge==1):
                        break;  # restart the scan from a freshly sorted tree
        # refine the remained ones, so that all the intervals data (line boundaries) matches the begin/end point of interval tree
        # done of interval tree optimization part
        all_intervals=sorted(x_cover_tree)  # all_intervals is linked with the tree, share the same memory
        if (needs_post_processing):
            for i in range(len(all_intervals)):
                if ( (all_intervals[i].begin!=all_intervals[i].data[0]) | (all_intervals[i].end!=all_intervals[i].data[2])):
                    # a trimmed piece: linearly interpolate y at the new x endpoints
                    raw_line=all_intervals[i].data
                    frac1=float((all_intervals[i].begin-raw_line[0]))/(raw_line[2]-raw_line[0])
                    frac2=float((all_intervals[i].end-raw_line[0]))/(raw_line[2]-raw_line[0])
                    optimized_line_segs.append([all_intervals[i].begin, int(frac1*(raw_line[3]-raw_line[1])+raw_line[1]), all_intervals[i].end, int(frac2*(raw_line[3]-raw_line[1])+raw_line[1])])
                else:
                    optimized_line_segs.append(all_intervals[i].data)
        else:
            for i in range(len(all_intervals)):
                optimized_line_segs.append(all_intervals[i].data)
    optimized_line_segs=np.array(optimized_line_segs)
return optimized_line_segs | [
"numpy.delete",
"numpy.argmax",
"numpy.empty",
"numpy.append",
"numpy.array",
"numpy.linalg.inv",
"skimage.measure.find_contours",
"numpy.arange",
"numpy.linalg.norm",
"intervaltree.IntervalTree",
"skimage.measure.approximate_polygon"
] | [((1365, 1395), 'numpy.linalg.inv', 'np.linalg.inv', (["params['Kalib']"], {}), "(params['Kalib'])\n", (1378, 1395), True, 'import numpy as np\n'), ((2562, 2589), 'skimage.measure.find_contours', 'find_contours', (['label_map', '(0)'], {}), '(label_map, 0)\n', (2575, 2589), False, 'from skimage.measure import find_contours, approximate_polygon\n'), ((3592, 3619), 'skimage.measure.find_contours', 'find_contours', (['label_map', '(0)'], {}), '(label_map, 0)\n', (3605, 3619), False, 'from skimage.measure import find_contours, approximate_polygon\n'), ((3665, 3677), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3673, 3677), True, 'import numpy as np\n'), ((3696, 3708), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3704, 3708), True, 'import numpy as np\n'), ((3735, 3747), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3743, 3747), True, 'import numpy as np\n'), ((11149, 11178), 'numpy.array', 'np.array', (['optimized_line_segs'], {}), '(optimized_line_segs)\n', (11157, 11178), True, 'import numpy as np\n'), ((262, 323), 'numpy.array', 'np.array', (['[[570.34, 0, 319.5], [0, 570.34, 239.5], [0, 0, 1]]'], {}), '([[570.34, 0, 319.5], [0, 570.34, 239.5], [0, 0, 1]])\n', (270, 323), True, 'import numpy as np\n'), ((3008, 3034), 'numpy.argmax', 'np.argmax', (['contour_lengths'], {}), '(contour_lengths)\n', (3017, 3034), True, 'import numpy as np\n'), ((3351, 3406), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['ground_contour', 'polyline_tolerance'], {}), '(ground_contour, polyline_tolerance)\n', (3370, 3406), False, 'from skimage.measure import find_contours, approximate_polygon\n'), ((4146, 4172), 'numpy.argmax', 'np.argmax', (['contour_lengths'], {}), '(contour_lengths)\n', (4155, 4172), True, 'import numpy as np\n'), ((4921, 4942), 'numpy.empty', 'np.empty', (['(0, 1)', 'int'], {}), '((0, 1), int)\n', (4929, 4942), True, 'import numpy as np\n'), ((4991, 5034), 'numpy.arange', 'np.arange', (['close_nonvertical_lines.shape[0]'], {}), 
'(close_nonvertical_lines.shape[0])\n', (5000, 5034), True, 'import numpy as np\n'), ((5197, 5290), 'numpy.linalg.norm', 'np.linalg.norm', (['(close_nonvertical_lines[:, 2:4] - close_nonvertical_lines[:, 0:2])'], {'axis': '(1)'}), '(close_nonvertical_lines[:, 2:4] - close_nonvertical_lines[:,\n 0:2], axis=1)\n', (5211, 5290), True, 'import numpy as np\n'), ((5303, 5350), 'numpy.argmax', 'np.argmax', (['close_nonvertical_lengths[open_list]'], {}), '(close_nonvertical_lengths[open_list])\n', (5312, 5350), True, 'import numpy as np\n'), ((5465, 5495), 'numpy.delete', 'np.delete', (['open_list', 'best_ind'], {}), '(open_list, best_ind)\n', (5474, 5495), True, 'import numpy as np\n'), ((5519, 5555), 'numpy.append', 'np.append', (['closed_list', 'current_node'], {}), '(closed_list, current_node)\n', (5528, 5555), True, 'import numpy as np\n'), ((5582, 5636), 'numpy.array', 'np.array', (['close_nonvertical_lines[current_node][0:3:2]'], {}), '(close_nonvertical_lines[current_node][0:3:2])\n', (5590, 5636), True, 'import numpy as np\n'), ((5749, 5763), 'intervaltree.IntervalTree', 'IntervalTree', ([], {}), '()\n', (5761, 5763), False, 'from intervaltree import Interval, IntervalTree\n'), ((6129, 6150), 'numpy.empty', 'np.empty', (['(0, 1)', 'int'], {}), '((0, 1), int)\n', (6137, 6150), True, 'import numpy as np\n'), ((6618, 6685), 'numpy.argmax', 'np.argmax', (['close_nonvertical_lengths[open_list[potential_open_ind]]'], {}), '(close_nonvertical_lengths[open_list[potential_open_ind]])\n', (6627, 6685), True, 'import numpy as np\n'), ((6784, 6820), 'numpy.append', 'np.append', (['closed_list', 'current_node'], {}), '(closed_list, current_node)\n', (6793, 6820), True, 'import numpy as np\n'), ((6869, 6928), 'numpy.delete', 'np.delete', (['open_list', 'potential_open_ind[best_potentia_ind]'], {}), '(open_list, potential_open_ind[best_potentia_ind])\n', (6878, 6928), True, 'import numpy as np\n'), ((2904, 2931), 'numpy.linalg.norm', 'np.linalg.norm', (['bg_end_diff'], {}), 
'(bg_end_diff)\n', (2918, 2931), True, 'import numpy as np\n'), ((4042, 4069), 'numpy.linalg.norm', 'np.linalg.norm', (['bg_end_diff'], {}), '(bg_end_diff)\n', (4056, 4069), True, 'import numpy as np\n'), ((6460, 6492), 'numpy.append', 'np.append', (['potential_open_ind', 'i'], {}), '(potential_open_ind, i)\n', (6469, 6492), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import pytest
import cntk as C
from cntk.axis import Axis
def _graph_dict():
    """Create a throwaway CNTK graph used only to exercise traversal.

    The operations carry no real meaning; the graph deliberately
    contains an unnamed parameter and two ops that share a name.
    """
    nodes = {}
    nodes['i1'] = C.sequence.input_variable(shape=(2, 3), sequence_axis=Axis('ia'), name='i1')
    nodes['c1'] = C.constant(shape=(2, 3), value=6, name='c1')
    nodes['p1'] = C.parameter(shape=(3, 2), init=7, name='p1')
    nodes['op1'] = C.plus(nodes['i1'], nodes['c1'], name='op1')
    nodes['op2'] = C.times(nodes['op1'], nodes['p1'], name='op2')
    # parameter intentionally left without a name
    nodes['p2'] = C.parameter(shape=(2, 2))
    # two ops sharing the same name on purpose (tests duplicate lookup)
    nodes['op3a'] = C.plus(nodes['op2'], nodes['p2'], name='op3')
    nodes['op3b'] = C.plus(nodes['op3a'], nodes['p2'], name='op3')
    nodes['first'] = C.sequence.first(nodes['op3b'], name='past')
    nodes['root'] = nodes['first']
    return nodes
def _simple_dict():
    """Build a minimal graph: (i1 + c1) * p1, combined with a 'minus' branch."""
    nodes = {}
    nodes['i1'] = C.input_variable(shape=(2, 3), name='i1')
    nodes['c1'] = C.constant(shape=(2, 3), value=6, name='c1')
    nodes['p1'] = C.parameter(shape=(3, 2), init=7, name='p1')
    nodes['op1'] = C.plus(nodes['i1'], nodes['c1'], name='op1')
    nodes['op2'] = C.times(nodes['op1'], nodes['p1'], name='op2')
    nodes['root'] = nodes['op2']
    nodes['target'] = C.input_variable((), name='label')
    minus_branch = C.minus(nodes['target'], C.constant(1, name='c2'), name='minus')
    nodes['all'] = C.combine([nodes['root'], minus_branch], name='all')
    return nodes
def test_find_nodes():
    """find_all_with_name / find_by_name work both as free functions and
    methods, handle duplicate names, and return []/None for misses."""
    d = _graph_dict()
    root = d['root']
    for name in ('i1', 'c1', 'p1', 'op1', 'op2', 'past'):
        # both the module-level function and the method form must agree
        for found in (C.logging.graph.find_all_with_name(root, name),
                      root.find_all_with_name(name)):
            assert len(found) == 1, name
            assert found[0].name == name, name
        for node in (C.logging.graph.find_by_name(root, name),
                     root.find_by_name(name)):
            assert node is not None
            assert node.name == name, name
    # duplicate name: both 'op3' nodes are returned
    dup = C.logging.graph.find_all_with_name(root, 'op3')
    assert len(dup) == 2, 'op3'
    assert all(node.name == 'op3' for node in dup), 'op3'
    # a missing name yields an empty list / None
    assert C.logging.graph.find_all_with_name(root, 'none') == []
    assert C.logging.graph.find_by_name(root, 'none') is None
def test_find_nodes_returning_proper_types():
    """find_by_name returns the concrete Constant / Parameter objects."""
    d = _graph_dict()
    c1 = C.logging.graph.find_by_name(d['root'], 'c1')
    assert isinstance(c1, C.Constant)
    assert np.allclose(c1.value, np.full((2, 3), 6.0))
    p1 = C.logging.graph.find_by_name(d['root'], 'p1')
    assert isinstance(p1, C.Parameter)
    assert np.allclose(p1.value, np.full((3, 2), 7.0))
def test_plot():
    """plot() emits a non-empty description mentioning Plus before Times."""
    d = _simple_dict()
    rendered = C.logging.graph.plot(d['all'])
    assert len(rendered) != 0
    plus_pos = rendered.find("Plus")
    times_pos = rendered.find("Times")
    # both op kinds must appear, and Plus must come first
    assert plus_pos != -1
    assert times_pos != -1
    assert plus_pos < times_pos
@pytest.mark.parametrize("depth", [
(-1), (0), (1), (5)])
def test_depth_first_search(depth):
'''
For graphs without blocks, depth should not make any difference.
'''
d = _simple_dict()
found = C.logging.graph.depth_first_search(d['all'], lambda x: True, depth=depth)
found_names = [v.name for v in found]
assert found_names == ['all', 'op2', 'op1',
'i1', 'c1', 'p1', 'minus', 'label', 'c2']
@pytest.mark.parametrize("depth,prefix_count", [
(0, {
"Input('image'":1,
"blocked_dense:":1,
"Dense(":1,
"MaxPooling(":1,
"Convolution(":1,
"Parameter('W'":3,
"Parameter('b'":3,
}),
(1, {
"Input('image'":1,
"blocked_dense:":1,
"Dense(":2,
"MaxPooling(":1,
"Convolution(":1,
"Parameter('W'":3,
"Parameter('b'":3,
}),
(2, {
"Input('image'":1,
"blocked_dense:":1,
"Dense(":2,
"MaxPooling(":1,
"Convolution(":1,
"Parameter('W'":3,
"Parameter('b'":3,
# in addition to depth=1...
"Plus(":2,
"Times(":2,
}),
(-1, {
"Input('image'":1,
"blocked_dense:":1,
"Dense(":2,
"MaxPooling(":1,
"Convolution(":2,
"Parameter('W'":3,
"Parameter('b'":3,
"Times(":2,
# in addition to depth=2...
"Plus(":3,
"ReLU(":1,
"Pooling(Tensor":1,
}),
])
def test_depth_first_search_blocks(depth, prefix_count):
from cntk.layers import Sequential, Convolution, MaxPooling, Dense
from cntk.default_options import default_options
def Blocked_Dense(dim, activation=None):
dense = Dense(dim, activation=activation)
@C.layers.BlockFunction('blocked_dense', 'blocked_dense')
def func(x):
return dense(x)
return func
with default_options(activation=C.relu):
image_to_vec = Sequential ([
Convolution((5,5), 32, pad=True),
MaxPooling((3,3), strides=(2,2)),
Dense(10, activation=None),
Blocked_Dense(10)
]
)
in1 = C.input_variable(shape=(3, 256, 256), name='image')
img = image_to_vec(in1)
found = C.logging.graph.depth_first_search(img, lambda x: True, depth=depth)
found_str = [str(v) for v in found]
assert len(found) == sum(prefix_count.values())
for prefix, count in prefix_count.items():
assert sum(f.startswith(prefix) for f in found_str) == count
| [
"cntk.default_options.default_options",
"cntk.axis.Axis",
"cntk.layers.MaxPooling",
"cntk.constant",
"cntk.layers.Convolution",
"cntk.times",
"numpy.zeros",
"cntk.layers.BlockFunction",
"cntk.plus",
"cntk.input_variable",
"cntk.parameter",
"cntk.logging.graph.find_by_name",
"cntk.sequence.fi... | [((3159, 3206), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth"""', '[-1, 0, 1, 5]'], {}), "('depth', [-1, 0, 1, 5])\n", (3182, 3206), False, 'import pytest\n'), ((3613, 4332), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth,prefix_count"""', '[(0, {"Input(\'image\'": 1, \'blocked_dense:\': 1, \'Dense(\': 1, \'MaxPooling(\': \n 1, \'Convolution(\': 1, "Parameter(\'W\'": 3, "Parameter(\'b\'": 3}), (1, {\n "Input(\'image\'": 1, \'blocked_dense:\': 1, \'Dense(\': 2, \'MaxPooling(\': 1,\n \'Convolution(\': 1, "Parameter(\'W\'": 3, "Parameter(\'b\'": 3}), (2, {\n "Input(\'image\'": 1, \'blocked_dense:\': 1, \'Dense(\': 2, \'MaxPooling(\': 1,\n \'Convolution(\': 1, "Parameter(\'W\'": 3, "Parameter(\'b\'": 3, \'Plus(\': 2,\n \'Times(\': 2}), (-1, {"Input(\'image\'": 1, \'blocked_dense:\': 1, \'Dense(\':\n 2, \'MaxPooling(\': 1, \'Convolution(\': 2, "Parameter(\'W\'": 3,\n "Parameter(\'b\'": 3, \'Times(\': 2, \'Plus(\': 3, \'ReLU(\': 1,\n \'Pooling(Tensor\': 1})]'], {}), '(\'depth,prefix_count\', [(0, {"Input(\'image\'": 1,\n \'blocked_dense:\': 1, \'Dense(\': 1, \'MaxPooling(\': 1, \'Convolution(\': 1,\n "Parameter(\'W\'": 3, "Parameter(\'b\'": 3}), (1, {"Input(\'image\'": 1,\n \'blocked_dense:\': 1, \'Dense(\': 2, \'MaxPooling(\': 1, \'Convolution(\': 1,\n "Parameter(\'W\'": 3, "Parameter(\'b\'": 3}), (2, {"Input(\'image\'": 1,\n \'blocked_dense:\': 1, \'Dense(\': 2, \'MaxPooling(\': 1, \'Convolution(\': 1,\n "Parameter(\'W\'": 3, "Parameter(\'b\'": 3, \'Plus(\': 2, \'Times(\': 2}), (-1,\n {"Input(\'image\'": 1, \'blocked_dense:\': 1, \'Dense(\': 2, \'MaxPooling(\': 1,\n \'Convolution(\': 2, "Parameter(\'W\'": 3, "Parameter(\'b\'": 3, \'Times(\': 2,\n \'Plus(\': 3, \'ReLU(\': 1, \'Pooling(Tensor\': 1})])\n', (3636, 4332), False, 'import pytest\n'), ((562, 606), 'cntk.constant', 'C.constant', ([], {'shape': '(2, 3)', 'value': '(6)', 'name': '"""c1"""'}), "(shape=(2, 3), value=6, name='c1')\n", (572, 606), True, 
'import cntk as C\n'), ((621, 665), 'cntk.parameter', 'C.parameter', ([], {'shape': '(3, 2)', 'init': '(7)', 'name': '"""p1"""'}), "(shape=(3, 2), init=7, name='p1')\n", (632, 665), True, 'import cntk as C\n'), ((682, 718), 'cntk.plus', 'C.plus', (["d['i1']", "d['c1']"], {'name': '"""op1"""'}), "(d['i1'], d['c1'], name='op1')\n", (688, 718), True, 'import cntk as C\n'), ((734, 772), 'cntk.times', 'C.times', (["d['op1']", "d['p1']"], {'name': '"""op2"""'}), "(d['op1'], d['p1'], name='op2')\n", (741, 772), True, 'import cntk as C\n'), ((926, 951), 'cntk.parameter', 'C.parameter', ([], {'shape': '(2, 2)'}), '(shape=(2, 2))\n', (937, 951), True, 'import cntk as C\n'), ((991, 1028), 'cntk.plus', 'C.plus', (["d['op2']", "d['p2']"], {'name': '"""op3"""'}), "(d['op2'], d['p2'], name='op3')\n", (997, 1028), True, 'import cntk as C\n'), ((1045, 1083), 'cntk.plus', 'C.plus', (["d['op3a']", "d['p2']"], {'name': '"""op3"""'}), "(d['op3a'], d['p2'], name='op3')\n", (1051, 1083), True, 'import cntk as C\n'), ((1102, 1142), 'cntk.sequence.first', 'C.sequence.first', (["d['op3b']"], {'name': '"""past"""'}), "(d['op3b'], name='past')\n", (1118, 1142), True, 'import cntk as C\n'), ((1233, 1274), 'cntk.input_variable', 'C.input_variable', ([], {'shape': '(2, 3)', 'name': '"""i1"""'}), "(shape=(2, 3), name='i1')\n", (1249, 1274), True, 'import cntk as C\n'), ((1289, 1333), 'cntk.constant', 'C.constant', ([], {'shape': '(2, 3)', 'value': '(6)', 'name': '"""c1"""'}), "(shape=(2, 3), value=6, name='c1')\n", (1299, 1333), True, 'import cntk as C\n'), ((1348, 1392), 'cntk.parameter', 'C.parameter', ([], {'shape': '(3, 2)', 'init': '(7)', 'name': '"""p1"""'}), "(shape=(3, 2), init=7, name='p1')\n", (1359, 1392), True, 'import cntk as C\n'), ((1408, 1444), 'cntk.plus', 'C.plus', (["d['i1']", "d['c1']"], {'name': '"""op1"""'}), "(d['i1'], d['c1'], name='op1')\n", (1414, 1444), True, 'import cntk as C\n'), ((1460, 1498), 'cntk.times', 'C.times', (["d['op1']", "d['p1']"], {'name': '"""op2"""'}), 
"(d['op1'], d['p1'], name='op2')\n", (1467, 1498), True, 'import cntk as C\n'), ((1543, 1577), 'cntk.input_variable', 'C.input_variable', (['()'], {'name': '"""label"""'}), "((), name='label')\n", (1559, 1577), True, 'import cntk as C\n'), ((2282, 2334), 'cntk.logging.graph.find_all_with_name', 'C.logging.graph.find_all_with_name', (["d['root']", '"""op3"""'], {}), "(d['root'], 'op3')\n", (2316, 2334), True, 'import cntk as C\n'), ((2437, 2490), 'cntk.logging.graph.find_all_with_name', 'C.logging.graph.find_all_with_name', (["d['root']", '"""none"""'], {}), "(d['root'], 'none')\n", (2471, 2490), True, 'import cntk as C\n'), ((2661, 2706), 'cntk.logging.graph.find_by_name', 'C.logging.graph.find_by_name', (["d['root']", '"""c1"""'], {}), "(d['root'], 'c1')\n", (2689, 2706), True, 'import cntk as C\n'), ((2810, 2855), 'cntk.logging.graph.find_by_name', 'C.logging.graph.find_by_name', (["d['root']", '"""p1"""'], {}), "(d['root'], 'p1')\n", (2838, 2855), True, 'import cntk as C\n'), ((3001, 3031), 'cntk.logging.graph.plot', 'C.logging.graph.plot', (["d['all']"], {}), "(d['all'])\n", (3021, 3031), True, 'import cntk as C\n'), ((3377, 3450), 'cntk.logging.graph.depth_first_search', 'C.logging.graph.depth_first_search', (["d['all']", '(lambda x: True)'], {'depth': 'depth'}), "(d['all'], lambda x: True, depth=depth)\n", (3411, 3450), True, 'import cntk as C\n'), ((5534, 5585), 'cntk.input_variable', 'C.input_variable', ([], {'shape': '(3, 256, 256)', 'name': '"""image"""'}), "(shape=(3, 256, 256), name='image')\n", (5550, 5585), True, 'import cntk as C\n'), ((5627, 5695), 'cntk.logging.graph.depth_first_search', 'C.logging.graph.depth_first_search', (['img', '(lambda x: True)'], {'depth': 'depth'}), '(img, lambda x: True, depth=depth)\n', (5661, 5695), True, 'import cntk as C\n'), ((1831, 1882), 'cntk.logging.graph.find_all_with_name', 'C.logging.graph.find_all_with_name', (["d['root']", 'name'], {}), "(d['root'], name)\n", (1865, 1882), True, 'import cntk as C\n'), 
((2088, 2133), 'cntk.logging.graph.find_by_name', 'C.logging.graph.find_by_name', (["d['root']", 'name'], {}), "(d['root'], name)\n", (2116, 2133), True, 'import cntk as C\n'), ((2525, 2572), 'cntk.logging.graph.find_by_name', 'C.logging.graph.find_by_name', (["d['root']", '"""none"""'], {}), "(d['root'], 'none')\n", (2553, 2572), True, 'import cntk as C\n'), ((5085, 5118), 'cntk.layers.Dense', 'Dense', (['dim'], {'activation': 'activation'}), '(dim, activation=activation)\n', (5090, 5118), False, 'from cntk.layers import Sequential, Convolution, MaxPooling, Dense\n'), ((5128, 5184), 'cntk.layers.BlockFunction', 'C.layers.BlockFunction', (['"""blocked_dense"""', '"""blocked_dense"""'], {}), "('blocked_dense', 'blocked_dense')\n", (5150, 5184), True, 'import cntk as C\n'), ((5264, 5298), 'cntk.default_options.default_options', 'default_options', ([], {'activation': 'C.relu'}), '(activation=C.relu)\n', (5279, 5298), False, 'from cntk.default_options import default_options\n'), ((525, 535), 'cntk.axis.Axis', 'Axis', (['"""ia"""'], {}), "('ia')\n", (529, 535), False, 'from cntk.axis import Axis\n'), ((2778, 2794), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (2786, 2794), True, 'import numpy as np\n'), ((2928, 2944), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (2936, 2944), True, 'import numpy as np\n'), ((1645, 1669), 'cntk.constant', 'C.constant', (['(1)'], {'name': '"""c2"""'}), "(1, name='c2')\n", (1655, 1669), True, 'import cntk as C\n'), ((5349, 5382), 'cntk.layers.Convolution', 'Convolution', (['(5, 5)', '(32)'], {'pad': '(True)'}), '((5, 5), 32, pad=True)\n', (5360, 5382), False, 'from cntk.layers import Sequential, Convolution, MaxPooling, Dense\n'), ((5395, 5429), 'cntk.layers.MaxPooling', 'MaxPooling', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (5405, 5429), False, 'from cntk.layers import Sequential, Convolution, MaxPooling, Dense\n'), ((5441, 5467), 'cntk.layers.Dense', 'Dense', (['(10)'], {'activation': 
'None'}), '(10, activation=None)\n', (5446, 5467), False, 'from cntk.layers import Sequential, Convolution, MaxPooling, Dense\n')] |
"""
Streamlit WhatsApp Chat Analyzer
"""
import re
import os
import time
import warnings
import logging
import logging.config
import yaml
from typing import Dict, Any
import streamlit as st
from numpy import sum as npsum
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df,\
statistics, process_data, WhatsAppConfig
from processor.graphs.charts import pie_display_emojis, time_series_plot,\
message_cluster, most_active_member, most_active_day,\
max_words_used, top_media_contributor, who_shared_links,\
sentiment_analysis, most_suitable_day, most_suitable_hour
from processor.common.configure import BANNER, TITLE, REPO_URL, FORMAT_BUTTON,\
HIDE_STREAMLIT_STYLE, MAIN_STYLE, APPLICATION_FEATURE
# Streamlit page bootstrap: global options, page config and static chrome.
st.set_option('deprecation.showPyplotGlobalUse', False)
# silence the matplotlib warning for the emoji glyph used in the charts
warnings.filterwarnings(
    "ignore", message="Glyph 128584 missing from current font.")
# Initial page config
st.set_page_config(
    page_title=TITLE,
    page_icon="",
    # layout="wide",
    initial_sidebar_state="expanded",
)
# Application NAV BAR
_, n2, n3 = st.columns([4, 2, 1])
nav_area = st.empty()
nav_area.write("")
# main page header and intro text
st.title(TITLE)
st.subheader("**♟ General Statistics ♟**")
st.write('''* This app is meant as for educational and demonstration purpose only.
Try it out by `Uploading WITHOUT MEDIA whatsapp chat export` here.''')
# sidebar: title, feature list and author credit
st.sidebar.title("WhatsApp Chat Analyzer")
st.sidebar.markdown('''Analyze the chats. Unravel the mysteries behind words''')
st.sidebar.markdown(APPLICATION_FEATURE)
st.sidebar.markdown("[Made by <NAME>](https://www.linkedin.com/in/mainak-chaudhuri-127898176/)")
def add_multilingual_stopwords() -> set:
    """
    Read every language file under ``configs/stopwords`` and merge the
    words with wordcloud's built-in STOPWORDS.

    Top languages are shipped under the stopwords folder, one word per
    line per file.

    attributes
    ----------
    None

    Returns
    -------
    set: distinct union of built-in and file-based stop words
    """
    multilingual_list = []
    for file_name in os.listdir('configs/stopwords'):
        # 'with' guarantees the handle is closed (the original leaked one
        # open file per language)
        with open('configs/stopwords/' + file_name, "r") as stopword_file:
            for word in stopword_file:
                word = re.sub('[\n]', '', word)
                multilingual_list.append(word)
    return set(STOPWORDS).union(set(multilingual_list))
def generate_word_cloud(text: str, title: str) -> None:
    """
    Build a WordCloud from *text* and render it to the Streamlit page.

    attributes
    ----------
    text (str): string of words to visualise
    title (str): title shown above the figure

    Return
    ------
    None -- the figure is pushed to Streamlit via st.pyplot(), not
    returned (the original docstring wrongly claimed a figure return).
    """
    wordcloud = WordCloud(
        scale=3,
        width=500,
        height=330,
        max_words=200,
        colormap='tab20c',
        stopwords=add_multilingual_stopwords(),
        collocations=True,
        contour_color='#5d0f24',
        contour_width=3,
        font_path='Laila-Regular.ttf',
        background_color="white").generate(text)
    # render the generated image with matplotlib, then hand it to Streamlit
    plt.figure(figsize=(10, 8))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.title(title)
    st.pyplot()
def next_page():
    """Advance the pagination cursor by one page."""
    st.session_state.page = st.session_state.page + 1
def prev_page():
    """Move the pagination cursor back by one page."""
    st.session_state.page = st.session_state.page - 1
def pagination_of_dataframe(raw_df):
    """
    Render *raw_df* (datetime/name/message columns) ten rows at a time
    with Previous/Next buttons.

    The page index lives in st.session_state.page so it survives
    Streamlit reruns. The page count is derived from the data size;
    the original hard-coded a cap of 10 pages while the label said
    "of 5", which disagreed with each other and with the data.
    """
    page_size = 10
    if "page" not in st.session_state:
        st.session_state.page = 0
    # ceil(len / page_size) without importing math; at least one page
    total_pages = max(1, -(-len(raw_df) // page_size))
    col1, _, _, col2, _, col3 = st.columns(6)
    if st.session_state.page < total_pages - 1:
        col3.button("Next", on_click=next_page)
    else:
        col3.write("")  # this makes the empty column show up on mobile
    if st.session_state.page > 0:
        col1.button("Previous", on_click=prev_page)
    else:
        col1.write("")  # this makes the empty column show up on mobile
    col2.write(f"Page {1 + st.session_state.page} of {total_pages}")
    start = page_size * st.session_state.page
    end = start + page_size
    st.write("")
    st.dataframe(raw_df[["datetime", "name", "message"]].iloc[start:end])
    st.markdown("#")
def display_statistics(stats):
    """Show the headline chat statistics as four Streamlit metric widgets."""
    metric_specs = [
        ("Total Messages", stats.get('total_messages'), "📦 📨"),
        ("Total Members", stats.get('total_members'), "💃🕺"),
        ("Total Media", stats.get('media_message'), "🎞️ 📷"),
        ("Link shared", int(stats.get('link_shared')), "🖇️ 🔗"),
    ]
    # one metric per column, in the same left-to-right order as the specs
    for column, (label, value, delta) in zip(st.columns(4), metric_specs):
        column.metric(label, value, delta=delta)
    st.text("")
    st.text("")
def chart_display(data_frame):
    """
    Render the full set of analysis charts for the processed chat
    DataFrame: per-member and per-day activity, word usage, link
    sharing, the member cluster plot and the emoji pie chart.
    """
    st.markdown("----")
    st.header("🔘 Most Active Member")
    st.info("🔋 Member comparision based on the number of messages\
        he/she posted in group chat")
    st.pyplot(most_active_member(data_frame))

    st.markdown("----")
    st.header("🔘 Most Active Day")
    st.info("🔋 Member comparision based on the number of messages\
        he/she posted in group chat whatsapp.r.t Day")
    st.pyplot(most_active_day(data_frame))

    st.markdown("----")
    st.header("🔘 Who uses more words in sentences")
    st.info("🔋 Member uses more number of sentences during the conversation")
    st.pyplot(max_words_used(data_frame))

    st.markdown("----")
    st.header("🔘 Who shares Links in group most? ")
    st.info("🔋 Members who shares internet links of information with others")
    st.pyplot(who_shared_links(data_frame))

    # Disabled charts (were dead bare-string blocks in the original);
    # re-enable by uncommenting:
    # st.markdown("----")
    # st.header("🔘 Most Active Day ")
    # st.info("🔋 Member who active for suitable Day")
    # st.pyplot(most_suitable_day(data_frame))
    # st.markdown("----")
    # st.header("🔘 Most Active Hour")
    # st.info("🔋 Member who active during suitable hours")
    # st.pyplot(most_suitable_hour(data_frame))

    st.markdown("----")
    st.header("🔘 Member activity Cluster")
    st.info("🔋 Cluster hover about the total messages, Emoji's, Links, Words\
        and Letter by individual member")
    st.write(message_cluster(data_frame))

    # Disabled chart, re-enable by uncommenting:
    # st.markdown("----")
    # st.header("🔘 Over the Time Analysis ")
    # st.info("🔋 Group activity over the time whatsapp.r.t to\
    #     number of messages")
    # st.write(time_series_plot(data_frame))

    st.markdown("----")
    st.header("🔘 Curious about Emoji's ?")
    st.info("🔋 The most use Emoji's in converstion is show with\
        larger sector")
    pie_display = pie_display_emojis(data_frame)
    st.plotly_chart(pie_display)
def file_process(data, config):
    """
    Process a raw chat export and render the full analysis page.

    The export text is parsed with the configured regex into a message
    dataframe, headline statistics are displayed, then per-member word
    clouds, sentiment analysis and the combined charts are rendered.

    :param data: raw chat export as a single utf-8 string
    :param config: parsed YAML configuration (expects a 'whatsapp' section)
    """
    # Read the source (regex/format) configuration.
    source_config = WhatsAppConfig(**config['whatsapp'])
    whatsapp = WhatsAppProcess(source_config)
    message = whatsapp.apply_regex(data)
    raw_df = process_data(message)
    data_frame = whatsapp.get_dataframe(raw_df)
    stats = statistics(raw_df, data_frame)
    st.markdown(f'# {stats.get("group_name")}')
    st.markdown("----")
    # Paginated view of the raw messages.
    pagination_of_dataframe(raw_df)
    # Headline statistics row.
    display_statistics(stats)
    # Word-cloud dataframe for individual-member metrics.
    cloud_df = whatsapp.cloud_data(raw_df)
    st.header("🔘 Frequently used words")
    sorted_authors = sorted_authors_df(cloud_df)
    author = st.selectbox('', sorted_authors)
    author_cloud_df = cloud_df[cloud_df['name'] == author]
    text = " ".join(review for review in author_cloud_df.message)
    # NOTE(review): matching on the last five characters of the name
    # mirrors the original behaviour (presumably a workaround for name
    # encoding differences) -- confirm before tightening to an exact match.
    fuzzy_df = data_frame[data_frame.name.str.contains(author[-5:])]
    exact_df = data_frame[data_frame.name == author]
    col1, col2, col3, col4, col5 = st.columns(5)
    col1.metric("Posted Messages", author_cloud_df.shape[0])
    col2.metric("Emoji's Shared", sum(fuzzy_df.emojis.str.len()))
    col3.metric("Link Shared", int(exact_df.urlcount.sum()))
    col4.metric("Total Words", int(exact_df.word_count.sum()))
    average = round(npsum(fuzzy_df.word_count) / fuzzy_df.shape[0], 2)
    col5.metric("Average words/Message", average)
    # Fall back to a placeholder token when the member has no words.
    generate_word_cloud(text if text else "NOWORD",
                        "Word Cloud for individual Words")
    st.markdown("----")
    st.header("🔘 Words and Phrases frequently used in Chat")
    st.info("🔋 Frequently used words or phrases by all members in group "
            "chat. Most discussion occurs around the words below.")
    text = " ".join(review for review in cloud_df.message)
    generate_word_cloud(text, "Word Cloud for Chat words")
    st.markdown("----")
    st.header("🔘 Who has Positive Sentiment? ")
    st.info("🔋 Member sentiment analysis score based on the words used in "
            "messages. A sentiment score between 0.5 and 1 is considered "
            "Positive. Pure English words and phrases are ideal for the "
            "calculation.")
    st.pyplot(sentiment_analysis(cloud_df))
    # Derive the day/hour/date columns used by the charts below.
    whatsapp.day_analysis(data_frame)
    # Combined chart section.
    chart_display(data_frame)
    st.markdown("----")
    st.header("🔘 Top-10 Media Contributor ")
    st.info("🔋 Comparison of members who contribute the most Images, "
            "Videos or Documents")
    st.pyplot(top_media_contributor(raw_df))
    # Footer separator.
    st.markdown("----")
def main():
    """
    Application entry point.

    Loads the YAML configuration, configures logging, and renders the
    file-upload widget; once a chat export is uploaded it is decoded
    from utf-8 and handed to file_process().
    """
    # Parse the YAML configuration file.  Use a context manager so the
    # handle is closed (the original leaked it via yaml.safe_load(open(...))).
    config_path = 'configs/app_configuration.yml'
    with open(config_path) as config_file:
        config = yaml.safe_load(config_file)
    # Configure logging from the config's 'logging' section.
    log_config = config['logging']
    logging.config.dictConfig(log_config)
    logger = logging.getLogger(__name__)
    logger.info("Welcome to WhatsApp Chat Analyzer")
    c1, c2 = st.columns([3, 1])
    # Uploaded-file widget: only plain-text chat exports are accepted.
    uploaded_file = c1.file_uploader(
        "Choose a TXT file only",
        type=['txt'],
        accept_multiple_files=False)
    if uploaded_file is not None:
        # Streamlit hands back bytes; decode to a utf-8 string.
        data = uploaded_file.getvalue().decode("utf-8")
        # Regex parsing downstream handles both iOS and Android exports.
        st.markdown("The Genesis chatter of the group:")
        file_process(data, config)
if __name__ == "__main__":
    print(BANNER)
    main()
| [
"matplotlib.pyplot.title",
"processor.graphs.charts.who_shared_links",
"numpy.sum",
"processor.graphs.charts.max_words_used",
"processor.graphs.charts.message_cluster",
"streamlit.title",
"streamlit.sidebar.title",
"matplotlib.pyplot.figure",
"processor.graphs.charts.top_media_contributor",
"strea... | [((820, 875), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showPyplotGlobalUse"""', '(False)'], {}), "('deprecation.showPyplotGlobalUse', False)\n", (833, 875), True, 'import streamlit as st\n'), ((877, 966), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""Glyph 128584 missing from current font."""'}), "('ignore', message=\n 'Glyph 128584 missing from current font.')\n", (900, 966), False, 'import warnings\n'), ((991, 1080), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'TITLE', 'page_icon': '""""""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title=TITLE, page_icon='', initial_sidebar_state=\n 'expanded')\n", (1009, 1080), True, 'import streamlit as st\n'), ((1148, 1169), 'streamlit.columns', 'st.columns', (['[4, 2, 1]'], {}), '([4, 2, 1])\n', (1158, 1169), True, 'import streamlit as st\n'), ((1181, 1191), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1189, 1191), True, 'import streamlit as st\n'), ((1215, 1230), 'streamlit.title', 'st.title', (['TITLE'], {}), '(TITLE)\n', (1223, 1230), True, 'import streamlit as st\n'), ((1233, 1275), 'streamlit.subheader', 'st.subheader', (['"""**♟ General Statistics ♟**"""'], {}), "('**♟ General Statistics ♟**')\n", (1245, 1275), True, 'import streamlit as st\n'), ((1276, 1443), 'streamlit.write', 'st.write', (['"""* This app is meant as for educational and demonstration purpose only.\n Try it out by `Uploading WITHOUT MEDIA whatsapp chat export` here."""'], {}), '(\n """* This app is meant as for educational and demonstration purpose only.\n Try it out by `Uploading WITHOUT MEDIA whatsapp chat export` here."""\n )\n', (1284, 1443), True, 'import streamlit as st\n'), ((1435, 1477), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""WhatsApp Chat Analyzer"""'], {}), "('WhatsApp Chat Analyzer')\n", (1451, 1477), True, 'import streamlit as st\n'), ((1478, 1554), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', 
(['"""Analyze the chats. Unravel the mysteries behind words"""'], {}), "('Analyze the chats. Unravel the mysteries behind words')\n", (1497, 1554), True, 'import streamlit as st\n'), ((1559, 1599), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['APPLICATION_FEATURE'], {}), '(APPLICATION_FEATURE)\n', (1578, 1599), True, 'import streamlit as st\n'), ((1602, 1708), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""[Made by <NAME>](https://www.linkedin.com/in/mainak-chaudhuri-127898176/)"""'], {}), "(\n '[Made by <NAME>](https://www.linkedin.com/in/mainak-chaudhuri-127898176/)'\n )\n", (1621, 1708), True, 'import streamlit as st\n'), ((2033, 2064), 'os.listdir', 'os.listdir', (['"""configs/stopwords"""'], {}), "('configs/stopwords')\n", (2043, 2064), False, 'import os\n'), ((3116, 3143), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3126, 3143), True, 'import matplotlib.pyplot as plt\n'), ((3148, 3195), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (3158, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3200, 3215), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3208, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3220, 3236), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3229, 3236), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3252), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (3250, 3252), True, 'import streamlit as st\n'), ((3628, 3641), 'streamlit.columns', 'st.columns', (['(6)'], {}), '(6)\n', (3638, 3641), True, 'import streamlit as st\n'), ((4096, 4108), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (4104, 4108), True, 'import streamlit as st\n'), ((4113, 4182), 'streamlit.dataframe', 'st.dataframe', (["raw_df[['datetime', 'name', 'message']].iloc[start:end]"], {}), "(raw_df[['datetime', 'name', 
'message']].iloc[start:end])\n", (4125, 4182), True, 'import streamlit as st\n'), ((4187, 4203), 'streamlit.markdown', 'st.markdown', (['"""#"""'], {}), "('#')\n", (4198, 4203), True, 'import streamlit as st\n'), ((4329, 4342), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (4339, 4342), True, 'import streamlit as st\n'), ((4675, 4686), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (4682, 4686), True, 'import streamlit as st\n'), ((4696, 4707), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (4703, 4707), True, 'import streamlit as st\n'), ((4797, 4816), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (4808, 4816), True, 'import streamlit as st\n'), ((4821, 4854), 'streamlit.header', 'st.header', (['"""🔘 Most Active Member"""'], {}), "('🔘 Most Active Member')\n", (4830, 4854), True, 'import streamlit as st\n'), ((4859, 4967), 'streamlit.info', 'st.info', (['"""🔋 Member comparision based on the number of messages he/she posted in group chat"""'], {}), "(\n '🔋 Member comparision based on the number of messages he/she posted in group chat'\n )\n", (4866, 4967), True, 'import streamlit as st\n'), ((5011, 5030), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (5022, 5030), True, 'import streamlit as st\n'), ((5035, 5066), 'streamlit.header', 'st.header', (['"""🔘 Most Active Day"""'], {}), "('🔘 Most Active Day')\n", (5044, 5066), True, 'import streamlit as st\n'), ((5071, 5196), 'streamlit.info', 'st.info', (['"""🔋 Member comparision based on the number of messages he/she posted in group chat whatsapp.r.t Day"""'], {}), "(\n '🔋 Member comparision based on the number of messages he/she posted in group chat whatsapp.r.t Day'\n )\n", (5078, 5196), True, 'import streamlit as st\n'), ((5237, 5256), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (5248, 5256), True, 'import streamlit as st\n'), ((5261, 5308), 'streamlit.header', 'st.header', (['"""🔘 Who uses more words in 
sentences"""'], {}), "('🔘 Who uses more words in sentences')\n", (5270, 5308), True, 'import streamlit as st\n'), ((5313, 5386), 'streamlit.info', 'st.info', (['"""🔋 Member uses more number of sentences during the conversation"""'], {}), "('🔋 Member uses more number of sentences during the conversation')\n", (5320, 5386), True, 'import streamlit as st\n'), ((5434, 5453), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (5445, 5453), True, 'import streamlit as st\n'), ((5458, 5505), 'streamlit.header', 'st.header', (['"""🔘 Who shares Links in group most? """'], {}), "('🔘 Who shares Links in group most? ')\n", (5467, 5505), True, 'import streamlit as st\n'), ((5510, 5583), 'streamlit.info', 'st.info', (['"""🔋 Members who shares internet links of information with others"""'], {}), "('🔋 Members who shares internet links of information with others')\n", (5517, 5583), True, 'import streamlit as st\n'), ((5966, 5985), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (5977, 5985), True, 'import streamlit as st\n'), ((5990, 6028), 'streamlit.header', 'st.header', (['"""🔘 Member activity Cluster"""'], {}), "('🔘 Member activity Cluster')\n", (5999, 6028), True, 'import streamlit as st\n'), ((6033, 6156), 'streamlit.info', 'st.info', (['"""🔋 Cluster hover about the total messages, Emoji\'s, Links, Words and Letter by individual member"""'], {}), '(\n "🔋 Cluster hover about the total messages, Emoji\'s, Links, Words and Letter by individual member"\n )\n', (6040, 6156), True, 'import streamlit as st\n'), ((6403, 6422), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (6414, 6422), True, 'import streamlit as st\n'), ((6427, 6465), 'streamlit.header', 'st.header', (['"""🔘 Curious about Emoji\'s ?"""'], {}), '("🔘 Curious about Emoji\'s ?")\n', (6436, 6465), True, 'import streamlit as st\n'), ((6470, 6557), 'streamlit.info', 'st.info', (['"""🔋 The most use Emoji\'s in converstion is show with larger 
sector"""'], {}), '(\n "🔋 The most use Emoji\'s in converstion is show with larger sector")\n', (6477, 6557), True, 'import streamlit as st\n'), ((6573, 6603), 'processor.graphs.charts.pie_display_emojis', 'pie_display_emojis', (['data_frame'], {}), '(data_frame)\n', (6591, 6603), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((6608, 6636), 'streamlit.plotly_chart', 'st.plotly_chart', (['pie_display'], {}), '(pie_display)\n', (6623, 6636), True, 'import streamlit as st\n'), ((6794, 6830), 'processor.transformers.chat_eda.WhatsAppConfig', 'WhatsAppConfig', ([], {}), "(**config['whatsapp'])\n", (6808, 6830), False, 'from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df, statistics, process_data, WhatsAppConfig\n'), ((6846, 6876), 'processor.transformers.chat_eda.WhatsAppProcess', 'WhatsAppProcess', (['source_config'], {}), '(source_config)\n', (6861, 6876), False, 'from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df, statistics, process_data, WhatsAppConfig\n'), ((6931, 6952), 'processor.transformers.chat_eda.process_data', 'process_data', (['message'], {}), '(message)\n', (6943, 6952), False, 'from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df, statistics, process_data, WhatsAppConfig\n'), ((7013, 7043), 'processor.transformers.chat_eda.statistics', 'statistics', (['raw_df', 'data_frame'], {}), '(raw_df, data_frame)\n', (7023, 7043), False, 'from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df, statistics, process_data, WhatsAppConfig\n'), ((7098, 7117), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (7109, 7117), True, 'import streamlit as st\n'), ((7433, 7469), 'streamlit.header', 'st.header', (['"""🔘 Frequently used words"""'], 
{}), "('🔘 Frequently used words')\n", (7442, 7469), True, 'import streamlit as st\n'), ((7491, 7518), 'processor.transformers.chat_eda.sorted_authors_df', 'sorted_authors_df', (['cloud_df'], {}), '(cloud_df)\n', (7508, 7518), False, 'from processor.transformers.chat_eda import WhatsAppProcess, sorted_authors_df, statistics, process_data, WhatsAppConfig\n'), ((7758, 7771), 'streamlit.columns', 'st.columns', (['(5)'], {}), '(5)\n', (7768, 7771), True, 'import streamlit as st\n'), ((8673, 8692), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (8684, 8692), True, 'import streamlit as st\n'), ((8697, 8753), 'streamlit.header', 'st.header', (['"""🔘 Words and Phrases frequently used in Chat"""'], {}), "('🔘 Words and Phrases frequently used in Chat')\n", (8706, 8753), True, 'import streamlit as st\n'), ((8758, 8911), 'streamlit.info', 'st.info', (['"""🔋 Frequently used words or phrases by all members in group chat. Most dicussion occurs around below words or used frequently."""'], {}), "(\n '🔋 Frequently used words or phrases by all members in group chat. Most dicussion occurs around below words or used frequently.'\n )\n", (8765, 8911), True, 'import streamlit as st\n'), ((9040, 9059), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (9051, 9059), True, 'import streamlit as st\n'), ((9064, 9107), 'streamlit.header', 'st.header', (['"""🔘 Who has Positive Sentiment? """'], {}), "('🔘 Who has Positive Sentiment? ')\n", (9073, 9107), True, 'import streamlit as st\n'), ((9112, 9328), 'streamlit.info', 'st.info', (['"""🔋 Member sentiment analysis score base on the words used in messages. Sentiment Score above 0.5 to 1 is consider as Positive. Pure English words and Phrases is ideal for calcalation"""'], {}), "(\n '🔋 Member sentiment analysis score base on the words used in messages. Sentiment Score above 0.5 to 1 is consider as Positive. 
Pure English words and Phrases is ideal for calcalation'\n )\n", (9119, 9328), True, 'import streamlit as st\n'), ((9540, 9559), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (9551, 9559), True, 'import streamlit as st\n'), ((9564, 9604), 'streamlit.header', 'st.header', (['"""🔘 Top-10 Media Contributor """'], {}), "('🔘 Top-10 Media Contributor ')\n", (9573, 9604), True, 'import streamlit as st\n'), ((9609, 9719), 'streamlit.info', 'st.info', (['"""🔋 Comparision of members who contributes more number of Images, Video or Documents"""'], {}), "(\n '🔋 Comparision of members who contributes more number of Images, Video or Documents'\n )\n", (9616, 9719), True, 'import streamlit as st\n'), ((9809, 9828), 'streamlit.markdown', 'st.markdown', (['"""----"""'], {}), "('----')\n", (9820, 9828), True, 'import streamlit as st\n'), ((10121, 10158), 'logging.config.dictConfig', 'logging.config.dictConfig', (['log_config'], {}), '(log_config)\n', (10146, 10158), False, 'import logging\n'), ((10172, 10199), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (10189, 10199), False, 'import logging\n'), ((10267, 10285), 'streamlit.columns', 'st.columns', (['[3, 1]'], {}), '([3, 1])\n', (10277, 10285), True, 'import streamlit as st\n'), ((4974, 5004), 'processor.graphs.charts.most_active_member', 'most_active_member', (['data_frame'], {}), '(data_frame)\n', (4992, 5004), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((5203, 5230), 'processor.graphs.charts.most_active_day', 'most_active_day', (['data_frame'], {}), '(data_frame)\n', (5218, 5230), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, 
who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((5401, 5427), 'processor.graphs.charts.max_words_used', 'max_words_used', (['data_frame'], {}), '(data_frame)\n', (5415, 5427), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((5598, 5626), 'processor.graphs.charts.who_shared_links', 'who_shared_links', (['data_frame'], {}), '(data_frame)\n', (5614, 5626), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((6162, 6189), 'processor.graphs.charts.message_cluster', 'message_cluster', (['data_frame'], {}), '(data_frame)\n', (6177, 6189), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((7567, 7599), 'streamlit.selectbox', 'st.selectbox', (['""""""', 'sorted_authors'], {}), "('', sorted_authors)\n", (7579, 7599), True, 'import streamlit as st\n'), ((9337, 9365), 'processor.graphs.charts.sentiment_analysis', 'sentiment_analysis', (['cloud_df'], {}), '(cloud_df)\n', (9355, 9365), False, 'from processor.graphs.charts import pie_display_emojis, time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((9726, 9755), 'processor.graphs.charts.top_media_contributor', 'top_media_contributor', (['raw_df'], {}), '(raw_df)\n', (9747, 9755), False, 'from processor.graphs.charts import pie_display_emojis, 
time_series_plot, message_cluster, most_active_member, most_active_day, max_words_used, top_media_contributor, who_shared_links, sentiment_analysis, most_suitable_day, most_suitable_hour\n'), ((10662, 10710), 'streamlit.markdown', 'st.markdown', (['"""The Genesis chatter of the group:"""'], {}), "('The Genesis chatter of the group:')\n", (10673, 10710), True, 'import streamlit as st\n'), ((2173, 2197), 're.sub', 're.sub', (['"""[\n]"""', '""""""', 'word'], {}), "('[\\n]', '', word)\n", (2179, 2197), False, 'import re\n'), ((8369, 8394), 'numpy.sum', 'npsum', (['user_df.word_count'], {}), '(user_df.word_count)\n', (8374, 8394), True, 'from numpy import sum as npsum\n')] |
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import sys
import os
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_data_path = os.path.join(os.path.dirname(pyexample_path), 'test_data')
sys.path.append(pyexample_path)
import open3d_example as o3dex
def xyz_spherical(xyz):
    """Convert a Cartesian point ``xyz`` to spherical form.

    Returns ``[r, r_x, r_y]`` where ``r`` is the distance from the
    origin, ``r_x`` the inclination measured from the +y axis and
    ``r_y`` the azimuth of ``(x, z)`` around that axis.
    """
    x, y, z = xyz[0], xyz[1], xyz[2]
    radius = np.sqrt(x * x + y * y + z * z)
    inclination = np.arccos(y / radius)
    azimuth = np.arctan2(z, x)
    return [radius, inclination, azimuth]
def get_rotation_matrix(r_x, r_y):
    """Return the 3x3 matrix for a rotation of ``r_x`` about the x axis
    followed by ``r_y`` about the y axis."""
    cx, sx = np.cos(r_x), np.sin(r_x)
    cy, sy = np.cos(r_y), np.sin(r_y)
    rot_x = np.asarray([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    rot_y = np.asarray([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    return rot_y.dot(rot_x)
def get_extrinsic(xyz):
    """Build a 4x4 camera extrinsic for the viewpoint ``xyz``.

    The rotation is derived from the spherical direction of ``xyz``
    and the camera is translated 2 units along its z axis.
    """
    _, r_x, r_y = xyz_spherical(xyz)
    rotation = get_rotation_matrix(r_x, r_y)
    extrinsic = np.eye(4)
    extrinsic[:3, :3] = rotation
    extrinsic[:3, 3] = [0, 0, 2]
    return extrinsic
def preprocess(model):
    """Centre ``model`` at the origin and scale it into the unit sphere.

    The mesh is translated so its axis-aligned bounding-box centre sits
    at the origin, then divided by half the bounding-box diagonal.
    Mutates and returns ``model``.
    """
    lo = model.get_min_bound()
    hi = model.get_max_bound()
    diagonal = hi - lo
    centre = lo + diagonal / 2.0
    scale = np.linalg.norm(diagonal) / 2.0
    shifted = np.asarray(model.vertices)
    shifted -= centre
    model.vertices = o3d.utility.Vector3dVector(shifted / scale)
    return model
def voxel_carving(mesh,
                  output_filename,
                  camera_path,
                  cubic_size,
                  voxel_resolution,
                  w=300,
                  h=300):
    """Carve a dense voxel grid of ``mesh`` from rendered depth maps.

    A camera is placed at every vertex of the sphere mesh read from
    ``camera_path``; from each viewpoint an off-screen depth map is
    rendered and used to carve away voxels outside the silhouette.

    Note: ``output_filename`` is accepted but not used in this function;
    the carved VoxelGrid is returned to the caller instead.

    :param mesh: triangle mesh to carve
    :param camera_path: path to a sphere mesh supplying camera positions
    :param cubic_size: side length of the cubic voxel volume
    :param voxel_resolution: number of voxels along each side
    :param w: render window width in pixels
    :param h: render window height in pixels
    :return: the carved o3d.geometry.VoxelGrid
    """
    mesh.compute_vertex_normals()
    camera_sphere = o3d.io.read_triangle_mesh(camera_path)
    # Setup dense voxel grid: a cube of side `cubic_size` centred at origin.
    voxel_carving = o3d.geometry.VoxelGrid.create_dense(
        width=cubic_size,
        height=cubic_size,
        depth=cubic_size,
        voxel_size=cubic_size / voxel_resolution,
        origin=[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0],
        color=[1.0, 0.7, 0.0])
    # Rescale geometry: centre both meshes and normalise their scale so the
    # fixed camera distance used below frames them consistently.
    camera_sphere = preprocess(camera_sphere)
    mesh = preprocess(mesh)
    # Setup an off-screen (invisible) visualizer to render depthmaps.
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=w, height=h, visible=False)
    vis.add_geometry(mesh)
    vis.get_render_option().mesh_show_back_face = True
    ctr = vis.get_view_control()
    param = ctr.convert_to_pinhole_camera_parameters()
    # Carve voxel grid: one camera pose per sphere vertex.
    centers_pts = np.zeros((len(camera_sphere.vertices), 3))
    for cid, xyz in enumerate(camera_sphere.vertices):
        # Get new camera pose for this viewpoint.
        trans = get_extrinsic(xyz)
        param.extrinsic = trans
        # Camera centre in world coordinates (recorded for reference only).
        c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose())
        centers_pts[cid, :] = c[:3]
        ctr.convert_from_pinhole_camera_parameters(param)
        # Capture depth image and make a point cloud.
        vis.poll_events()
        vis.update_renderer()
        depth = vis.capture_depth_float_buffer(False)
        # Depth map carving method.
        voxel_carving.carve_depth_map(o3d.geometry.Image(depth), param)
        print("Carve view %03d/%03d" % (cid + 1, len(camera_sphere.vertices)))
    vis.destroy_window()
    return voxel_carving
if __name__ == "__main__":
    # Demo: voxel-carve the armadillo test mesh using camera poses taken
    # from the vertices of a sphere mesh in the test-data directory.
    mesh = o3dex.get_armadillo_mesh()
    output_filename = os.path.join(test_data_path, 'voxelized.ply')
    camera_path = os.path.join(test_data_path, 'sphere.ply')
    cubic_size = 2.0
    # 128 voxels per side -> voxel edge length of cubic_size / 128.
    voxel_resolution = 128.0
    carved_voxels = voxel_carving(mesh, output_filename, camera_path,
                                  cubic_size, voxel_resolution)
    print("Carved voxels ...")
    print(carved_voxels)
    o3d.visualization.draw([carved_voxels])
| [
"numpy.arctan2",
"open3d.visualization.draw",
"numpy.sin",
"numpy.linalg.norm",
"os.path.join",
"open3d.geometry.VoxelGrid.create_dense",
"sys.path.append",
"os.path.abspath",
"open3d.geometry.Image",
"os.path.dirname",
"open3d_example.get_armadillo_mesh",
"numpy.arccos",
"numpy.asarray",
... | [((1656, 1687), 'sys.path.append', 'sys.path.append', (['pyexample_path'], {}), '(pyexample_path)\n', (1671, 1687), False, 'import sys\n'), ((1610, 1641), 'os.path.dirname', 'os.path.dirname', (['pyexample_path'], {}), '(pyexample_path)\n', (1625, 1641), False, 'import os\n'), ((1799, 1829), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y + z * z)'], {}), '(x * x + y * y + z * z)\n', (1806, 1829), True, 'import numpy as np\n'), ((1840, 1856), 'numpy.arccos', 'np.arccos', (['(y / r)'], {}), '(y / r)\n', (1849, 1856), True, 'import numpy as np\n'), ((1867, 1883), 'numpy.arctan2', 'np.arctan2', (['z', 'x'], {}), '(z, x)\n', (1877, 1883), True, 'import numpy as np\n'), ((2376, 2385), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2382, 2385), True, 'import numpy as np\n'), ((2673, 2699), 'numpy.asarray', 'np.asarray', (['model.vertices'], {}), '(model.vertices)\n', (2683, 2699), True, 'import numpy as np\n'), ((2744, 2788), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(vertices / scale)'], {}), '(vertices / scale)\n', (2770, 2788), True, 'import open3d as o3d\n'), ((3069, 3107), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['camera_path'], {}), '(camera_path)\n', (3094, 3107), True, 'import open3d as o3d\n'), ((3159, 3394), 'open3d.geometry.VoxelGrid.create_dense', 'o3d.geometry.VoxelGrid.create_dense', ([], {'width': 'cubic_size', 'height': 'cubic_size', 'depth': 'cubic_size', 'voxel_size': '(cubic_size / voxel_resolution)', 'origin': '[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0]', 'color': '[1.0, 0.7, 0.0]'}), '(width=cubic_size, height=cubic_size,\n depth=cubic_size, voxel_size=cubic_size / voxel_resolution, origin=[-\n cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0], color=[1.0, \n 0.7, 0.0])\n', (3194, 3394), True, 'import open3d as o3d\n'), ((3584, 3614), 'open3d.visualization.Visualizer', 'o3d.visualization.Visualizer', ([], {}), '()\n', (3612, 3614), True, 'import open3d as o3d\n'), ((4694, 4720), 
'open3d_example.get_armadillo_mesh', 'o3dex.get_armadillo_mesh', ([], {}), '()\n', (4718, 4720), True, 'import open3d_example as o3dex\n'), ((4744, 4789), 'os.path.join', 'os.path.join', (['test_data_path', '"""voxelized.ply"""'], {}), "(test_data_path, 'voxelized.ply')\n", (4756, 4789), False, 'import os\n'), ((4808, 4850), 'os.path.join', 'os.path.join', (['test_data_path', '"""sphere.ply"""'], {}), "(test_data_path, 'sphere.ply')\n", (4820, 4850), False, 'import os\n'), ((5096, 5135), 'open3d.visualization.draw', 'o3d.visualization.draw', (['[carved_voxels]'], {}), '([carved_voxels])\n', (5118, 5135), True, 'import open3d as o3d\n'), ((1552, 1577), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1567, 1577), False, 'import os\n'), ((2614, 2651), 'numpy.linalg.norm', 'np.linalg.norm', (['(max_bound - min_bound)'], {}), '(max_bound - min_bound)\n', (2628, 2651), True, 'import numpy as np\n'), ((2330, 2351), 'numpy.asarray', 'np.asarray', (['[0, 0, 2]'], {}), '([0, 0, 2])\n', (2340, 2351), True, 'import numpy as np\n'), ((4489, 4514), 'open3d.geometry.Image', 'o3d.geometry.Image', (['depth'], {}), '(depth)\n', (4507, 4514), True, 'import open3d as o3d\n'), ((1985, 1996), 'numpy.cos', 'np.cos', (['r_x'], {}), '(r_x)\n', (1991, 1996), True, 'import numpy as np\n'), ((2041, 2052), 'numpy.sin', 'np.sin', (['r_x'], {}), '(r_x)\n', (2047, 2052), True, 'import numpy as np\n'), ((2054, 2065), 'numpy.cos', 'np.cos', (['r_x'], {}), '(r_x)\n', (2060, 2065), True, 'import numpy as np\n'), ((2094, 2105), 'numpy.cos', 'np.cos', (['r_y'], {}), '(r_y)\n', (2100, 2105), True, 'import numpy as np\n'), ((2110, 2121), 'numpy.sin', 'np.sin', (['r_y'], {}), '(r_y)\n', (2116, 2121), True, 'import numpy as np\n'), ((2177, 2188), 'numpy.cos', 'np.cos', (['r_y'], {}), '(r_y)\n', (2183, 2188), True, 'import numpy as np\n'), ((4092, 4112), 'numpy.linalg.inv', 'np.linalg.inv', (['trans'], {}), '(trans)\n', (4105, 4112), True, 'import numpy as np\n'), ((1999, 2010), 
'numpy.sin', 'np.sin', (['r_x'], {}), '(r_x)\n', (2005, 2010), True, 'import numpy as np\n'), ((2161, 2172), 'numpy.sin', 'np.sin', (['r_y'], {}), '(r_y)\n', (2167, 2172), True, 'import numpy as np\n'), ((4117, 4141), 'numpy.asarray', 'np.asarray', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (4127, 4141), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
# sys.path.append('/foss_fim/src')
import geopandas as gpd
from utils.shared_variables import PREP_PROJECTION
from utils.shared_functions import getDriver
from reduce_nhd_stream_density import subset_nhd_network
from adjust_headwater_streams import adjust_headwaters
from shapely.geometry import Point
from concurrent.futures import ProcessPoolExecutor
from collections import deque
import numpy as np
from shapely.wkb import dumps, loads
import pygeos
# Input/output locations are supplied through environment variables so the
# same preprocessing script can be driven from the calling pipeline.
# The .get() lookups return None when a variable is unset; the two bracket
# lookups at the end raise KeyError instead, so those outputs are mandatory.
nhdplus_vectors_dir = os.environ.get('nhdplus_vectors_dir')
wbd_filename = os.environ.get('wbd_filename')
nwm_streams_orig_filename = os.environ.get('nwm_streams_orig_filename')
nwm_streams_all_filename = os.environ.get('nwm_streams_all_filename')
nwm_headwaters_filename = os.environ.get('nwm_headwaters_filename')
nwm_catchments_orig_filename = os.environ.get('nwm_catchments_orig_filename')
nwm_catchments_all_filename = os.environ.get('nwm_catchments_all_filename')
ahps_filename = os.environ.get('ahps_filename')
nwm_huc4_intersections_filename = os.environ.get('nwm_huc4_intersections_filename')
nhd_huc8_intersections_filename = os.environ.get('nhd_huc8_intersections_filename')
agg_nhd_headwaters_adj_fileName = os.environ['agg_nhd_headwaters_adj_fileName']
agg_nhd_streams_adj_fileName = os.environ['agg_nhd_streams_adj_fileName']
def identify_nwm_ms_streams(nwm_streams_filename,ahps_filename,nwm_streams_all_filename):
    """
    Flag the NWM mainstem (MS) network and write it back out.

    Starting from NWM segments whose IDs appear in the AHPS headwater
    layer, the network is walked downstream via each segment's 'to'
    field and every reached segment is marked as mainstem.  The full
    NWM layer is re-written with a 0/1 'mainstem' column and the list
    of mainstem segment IDs is returned.

    :param nwm_streams_filename: path to the NWM streams vector layer
    :param ahps_filename: path to the AHPS headwater points layer
    :param nwm_streams_all_filename: output path for the annotated layer
    :return: list of NWM segment IDs on the mainstem network
    """
    # Subset nwm network to ms
    ahps_headwaters = gpd.read_file(ahps_filename)
    nwm_streams = gpd.read_file(nwm_streams_filename)
    # Remove mainstem column if it already exists
    nwm_streams = nwm_streams.drop(['mainstem'], axis=1, errors='ignore')
    nwm_streams['is_headwater'] = False
    # 'nwm_featur' is the AHPS layer's NWM feature-id field (name appears
    # shapefile-truncated -- presumably 'nwm_feature_id'; confirm in data).
    nwm_streams.loc[nwm_streams.ID.isin(list(ahps_headwaters.nwm_featur)),'is_headwater'] = True
    # Subset NHDPlus HR
    nwm_streams['is_relevant_stream'] = nwm_streams['is_headwater'].copy()
    nwm_streams = nwm_streams.explode()
    # Trace down from headwaters: breadth-first walk over the 'to' links,
    # indexing by ID (kept as a column too via drop=False).
    nwm_streams.set_index('ID',inplace=True,drop=False)
    Q = deque(nwm_streams.loc[nwm_streams['is_headwater'],'ID'].tolist())
    visited = set()
    while Q:
        q = Q.popleft()
        if q in visited:
            continue
        visited.add(q)
        toNode = nwm_streams.loc[q,'to']
        # A 'to' value of 0 marks a terminal (outlet) segment.
        if not toNode == 0:
            nwm_streams.loc[nwm_streams.ID==toNode,'is_relevant_stream'] = True
            if toNode not in visited:
                Q.append(toNode)
    nwm_streams_ms = nwm_streams.loc[nwm_streams['is_relevant_stream'],:]
    ms_segments = nwm_streams_ms.ID.to_list()
    nwm_streams.reset_index(drop=True,inplace=True)
    # Add column to FR nwm layer to indicate MS segments
    nwm_streams['mainstem'] = np.where(nwm_streams.ID.isin(ms_segments), 1, 0)
    nwm_streams = nwm_streams.drop(['is_relevant_stream','is_headwater'], axis=1, errors='ignore')
    nwm_streams.to_file(nwm_streams_all_filename,driver=getDriver(nwm_streams_all_filename),index=False,layer='nwm_streams')
    return ms_segments
def find_nwm_incoming_streams(nwm_streams_,wbd,huc_unit):
    """Locate the points where NWM streams cross WBD HUC boundaries.

    Parameters
    ----------
    nwm_streams_ : str or gpd.GeoDataFrame
        NWM stream network, either as a filepath or an in-memory frame.
    wbd : str or gpd.GeoDataFrame
        WBD watershed-boundary layer (filepath or frame).
    huc_unit : int
        HUC level (e.g. 4 or 8); selects the WBDHU<huc_unit> layer/column.

    Returns
    -------
    gpd.GeoDataFrame
        Deduplicated points with 'geometry', 'NHDPlusID' and 'mainstem'
        columns, one per stream segment crossing a HUC boundary, snapped
        onto the boundary by linear referencing.
    """
    # Input wbd
    if isinstance(wbd,str):
        layer = f"WBDHU{huc_unit}"
        wbd = gpd.read_file(wbd, layer=layer)
    elif isinstance(wbd,gpd.GeoDataFrame):
        pass
    else:
        raise TypeError("Pass dataframe or filepath for wbd")
    intersecting_points = []
    nhdplus_ids = []
    mainstem_flag = []
    print (f"iterating through {len(wbd)} hucs")
    # NOTE(review): the loop variable `index` is reused by all three nested
    # loops below; none of the bodies read it, so this is only a readability
    # hazard, not a bug.
    for index, row in wbd.iterrows():
        col_name = f"HUC{huc_unit}"
        huc = row[col_name]
        huc_mask = wbd.loc[wbd[col_name]==str(huc)]
        huc_mask = huc_mask.explode()
        huc_mask = huc_mask.reset_index(drop=True)
        # Input nwm streams
        if isinstance(nwm_streams_,str):
            nwm_streams = gpd.read_file(nwm_streams_, mask=huc_mask)
        elif isinstance(nwm_streams_,gpd.GeoDataFrame):
            nwm_streams = nwm_streams_.copy()
        else:
            raise TypeError("Pass dataframe or filepath for nwm streams")
        nwm_streams = nwm_streams.explode()
        nwm_streams = nwm_streams.reset_index(drop=True)
        for index, polygon in enumerate(huc_mask.geometry):
            # Keep only segments that cross this HUC's outer boundary
            crosses=nwm_streams.crosses(polygon.exterior)
            nwm_streams_subset =nwm_streams[crosses]
            nwm_streams_subset = nwm_streams_subset.reset_index(drop=True)
            for index, segment in nwm_streams_subset.iterrows():
                distances = []
                # NWM uses 'ID'; fall back to NHDPlusID for NHD-style frames
                try:
                    nhdplus_id = segment.ID
                except:
                    nhdplus_id = segment.NHDPlusID
                linestring = segment.geometry
                mainstem = segment.mainstem
                # Distance to each stream segment
                for point in zip(*linestring.coords.xy):
                    distance = Point(point).distance(polygon.exterior)
                    distances = distances + [distance]
                # Find minimum distance
                min_index = np.argmin(distances)
                # Closest segment to headwater
                closest_point = list(linestring.coords)[min_index]
                last_node = Point(closest_point)
                # Convert geometries to WKB representation
                wkb_point = dumps(last_node)
                wkb_poly = dumps(polygon.exterior)
                # Create pygeos geometries from WKB representation
                stream_point_geom = pygeos.io.from_wkb(wkb_point)
                polybin_geom = pygeos.io.from_wkb(wkb_poly)
                # Linear reference end node to huc boundary
                pointdistancetoline = pygeos.linear.line_locate_point(polybin_geom,stream_point_geom)
                referencedpoint = pygeos.linear.line_interpolate_point(polybin_geom, pointdistancetoline)
                # Convert geometries to wkb representation
                bin_referencedpoint = pygeos.io.to_wkb(referencedpoint)
                # Convert to shapely geometries
                shply_referencedpoint = loads(bin_referencedpoint)
                # Collect all nhd stream segment linestring verticies
                intersecting_points = intersecting_points + [shply_referencedpoint]
                nhdplus_ids = nhdplus_ids + [nhdplus_id]
                mainstem_flag = mainstem_flag + [mainstem]
        del huc_mask
    # NOTE(review): if `wbd` is empty the loop never runs and `nwm_streams`
    # is unbound here (NameError) -- confirm callers always pass >= 1 HUC.
    huc_intersection = gpd.GeoDataFrame({'geometry': intersecting_points, 'NHDPlusID': nhdplus_ids,'mainstem': mainstem_flag},crs=nwm_streams.crs,geometry='geometry')
    huc_intersection = huc_intersection.drop_duplicates()
    del nwm_streams,wbd
    return huc_intersection
def collect_stream_attributes(nhdplus_vectors_dir, huc):
    """Merge the NHDPlus HR layers for one HUC4 into a single stream layer.

    Joins the burn-line geometries with the flowline VAA routing attributes
    and the flowline FType/FCode attributes on NHDPlusID, drops FType 566
    and 420 features, reprojects to PREP_PROJECTION, applies a hard-coded
    network repair for HUC 1003 (Tiber Dam), and writes the result to
    ``NHDPlusBurnLineEvent<huc>_agg.gpkg``.  Prints a message and returns
    without writing when the burn-line layer for this HUC is missing.
    """
    print (f"Starting attribute collection for HUC {huc}",flush=True)
    # Collecting NHDPlus HR attributes
    burnline_filename = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '.gpkg')
    vaa_filename = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusFlowLineVAA' + str(huc) + '.gpkg')
    flowline_filename = os.path.join(nhdplus_vectors_dir,huc,'NHDFlowline' + str(huc) + '.gpkg')
    # Reuse the path computed above instead of rebuilding the same string
    if os.path.exists(burnline_filename):
        burnline = gpd.read_file(burnline_filename)
        burnline = burnline[['NHDPlusID','ReachCode','geometry']]
        flowline = gpd.read_file(flowline_filename)
        flowline = flowline[['NHDPlusID','FType','FCode']]
        # flowline = flowline.loc[flowline["FType"].isin([334,420,428,460,558])]
        flowline = flowline.loc[~flowline["FType"].isin([566,420])]
        nhd_streams_vaa = gpd.read_file(vaa_filename)
        nhd_streams_vaa = nhd_streams_vaa[['FromNode','ToNode','NHDPlusID','StreamOrde','DnLevelPat','LevelPathI']]
        nhd_streams = burnline.merge(nhd_streams_vaa,on='NHDPlusID',how='inner')
        nhd_streams = nhd_streams.merge(flowline,on='NHDPlusID',how='inner')
        del burnline, flowline, nhd_streams_vaa
        nhd_streams = nhd_streams.to_crs(PREP_PROJECTION)
        nhd_streams = nhd_streams.loc[nhd_streams.geometry!=None,:] # special case: remove segments without geometries
        nhd_streams['HUC4'] = str(huc)
        # special case; breach in network at Tiber Dam
        # BUGFIX: `huc == '1003' and <Series> == <scalar>` evaluated a pandas
        # Series in boolean context, which raises "The truth value of a Series
        # is ambiguous"; reduce the elementwise comparison with .any() instead.
        if huc == '1003' and (nhd_streams.loc[nhd_streams.NHDPlusID==23001300078682.0,'DnLevelPat'] == 23001300001574.0).any():
            nhd_streams = nhd_streams.loc[nhd_streams.NHDPlusID!=23001300009084.0]
            nhd_streams.loc[nhd_streams.NHDPlusID==23001300078682.0,'DnLevelPat'] = 23001300001566.0
        # Write out NHDPlus HR aggregated
        nhd_streams_agg_fileName = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '_agg.gpkg')
        nhd_streams.to_file(nhd_streams_agg_fileName,driver=getDriver(nhd_streams_agg_fileName),index=False)
        del nhd_streams
        print (f"finished attribute collection for HUC {huc}",flush=True)
    else:
        print (f"missing data for HUC {huc}",flush=True)
def subset_stream_networks(args, huc):
    """Subset and headwater-adjust the NHD HR network for one HUC4.

    ``args`` is a tuple of (nwm_headwaters_filename, ahps_filename, wbd4,
    wbd8, nhdplus_vectors_dir, nwm_huc4_intersections_filename), packed this
    way so the function can be submitted to a ProcessPoolExecutor with a
    single shared argument list.  Writes the adjusted stream network and
    headwater points for the HUC to per-HUC geopackages.
    """
    nwm_headwaters_filename = args[0]
    ahps_filename = args[1]
    wbd4 = args[2]
    wbd8 = args[3]
    nhdplus_vectors_dir = args[4]
    nwm_huc4_intersections_filename = args[5]
    print(f"starting stream subset for HUC {huc}",flush=True)
    nwm_headwater_id = 'ID'
    ahps_headwater_id = 'nws_lid'
    headwater_pts_id = 'site_id'
    column_order = ['pt_type', headwater_pts_id, 'mainstem', 'geometry']
    nhd_streams_filename = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '_agg.gpkg')
    # Subset to reduce footprint
    selected_wbd4 = wbd4.loc[wbd4.HUC4.str.startswith(huc)]
    del wbd4
    selected_wbd8 = wbd8.loc[wbd8.HUC8.str.startswith(huc)]
    del wbd8
    huc_mask = selected_wbd4.loc[selected_wbd4.HUC4.str.startswith(huc)]
    huc_mask = huc_mask.explode()
    huc_mask = huc_mask.reset_index(drop=True)
    if len(selected_wbd8.HUC8) > 0:
        selected_wbd8 = selected_wbd8.reset_index(drop=True)
        # Identify FR/NWM headwaters and subset HR network
        # NOTE(review): the bare except below only prints; if it fires,
        # `nhd_streams_fr` is never bound and the next call (and the final
        # `del nhd_streams_fr`) raise NameError -- confirm intended behavior.
        try:
            nhd_streams_fr = subset_nhd_network(huc,huc_mask,selected_wbd8,nhd_streams_filename,nwm_headwaters_filename,nwm_headwater_id,nwm_huc4_intersections_filename)
        except:
            print (f"Error subsetting NHD HR network for HUC {huc}",flush=True)
        # Identify nhd mainstem streams
        # NOTE(review): same silent-failure concern for `nhd_streams_all`.
        try:
            nhd_streams_all = subset_nhd_network(huc,huc_mask,selected_wbd8,nhd_streams_fr,ahps_filename,ahps_headwater_id,nwm_huc4_intersections_filename,True)
        except:
            print (f"Error identifing MS network for HUC {huc}",flush=True)
        # Identify HUC8 intersection points
        nhd_huc8_intersections = find_nwm_incoming_streams(nhd_streams_all,selected_wbd8,8)
        # Load nwm headwaters
        nwm_headwaters = gpd.read_file(nwm_headwaters_filename, mask=huc_mask)
        nwm_headwaters['pt_type'] = 'nwm_headwater'
        nwm_headwaters = nwm_headwaters.rename(columns={"ID": headwater_pts_id})
        # Load nws lids
        nws_lids = gpd.read_file(ahps_filename, mask=huc_mask)
        nws_lids = nws_lids.drop(columns=['name','nwm_feature_id','usgs_site_code','states','HUC8','is_headwater', 'is_colocated'])
        nws_lids = nws_lids.rename(columns={"nws_lid": headwater_pts_id})
        nws_lids['pt_type'] = 'nws_lid'
        nws_lids['mainstem'] = True
        if (len(nwm_headwaters) > 0) or (len(nws_lids) > 0):
            # Adjust FR/NWM headwater segments
            adj_nhd_streams_all, adj_nhd_headwater_points = adjust_headwaters(huc,nhd_streams_all,nwm_headwaters,nws_lids,headwater_pts_id)
            adj_nhd_headwater_points = adj_nhd_headwater_points[column_order]
            nhd_huc8_intersections['pt_type'] = 'nhd_huc8_intersections'
            nhd_huc8_intersections = nhd_huc8_intersections.rename(columns={"NHDPlusID": headwater_pts_id})
            nhd_huc8_intersections = nhd_huc8_intersections[column_order]
            # Combine adjusted headwater points with HUC8 boundary crossings
            adj_nhd_headwater_points_all = adj_nhd_headwater_points.append(nhd_huc8_intersections)
            adj_nhd_headwater_points_all = adj_nhd_headwater_points_all.reset_index(drop=True)
            adj_nhd_streams_all_fileName = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '_adj.gpkg')
            adj_nhd_headwaters_all_fileName = os.path.join(nhdplus_vectors_dir,huc,'nhd' + str(huc) + '_headwaters_adj.gpkg')
            # Write out FR adjusted
            adj_nhd_streams_all.to_file(adj_nhd_streams_all_fileName,driver=getDriver(adj_nhd_streams_all_fileName),index=False)
            adj_nhd_headwater_points_all.to_file(adj_nhd_headwaters_all_fileName,driver=getDriver(adj_nhd_headwaters_all_fileName),index=False)
            del adj_nhd_streams_all, adj_nhd_headwater_points_all
        else:
            print (f"skipping headwater adjustments for HUC {huc}")
        del nhd_streams_fr
    print(f"finished stream subset for HUC {huc}",flush=True)
def aggregate_stream_networks(nhdplus_vectors_dir,agg_nhd_headwaters_adj_fileName,agg_nhd_streams_adj_fileName,huc_list):
    """Append each HUC's adjusted layers into the NWM-domain aggregates.

    For every HUC in ``huc_list`` the per-HUC adjusted stream network and
    headwater points (written by subset_stream_networks) are read and
    appended to the two aggregate geopackages; missing per-HUC files are
    skipped.  The duplicated read/append logic of the original was factored
    into ``_append_layer`` -- behavior (including write order) is unchanged.
    """
    for huc in huc_list:
        # aggregated final filenames
        nhd_agg_adj_huc_subset = os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '_adj.gpkg')
        nhd_agg_adj_headwaters_subset = os.path.join(nhdplus_vectors_dir,huc,'nhd' + str(huc) + '_headwaters_adj.gpkg')
        _append_layer(nhd_agg_adj_huc_subset, agg_nhd_streams_adj_fileName)
        _append_layer(nhd_agg_adj_headwaters_subset, agg_nhd_headwaters_adj_fileName)
def _append_layer(source_fileName, aggregate_fileName):
    """Read ``source_fileName`` (if it exists) and append it to ``aggregate_fileName``.

    Creates the aggregate file on first write; subsequent writes use
    ``mode='a'`` so features accumulate across HUCs.
    """
    if os.path.isfile(source_fileName):
        layer = gpd.read_file(source_fileName)
        # Append when the aggregate already exists, otherwise create it
        if os.path.isfile(aggregate_fileName):
            layer.to_file(aggregate_fileName,driver=getDriver(aggregate_fileName),index=False, mode='a')
        else:
            layer.to_file(aggregate_fileName,driver=getDriver(aggregate_fileName),index=False)
        del layer
def clean_up_intermediate_files(nhdplus_vectors_dir):
    """Delete the per-HUC intermediate geopackages under ``nhdplus_vectors_dir``.

    For every HUC subdirectory, removes the '_agg', '_adj' and
    '_headwaters_adj' geopackages when present; other files are untouched.
    """
    for huc in os.listdir(nhdplus_vectors_dir):
        candidates = (
            os.path.join(nhdplus_vectors_dir, huc, 'NHDPlusBurnLineEvent' + str(huc) + '_agg.gpkg'),
            os.path.join(nhdplus_vectors_dir, huc, 'NHDPlusBurnLineEvent' + str(huc) + '_adj.gpkg'),
            os.path.join(nhdplus_vectors_dir, huc, 'nhd' + str(huc) + '_headwaters_adj.gpkg'),
        )
        for candidate in candidates:
            if os.path.exists(candidate):
                os.remove(candidate)
if __name__ == '__main__':
    # # Generate NWM Headwaters
    print ('loading HUC4s')
    wbd4 = gpd.read_file(wbd_filename, layer='WBDHU4')
    print ('loading HUC8s')
    wbd8 = gpd.read_file(wbd_filename, layer='WBDHU8')
    # Packed into one tuple so each worker receives a single shared arg list
    subset_arg_list = (nwm_headwaters_filename,ahps_filename,wbd4,wbd8,nhdplus_vectors_dir,nwm_huc4_intersections_filename)
    # NOTE(review): huc_list is only referenced by the commented-out
    # collect_stream_attributes line below -- currently unused.
    huc_list = os.listdir(nhdplus_vectors_dir)
    # Only (re)process HUC4s whose adjusted output is not yet on disk
    missing_subsets = []
    for huc in os.listdir(nhdplus_vectors_dir):
        streams_adj_path= os.path.join(nhdplus_vectors_dir,huc,'NHDPlusBurnLineEvent' + str(huc) + '_adj.gpkg')
        if not os.path.isfile(streams_adj_path):
            missing_subsets = missing_subsets + [huc]
    print (f"Subsetting stream network for {len(missing_subsets)} HUC4s")
    num_workers=11
    # NOTE(review): the submitted futures are never inspected, so exceptions
    # raised inside workers pass silently -- confirm this is acceptable.
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        # Preprocess nhd hr and add attributes
        # collect_attributes = [executor.submit(collect_stream_attributes, nhdplus_vectors_dir, str(huc)) for huc in huc_list]
        # Subset nhd hr network
        subset_results = [executor.submit(subset_stream_networks, subset_arg_list, str(huc)) for huc in missing_subsets]
    del wbd4,wbd8
    # Aggregate subset nhd networks for entire nwm domain
    print ('Aggregating subset NHD networks for entire NWM domain')
    aggregate_stream_networks(nhdplus_vectors_dir,agg_nhd_headwaters_adj_fileName,agg_nhd_streams_adj_fileName,missing_subsets)
    # Remove intermediate files
    # clean_up_intermediate_files(nhdplus_vectors_dir)
| [
"os.remove",
"concurrent.futures.ProcessPoolExecutor",
"numpy.argmin",
"os.path.isfile",
"pygeos.linear.line_locate_point",
"shapely.geometry.Point",
"os.path.exists",
"geopandas.GeoDataFrame",
"shapely.wkb.loads",
"reduce_nhd_stream_density.subset_nhd_network",
"geopandas.read_file",
"pygeos.... | [((510, 547), 'os.environ.get', 'os.environ.get', (['"""nhdplus_vectors_dir"""'], {}), "('nhdplus_vectors_dir')\n", (524, 547), False, 'import os\n'), ((563, 593), 'os.environ.get', 'os.environ.get', (['"""wbd_filename"""'], {}), "('wbd_filename')\n", (577, 593), False, 'import os\n'), ((622, 665), 'os.environ.get', 'os.environ.get', (['"""nwm_streams_orig_filename"""'], {}), "('nwm_streams_orig_filename')\n", (636, 665), False, 'import os\n'), ((693, 735), 'os.environ.get', 'os.environ.get', (['"""nwm_streams_all_filename"""'], {}), "('nwm_streams_all_filename')\n", (707, 735), False, 'import os\n'), ((762, 803), 'os.environ.get', 'os.environ.get', (['"""nwm_headwaters_filename"""'], {}), "('nwm_headwaters_filename')\n", (776, 803), False, 'import os\n'), ((835, 881), 'os.environ.get', 'os.environ.get', (['"""nwm_catchments_orig_filename"""'], {}), "('nwm_catchments_orig_filename')\n", (849, 881), False, 'import os\n'), ((912, 957), 'os.environ.get', 'os.environ.get', (['"""nwm_catchments_all_filename"""'], {}), "('nwm_catchments_all_filename')\n", (926, 957), False, 'import os\n'), ((974, 1005), 'os.environ.get', 'os.environ.get', (['"""ahps_filename"""'], {}), "('ahps_filename')\n", (988, 1005), False, 'import os\n'), ((1040, 1089), 'os.environ.get', 'os.environ.get', (['"""nwm_huc4_intersections_filename"""'], {}), "('nwm_huc4_intersections_filename')\n", (1054, 1089), False, 'import os\n'), ((1124, 1173), 'os.environ.get', 'os.environ.get', (['"""nhd_huc8_intersections_filename"""'], {}), "('nhd_huc8_intersections_filename')\n", (1138, 1173), False, 'import os\n'), ((1474, 1502), 'geopandas.read_file', 'gpd.read_file', (['ahps_filename'], {}), '(ahps_filename)\n', (1487, 1502), True, 'import geopandas as gpd\n'), ((1522, 1557), 'geopandas.read_file', 'gpd.read_file', (['nwm_streams_filename'], {}), '(nwm_streams_filename)\n', (1535, 1557), True, 'import geopandas as gpd\n'), ((6402, 6552), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', 
(["{'geometry': intersecting_points, 'NHDPlusID': nhdplus_ids, 'mainstem':\n mainstem_flag}"], {'crs': 'nwm_streams.crs', 'geometry': '"""geometry"""'}), "({'geometry': intersecting_points, 'NHDPlusID': nhdplus_ids,\n 'mainstem': mainstem_flag}, crs=nwm_streams.crs, geometry='geometry')\n", (6418, 6552), True, 'import geopandas as gpd\n'), ((14779, 14810), 'os.listdir', 'os.listdir', (['nhdplus_vectors_dir'], {}), '(nhdplus_vectors_dir)\n', (14789, 14810), False, 'import os\n'), ((15485, 15528), 'geopandas.read_file', 'gpd.read_file', (['wbd_filename'], {'layer': '"""WBDHU4"""'}), "(wbd_filename, layer='WBDHU4')\n", (15498, 15528), True, 'import geopandas as gpd\n'), ((15568, 15611), 'geopandas.read_file', 'gpd.read_file', (['wbd_filename'], {'layer': '"""WBDHU8"""'}), "(wbd_filename, layer='WBDHU8')\n", (15581, 15611), True, 'import geopandas as gpd\n'), ((15752, 15783), 'os.listdir', 'os.listdir', (['nhdplus_vectors_dir'], {}), '(nhdplus_vectors_dir)\n', (15762, 15783), False, 'import os\n'), ((15825, 15856), 'os.listdir', 'os.listdir', (['nhdplus_vectors_dir'], {}), '(nhdplus_vectors_dir)\n', (15835, 15856), False, 'import os\n'), ((3195, 3226), 'geopandas.read_file', 'gpd.read_file', (['wbd'], {'layer': 'layer'}), '(wbd, layer=layer)\n', (3208, 3226), True, 'import geopandas as gpd\n'), ((7257, 7289), 'geopandas.read_file', 'gpd.read_file', (['burnline_filename'], {}), '(burnline_filename)\n', (7270, 7289), True, 'import geopandas as gpd\n'), ((7375, 7407), 'geopandas.read_file', 'gpd.read_file', (['flowline_filename'], {}), '(flowline_filename)\n', (7388, 7407), True, 'import geopandas as gpd\n'), ((7643, 7670), 'geopandas.read_file', 'gpd.read_file', (['vaa_filename'], {}), '(vaa_filename)\n', (7656, 7670), True, 'import geopandas as gpd\n'), ((10966, 11019), 'geopandas.read_file', 'gpd.read_file', (['nwm_headwaters_filename'], {'mask': 'huc_mask'}), '(nwm_headwaters_filename, mask=huc_mask)\n', (10979, 11019), True, 'import geopandas as gpd\n'), ((11197, 
11240), 'geopandas.read_file', 'gpd.read_file', (['ahps_filename'], {'mask': 'huc_mask'}), '(ahps_filename, mask=huc_mask)\n', (11210, 11240), True, 'import geopandas as gpd\n'), ((13548, 13586), 'os.path.isfile', 'os.path.isfile', (['nhd_agg_adj_huc_subset'], {}), '(nhd_agg_adj_huc_subset)\n', (13562, 13586), False, 'import os\n'), ((14101, 14146), 'os.path.isfile', 'os.path.isfile', (['nhd_agg_adj_headwaters_subset'], {}), '(nhd_agg_adj_headwaters_subset)\n', (14115, 14146), False, 'import os\n'), ((15149, 15173), 'os.path.exists', 'os.path.exists', (['agg_path'], {}), '(agg_path)\n', (15163, 15173), False, 'import os\n'), ((15219, 15251), 'os.path.exists', 'os.path.exists', (['streams_adj_path'], {}), '(streams_adj_path)\n', (15233, 15251), False, 'import os\n'), ((15305, 15339), 'os.path.exists', 'os.path.exists', (['headwater_adj_path'], {}), '(headwater_adj_path)\n', (15319, 15339), False, 'import os\n'), ((16178, 16222), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'num_workers'}), '(max_workers=num_workers)\n', (16197, 16222), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((2947, 2982), 'utils.shared_functions.getDriver', 'getDriver', (['nwm_streams_all_filename'], {}), '(nwm_streams_all_filename)\n', (2956, 2982), False, 'from utils.shared_functions import getDriver\n'), ((3818, 3860), 'geopandas.read_file', 'gpd.read_file', (['nwm_streams_'], {'mask': 'huc_mask'}), '(nwm_streams_, mask=huc_mask)\n', (3831, 3860), True, 'import geopandas as gpd\n'), ((10229, 10379), 'reduce_nhd_stream_density.subset_nhd_network', 'subset_nhd_network', (['huc', 'huc_mask', 'selected_wbd8', 'nhd_streams_filename', 'nwm_headwaters_filename', 'nwm_headwater_id', 'nwm_huc4_intersections_filename'], {}), '(huc, huc_mask, selected_wbd8, nhd_streams_filename,\n nwm_headwaters_filename, nwm_headwater_id, nwm_huc4_intersections_filename)\n', (10247, 10379), False, 'from reduce_nhd_stream_density import 
subset_nhd_network\n'), ((10550, 10691), 'reduce_nhd_stream_density.subset_nhd_network', 'subset_nhd_network', (['huc', 'huc_mask', 'selected_wbd8', 'nhd_streams_fr', 'ahps_filename', 'ahps_headwater_id', 'nwm_huc4_intersections_filename', '(True)'], {}), '(huc, huc_mask, selected_wbd8, nhd_streams_fr,\n ahps_filename, ahps_headwater_id, nwm_huc4_intersections_filename, True)\n', (10568, 10691), False, 'from reduce_nhd_stream_density import subset_nhd_network\n'), ((11693, 11780), 'adjust_headwater_streams.adjust_headwaters', 'adjust_headwaters', (['huc', 'nhd_streams_all', 'nwm_headwaters', 'nws_lids', 'headwater_pts_id'], {}), '(huc, nhd_streams_all, nwm_headwaters, nws_lids,\n headwater_pts_id)\n', (11710, 11780), False, 'from adjust_headwater_streams import adjust_headwaters\n'), ((13622, 13659), 'geopandas.read_file', 'gpd.read_file', (['nhd_agg_adj_huc_subset'], {}), '(nhd_agg_adj_huc_subset)\n', (13635, 13659), True, 'import geopandas as gpd\n'), ((13712, 13756), 'os.path.isfile', 'os.path.isfile', (['agg_nhd_streams_adj_fileName'], {}), '(agg_nhd_streams_adj_fileName)\n', (13726, 13756), False, 'import os\n'), ((14191, 14235), 'geopandas.read_file', 'gpd.read_file', (['nhd_agg_adj_headwaters_subset'], {}), '(nhd_agg_adj_headwaters_subset)\n', (14204, 14235), True, 'import geopandas as gpd\n'), ((14288, 14335), 'os.path.isfile', 'os.path.isfile', (['agg_nhd_headwaters_adj_fileName'], {}), '(agg_nhd_headwaters_adj_fileName)\n', (14302, 14335), False, 'import os\n'), ((15187, 15206), 'os.remove', 'os.remove', (['agg_path'], {}), '(agg_path)\n', (15196, 15206), False, 'import os\n'), ((15265, 15292), 'os.remove', 'os.remove', (['streams_adj_path'], {}), '(streams_adj_path)\n', (15274, 15292), False, 'import os\n'), ((15353, 15382), 'os.remove', 'os.remove', (['headwater_adj_path'], {}), '(headwater_adj_path)\n', (15362, 15382), False, 'import os\n'), ((15985, 16017), 'os.path.isfile', 'os.path.isfile', (['streams_adj_path'], {}), '(streams_adj_path)\n', (15999, 
16017), False, 'import os\n'), ((5033, 5053), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (5042, 5053), True, 'import numpy as np\n'), ((5197, 5217), 'shapely.geometry.Point', 'Point', (['closest_point'], {}), '(closest_point)\n', (5202, 5217), False, 'from shapely.geometry import Point\n'), ((5306, 5322), 'shapely.wkb.dumps', 'dumps', (['last_node'], {}), '(last_node)\n', (5311, 5322), False, 'from shapely.wkb import dumps, loads\n'), ((5350, 5373), 'shapely.wkb.dumps', 'dumps', (['polygon.exterior'], {}), '(polygon.exterior)\n', (5355, 5373), False, 'from shapely.wkb import dumps, loads\n'), ((5478, 5507), 'pygeos.io.from_wkb', 'pygeos.io.from_wkb', (['wkb_point'], {}), '(wkb_point)\n', (5496, 5507), False, 'import pygeos\n'), ((5539, 5567), 'pygeos.io.from_wkb', 'pygeos.io.from_wkb', (['wkb_poly'], {}), '(wkb_poly)\n', (5557, 5567), False, 'import pygeos\n'), ((5667, 5731), 'pygeos.linear.line_locate_point', 'pygeos.linear.line_locate_point', (['polybin_geom', 'stream_point_geom'], {}), '(polybin_geom, stream_point_geom)\n', (5698, 5731), False, 'import pygeos\n'), ((5765, 5836), 'pygeos.linear.line_interpolate_point', 'pygeos.linear.line_interpolate_point', (['polybin_geom', 'pointdistancetoline'], {}), '(polybin_geom, pointdistancetoline)\n', (5801, 5836), False, 'import pygeos\n'), ((5935, 5968), 'pygeos.io.to_wkb', 'pygeos.io.to_wkb', (['referencedpoint'], {}), '(referencedpoint)\n', (5951, 5968), False, 'import pygeos\n'), ((6058, 6084), 'shapely.wkb.loads', 'loads', (['bin_referencedpoint'], {}), '(bin_referencedpoint)\n', (6063, 6084), False, 'from shapely.wkb import dumps, loads\n'), ((8795, 8830), 'utils.shared_functions.getDriver', 'getDriver', (['nhd_streams_agg_fileName'], {}), '(nhd_streams_agg_fileName)\n', (8804, 8830), False, 'from utils.shared_functions import getDriver\n'), ((12670, 12709), 'utils.shared_functions.getDriver', 'getDriver', (['adj_nhd_streams_all_fileName'], {}), '(adj_nhd_streams_all_fileName)\n', (12679, 
12709), False, 'from utils.shared_functions import getDriver\n'), ((12811, 12853), 'utils.shared_functions.getDriver', 'getDriver', (['adj_nhd_headwaters_all_fileName'], {}), '(adj_nhd_headwaters_all_fileName)\n', (12820, 12853), False, 'from utils.shared_functions import getDriver\n'), ((13838, 13877), 'utils.shared_functions.getDriver', 'getDriver', (['agg_nhd_streams_adj_fileName'], {}), '(agg_nhd_streams_adj_fileName)\n', (13847, 13877), False, 'from utils.shared_functions import getDriver\n'), ((13999, 14038), 'utils.shared_functions.getDriver', 'getDriver', (['agg_nhd_streams_adj_fileName'], {}), '(agg_nhd_streams_adj_fileName)\n', (14008, 14038), False, 'from utils.shared_functions import getDriver\n'), ((14429, 14471), 'utils.shared_functions.getDriver', 'getDriver', (['agg_nhd_headwaters_adj_fileName'], {}), '(agg_nhd_headwaters_adj_fileName)\n', (14438, 14471), False, 'from utils.shared_functions import getDriver\n'), ((14605, 14647), 'utils.shared_functions.getDriver', 'getDriver', (['agg_nhd_headwaters_adj_fileName'], {}), '(agg_nhd_headwaters_adj_fileName)\n', (14614, 14647), False, 'from utils.shared_functions import getDriver\n'), ((4869, 4881), 'shapely.geometry.Point', 'Point', (['point'], {}), '(point)\n', (4874, 4881), False, 'from shapely.geometry import Point\n')] |
from unittest import TestCase
import numpy as np
import cca_zoo.models.innerloop
class TestInnerLoop(TestCase):
    """Smoke tests for the sparse-CCA inner-loop solvers in cca_zoo."""
    def setUp(self):
        # Fresh random data for every test; 10 samples x 10 features per view.
        self.X = np.random.rand(10, 10)
        self.Y = np.random.rand(10, 10)
        self.Z = np.random.rand(10, 10)
    def tearDown(self):
        pass
    def test_regularized(self):
        # NOTE(review): these fit() calls are smoke tests only -- nothing is
        # asserted about the fitted results, and `params` below is unused.
        # Confirm whether assertions were intended (or trimmed) here.
        park = cca_zoo.models.innerloop.ParkhomenkoInnerLoop(c=[0.0001, 0.0001]).fit(self.X, self.Y)
        park_gen = cca_zoo.models.innerloop.ParkhomenkoInnerLoop(c=[0.0001, 0.0001], generalized=True).fit(self.X,
                                                                                                         self.Y)
        params = {'c': [2, 2]}
        pmd = cca_zoo.models.innerloop.PMDInnerLoop(c=[2, 2]).fit(self.X, self.Y)
        pmd_gen = cca_zoo.models.innerloop.PMDInnerLoop(c=[2, 2], generalized=True).fit(self.X, self.Y)
| [
"numpy.random.rand"
] | [((155, 177), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (169, 177), True, 'import numpy as np\n'), ((195, 217), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (209, 217), True, 'import numpy as np\n'), ((235, 257), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (249, 257), True, 'import numpy as np\n')] |
import json
import logging
from argparse import Namespace
import numpy as np
from fairseq import metrics, search, utils
from fairseq.data import encoders
from fairseq.tasks import register_task
from fairseq.tasks.translation_multi_simple_epoch import TranslationMultiSimpleEpochTask
from fairseq.sequence_generator import SequenceGenerator
# BLEU tracks n-gram precisions up to order 4; this sizes the per-order
# count/total statistics that are summed across workers during validation.
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
@register_task('translation_multi_simple_epoch_extended')
class TranslationMultiSimpleEpochTaskExtended(TranslationMultiSimpleEpochTask):
    """
    Extended version of TranslationMultiSimpleEpochTask to support Validation using BLEU.
    """
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # options for reporting BLEU during validation
        TranslationMultiSimpleEpochTask.add_args(parser)
        # NOTE(review): "BLUE scoring" in the --eval-bleu-args help text looks
        # like a typo for "BLEU" (left as-is: help strings are runtime output).
        parser.add_argument('--eval-bleu', action='store_true',
                            help='evaluation with BLEU scores')
        parser.add_argument('--eval-bleu-detok', type=str, default="space",
                            help='detokenize before computing BLEU (e.g., "moses"); '
                                 'required if using --eval-bleu; use "space" to '
                                 'disable detokenization; see fairseq.data.encoders '
                                 'for other options')
        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
                            help='args for building the tokenizer, if needed')
        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
                            help='compute tokenized BLEU instead of sacrebleu')
        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
                            help='remove BPE before computing BLEU')
        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
                            help='generation args for BLUE scoring, '
                                 'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
        parser.add_argument('--eval-bleu-print-samples', action='store_true',
                            help='print sample generations during validation')
        parser.add_argument('--bleu-type', type=str, default='text',
                            choices=['text', 'code'],
                            help='compute bleu for text or code')
    def __init__(self, args, langs, dicts, training):
        # No extra state beyond the base task; kept for explicitness.
        super().__init__(args, langs, dicts, training)
    def build_model(self, args):
        """Build the model and, when --eval-bleu is set, also construct the
        detokenizer and the sequence generator used for validation BLEU."""
        model = super().build_model(args)
        if getattr(args, "eval_bleu", False):
            assert getattr(args, "eval_bleu_detok", None) is not None, (
                "--eval-bleu-detok is required if using --eval-bleu; "
                "try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
                "to disable detokenization, e.g., when using sentencepiece)"
            )
            detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
            self.tokenizer = encoders.build_tokenizer(
                Namespace(
                    tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
                )
            )
            gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
            self.sequence_generator = self._build_generator([model], Namespace(**gen_args))
        return model
    def _build_generator(
        self,
        models,
        args,
        seq_gen_cls=None,
        extra_gen_cls_kwargs=None,
    ):
        """Build a SequenceGenerator that strips target-language tokens
        (langtoks) from its output when the task prepends them."""
        if not getattr(args, "keep_inference_langtok", False):
            _, tgt_langtok_spec = self.args.langtoks["main"]
            if tgt_langtok_spec:
                # Collect the decoder langtok symbols for every possible
                # target language so they can be stripped from hypotheses.
                if self.training:
                    tgt_langs = list({
                        lang_pair.split("-")[1] for lang_pair in self.lang_pairs
                    })
                else:
                    tgt_langs = [self.args.target_lang]
                tgt_lang_toks = {
                    self.data_manager.get_decoder_langtok(l, tgt_langtok_spec) for l in tgt_langs
                }
                extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
                extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {*tgt_lang_toks}
        search_strategy = search.BeamSearch(self.target_dictionary)
        return SequenceGenerator(
            models,
            self.target_dictionary,
            beam_size=getattr(args, "beam", 5),
            max_len_a=getattr(args, "max_len_a", 0),
            max_len_b=getattr(args, "max_len_b", 200),
            min_len=getattr(args, "min_len", 1),
            normalize_scores=(not getattr(args, "unnormalized", False)),
            len_penalty=getattr(args, "lenpen", 1),
            unk_penalty=getattr(args, "unkpen", 0),
            temperature=getattr(args, "temperature", 1.0),
            match_source_len=getattr(args, "match_source_len", False),
            no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
            search_strategy=search_strategy,
            **extra_gen_cls_kwargs,
        )
    def valid_step(self, sample, model, criterion):
        """Run the base validation step and, when --eval-bleu is set, attach
        per-order BLEU sufficient statistics to the logging output."""
        loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
        if self.args.eval_bleu:
            bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
            # logger.info('Counts: %s\tTotal: %s' % (str(bleu.counts), str(bleu.totals)))
            logging_output["_bleu_sys_len"] = bleu.sys_len
            logging_output["_bleu_ref_len"] = bleu.ref_len
            # we split counts into separate entries so that they can be
            # summed efficiently across workers using fast-stat-sync
            assert len(bleu.counts) == EVAL_BLEU_ORDER
            for i in range(EVAL_BLEU_ORDER):
                logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
                logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
        return loss, sample_size, logging_output
    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate the logged BLEU statistics across workers and register a
        derived 'bleu' metric computed from the summed counts/totals."""
        super().reduce_metrics(logging_outputs, criterion)
        if self.args.eval_bleu:
            def sum_logs(key):
                # Sum one statistic over all workers' logging outputs
                return sum(log.get(key, 0) for log in logging_outputs)
            counts, totals = [], []
            for i in range(EVAL_BLEU_ORDER):
                counts.append(sum_logs("_bleu_counts_" + str(i)))
                totals.append(sum_logs("_bleu_totals_" + str(i)))
            if max(totals) > 0:
                # log counts as numpy arrays -- log_scalar will sum them correctly
                metrics.log_scalar("_bleu_counts", np.array(counts))
                metrics.log_scalar("_bleu_totals", np.array(totals))
                metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
                metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
                def compute_bleu(meters):
                    # Recompute corpus BLEU from the aggregated statistics;
                    # the keyword for smoothing differs across sacrebleu versions.
                    import inspect
                    import sacrebleu
                    fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
                    if "smooth_method" in fn_sig:
                        smooth = {"smooth_method": "exp"}
                    else:
                        smooth = {"smooth": "exp"}
                    bleu = sacrebleu.compute_bleu(
                        correct=meters["_bleu_counts"].sum,
                        total=meters["_bleu_totals"].sum,
                        sys_len=meters["_bleu_sys_len"].sum,
                        ref_len=meters["_bleu_ref_len"].sum,
                        **smooth
                    )
                    return round(bleu.score, 2)
                metrics.log_derived("bleu", compute_bleu)
    def _inference_with_bleu(self, generator, sample, model):
        """Generate hypotheses for ``sample`` (forcing the target-language
        prefix token) and score them against the references with sacrebleu."""
        import sacrebleu
        def decode(toks, escape_unk=False):
            s = self.target_dictionary.string(
                toks.int().cpu(),
                self.args.eval_bleu_remove_bpe,
                # The default unknown string in fairseq is `<unk>`, but
                # this is tokenized by sacrebleu as `< unk >`, inflating
                # BLEU scores. Instead, we use a somewhat more verbose
                # alternative that is unlikely to appear in the real
                # reference, but doesn't get split into multiple tokens.
                unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
            )
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s
        target = sample['target']
        # First target token is used as the forced decoding prefix
        # (presumably the target-language token -- confirm data format).
        prefix_token = target[:, 0].unsqueeze(-1)
        gen_out = self.inference_step(generator, [model], sample, prefix_tokens=prefix_token)
        hyps, refs = [], []
        for i in range(len(gen_out)):
            decoded_hyp = decode(gen_out[i][0]["tokens"])
            decoded_ref = decode(
                utils.strip_pad(sample["target"][i], self.target_dictionary.pad()),
                escape_unk=True,  # don't count <unk> as matches to the hypo
            )
            hyps.append(decoded_hyp)
            refs.append(decoded_ref)
        if self.args.eval_bleu_print_samples:
            logger.info("example hypothesis: " + hyps[0])
            logger.info("example reference: " + refs[0])
        if self.args.eval_tokenized_bleu:
            return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
        else:
            return sacrebleu.corpus_bleu(hyps, [refs])
| [
"fairseq.tasks.translation_multi_simple_epoch.TranslationMultiSimpleEpochTask.add_args",
"argparse.Namespace",
"inspect.getfullargspec",
"fairseq.tasks.register_task",
"sacrebleu.compute_bleu",
"sacrebleu.corpus_bleu",
"fairseq.search.BeamSearch",
"numpy.array",
"fairseq.metrics.log_derived",
"log... | [((372, 399), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'import logging\n'), ((403, 459), 'fairseq.tasks.register_task', 'register_task', (['"""translation_multi_simple_epoch_extended"""'], {}), "('translation_multi_simple_epoch_extended')\n", (416, 459), False, 'from fairseq.tasks import register_task\n'), ((815, 863), 'fairseq.tasks.translation_multi_simple_epoch.TranslationMultiSimpleEpochTask.add_args', 'TranslationMultiSimpleEpochTask.add_args', (['parser'], {}), '(parser)\n', (855, 863), False, 'from fairseq.tasks.translation_multi_simple_epoch import TranslationMultiSimpleEpochTask\n'), ((4354, 4395), 'fairseq.search.BeamSearch', 'search.BeamSearch', (['self.target_dictionary'], {}), '(self.target_dictionary)\n', (4371, 4395), False, 'from fairseq import metrics, search, utils\n'), ((9407, 9459), 'sacrebleu.corpus_bleu', 'sacrebleu.corpus_bleu', (['hyps', '[refs]'], {'tokenize': '"""none"""'}), "(hyps, [refs], tokenize='none')\n", (9428, 9459), False, 'import sacrebleu\n'), ((9493, 9528), 'sacrebleu.corpus_bleu', 'sacrebleu.corpus_bleu', (['hyps', '[refs]'], {}), '(hyps, [refs])\n', (9514, 9528), False, 'import sacrebleu\n'), ((3406, 3427), 'argparse.Namespace', 'Namespace', ([], {}), '(**gen_args)\n', (3415, 3427), False, 'from argparse import Namespace\n'), ((7736, 7777), 'fairseq.metrics.log_derived', 'metrics.log_derived', (['"""bleu"""', 'compute_bleu'], {}), "('bleu', compute_bleu)\n", (7755, 7777), False, 'from fairseq import metrics, search, utils\n'), ((6701, 6717), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (6709, 6717), True, 'import numpy as np\n'), ((6770, 6786), 'numpy.array', 'np.array', (['totals'], {}), '(totals)\n', (6778, 6786), True, 'import numpy as np\n'), ((7352, 7538), 'sacrebleu.compute_bleu', 'sacrebleu.compute_bleu', ([], {'correct': "meters['_bleu_counts'].sum", 'total': "meters['_bleu_totals'].sum", 'sys_len': "meters['_bleu_sys_len'].sum", 
'ref_len': "meters['_bleu_ref_len'].sum"}), "(correct=meters['_bleu_counts'].sum, total=meters[\n '_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=\n meters['_bleu_ref_len'].sum, **smooth)\n", (7374, 7538), False, 'import sacrebleu\n'), ((7090, 7136), 'inspect.getfullargspec', 'inspect.getfullargspec', (['sacrebleu.compute_bleu'], {}), '(sacrebleu.compute_bleu)\n', (7112, 7136), False, 'import inspect\n')] |
from collections import defaultdict
from time import sleep
import numpy as np
import tensorflow as tf
from scipy.misc import imresize
import matplotlib.pyplot as plt
from rllab.envs.mujoco.reach_env import ReachEnv
from rllab.envs.mujoco.arm_env import COLORS, ALL_CONDITIONS
from modular.modular_architecture import construct_network
from modular.get_rollouts import good_environments, get_rollouts, run_sim_policy
from modular.display_network import show_graph
def train_network(sess, env, net, iterations, cost_callback=print):
    """Train `net` on rollouts collected from `env` for `iterations` Adam steps.

    Args:
        sess: TensorFlow session used for all graph evaluations.
        env: environment instance; must expose `number_links` and
            `tensorcloud_key` (used to select the matching net tensors).
        net: network handle with `input`, `label` and `loss` tensor maps,
            as built by `construct_network`.
        iterations (int): number of gradient steps to run.
        cost_callback: called as `cost_callback(env, cost)` after every step;
            defaults to `print`.
    """
    print("Start training", env)
    data_block_locations, data_robot_end_effector, data_joint_angles, data_image, action, last_images = get_rollouts(env)
    print("Get rollout")
    def resize_all(data_images):
        # Downscale rollout frames to the network's 80x80 RGB input size.
        return np.array([imresize(x, (80, 80, 3)) for x in data_images])
    # Sample 30 of the 150 rollout timesteps; note this samples WITH
    # replacement (no replace=False) — presumably intentional subsampling.
    sample_time = list(np.random.choice(list(range(150)), 30))
    print("Sampled time")
    feed_dict = {
        # net.input.obs_image[()] : resize_all(data_image)[sample_time],
        net.input.block_locations[()] : data_block_locations[sample_time],
        net.input.joint_angles[env.number_links] : data_joint_angles[sample_time],
        net.label.action[env.number_links]: action[sample_time],
        # net.label.end_image[()] : resize_all(last_images)[sample_time]
    }
    print("Construct feed dictionary")
    # State-input / joint-output action loss for this environment's key.
    cost = net.loss.action[("state",) + env.tensorcloud_key]
    #get_unified_cost(env, net.loss.action, net.loss.end_image, input_types=["state"], output_types=["joint"])
    with tf.device('/gpu:0'):
        train_op = initialized_adam(sess, cost)
    print("Initialize adam")
    for _ in range(iterations):
        sess.run(train_op, feed_dict=feed_dict)
        cost_callback(env, sess.run(cost, feed_dict=feed_dict))
def get_unified_cost(env, loss_joint, loss_image, input_types=("state", "images"), output_types=("joint", "end_image")):
    """Sum the selected loss terms for `env` over all input/output type pairs.

    Bug fix: the image-loss normalizer used `256^2`, which in Python is
    bitwise XOR (so the divisor was `(80*80*3*256) ^ 2`), not the intended
    `256**2`. The divisor is now `80*80*3*256**2` — pixels times the squared
    8-bit value range — so the image loss is scaled comparably to the joint
    loss.

    Args:
        env: environment exposing `tensorcloud_key` and `task_type_key`.
        loss_joint: mapping `(input_type,) + env.tensorcloud_key -> loss`.
        loss_image: mapping `(input_type,) + env.task_type_key -> loss`.
        input_types: input modalities to include.
        output_types: output heads to include ("joint" and/or "end_image").

    Returns:
        The summed (weighted) loss over all requested combinations.
    """
    # 1e-2 weight, normalized by pixel count and squared 8-bit range.
    image_scale = 1e-2 / (80 * 80 * 3 * 256 ** 2)

    def loss_term(vals, output_type):
        print(vals, output_type)  # progress trace, kept from the original
        if output_type == "joint":
            return loss_joint[(vals,) + env.tensorcloud_key]
        return image_scale * loss_image[(vals,) + env.task_type_key]

    return sum(loss_term(vals, output_type)
               for output_type in output_types
               for vals in input_types)
def initialized_adam(sess, cost, *args, **kwargs):
    """Build an Adam minimize op for `cost` and initialize only the variables
    the optimizer just created (existing variables are left untouched).

    Extra positional/keyword arguments are forwarded to `tf.train.AdamOptimizer`.
    """
    existing_variables = set(tf.all_variables())
    minimize_op = tf.train.AdamOptimizer(*args, **kwargs).minimize(cost)
    new_variables = set(tf.all_variables()) - existing_variables
    sess.run(tf.initialize_variables(new_variables))
    return minimize_op
def main():
    """Build the modular network, then repeatedly train it on every usable
    reach environment while live-plotting normalized training cost curves."""
    plt.ion()
    sess = tf.Session()
    net = construct_network(
        hidden_size=64, number_layers=1,
        conv_size=64, n_conv_layers=2,
        image_width=80, image_height=80)
    print("Constructed Network")
    show_graph(path="/home/abhigupta/log.html")
    print("Graph shown")
    sess.run(tf.variables_initializer(tf.trainable_variables()))
    print("Initialize all variables")
    # Enumerate every 3D reach environment over all conditions and 3-5 links.
    environments = [env
        for cls in (ReachEnv,)
        for condition in range(len(ALL_CONDITIONS))
        for num_links in (3, 4, 5)
        for env in cls.all_envs(is_3d=True, condition=condition, number_links=num_links)]
    envs = good_environments(environments, plot=False)
    print("Filtered environments")
    # env name -> list of per-step costs across all passes.
    training_trajectories = defaultdict(list)
    # env name -> first observed cost, used to normalize the plotted curves.
    starting_cost = {}
    for _ in range(100):
        for env in envs:
            def cost_callback(env, cost):
                # Print a progress tick every 100 recorded costs.
                if len(training_trajectories[str(env)]) % 100 == 0:
                    print("*", end="")
                if str(env) not in starting_cost:
                    starting_cost[str(env)] = cost
                training_trajectories[str(env)].append(cost)
            print("Start iteration")
            train_network(sess, env(), net, 1000, cost_callback=cost_callback)
            print()
            # Redraw all cost curves, each normalized by its starting cost.
            plt.cla()
            for env_name, trajectory in training_trajectories.items():
                plt.plot(np.array(trajectory) / starting_cost[env_name], label=env_name)
            plt.legend(bbox_to_anchor=(3, 0))
            plt.draw()
            plt.pause(1)
            print("Finished iteration")
if __name__ == '__main__':
    main()
| [
"modular.display_network.show_graph",
"tensorflow.trainable_variables",
"tensorflow.all_variables",
"matplotlib.pyplot.legend",
"tensorflow.device",
"tensorflow.Session",
"modular.get_rollouts.good_environments",
"collections.defaultdict",
"matplotlib.pyplot.ion",
"modular.get_rollouts.get_rollout... | [((671, 688), 'modular.get_rollouts.get_rollouts', 'get_rollouts', (['env'], {}), '(env)\n', (683, 688), False, 'from modular.get_rollouts import good_environments, get_rollouts, run_sim_policy\n'), ((2505, 2514), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2512, 2514), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2538), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2536, 2538), True, 'import tensorflow as tf\n'), ((2549, 2667), 'modular.modular_architecture.construct_network', 'construct_network', ([], {'hidden_size': '(64)', 'number_layers': '(1)', 'conv_size': '(64)', 'n_conv_layers': '(2)', 'image_width': '(80)', 'image_height': '(80)'}), '(hidden_size=64, number_layers=1, conv_size=64,\n n_conv_layers=2, image_width=80, image_height=80)\n', (2566, 2667), False, 'from modular.modular_architecture import construct_network\n'), ((2726, 2769), 'modular.display_network.show_graph', 'show_graph', ([], {'path': '"""/home/abhigupta/log.html"""'}), "(path='/home/abhigupta/log.html')\n", (2736, 2769), False, 'from modular.display_network import show_graph\n'), ((3189, 3232), 'modular.get_rollouts.good_environments', 'good_environments', (['environments'], {'plot': '(False)'}), '(environments, plot=False)\n', (3206, 3232), False, 'from modular.get_rollouts import good_environments, get_rollouts, run_sim_policy\n'), ((3296, 3313), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3307, 3313), False, 'from collections import defaultdict\n'), ((1522, 1541), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1531, 1541), True, 'import tensorflow as tf\n'), ((2304, 2322), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (2320, 2322), True, 'import tensorflow as tf\n'), ((2341, 2380), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['*args'], {}), '(*args, **kwargs)\n', (2363, 2380), True, 'import tensorflow as tf\n'), ((2833, 2857), 
'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2855, 2857), True, 'import tensorflow as tf\n'), ((3846, 3855), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3853, 3855), True, 'import matplotlib.pyplot as plt\n'), ((4028, 4061), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(3, 0)'}), '(bbox_to_anchor=(3, 0))\n', (4038, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4074, 4084), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4082, 4084), True, 'import matplotlib.pyplot as plt\n'), ((4097, 4109), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (4106, 4109), True, 'import matplotlib.pyplot as plt\n'), ((772, 796), 'scipy.misc.imresize', 'imresize', (['x', '(80, 80, 3)'], {}), '(x, (80, 80, 3))\n', (780, 796), False, 'from scipy.misc import imresize\n'), ((2437, 2455), 'tensorflow.all_variables', 'tf.all_variables', ([], {}), '()\n', (2453, 2455), True, 'import tensorflow as tf\n'), ((3952, 3972), 'numpy.array', 'np.array', (['trajectory'], {}), '(trajectory)\n', (3960, 3972), True, 'import numpy as np\n')] |
# Function for testing models
import numpy as np
import torch
def test(data, model, pad_idx, device, args):
model.eval()
with torch.no_grad():
all_correct_trials = [] # list of booleans indicating whether correct
for batch in data:
out, attn_wts = model(batch.src, batch.trg)
preds = torch.argmax(out,dim=2)
correct_pred = preds == batch.trg
correct_pred = correct_pred.cpu().numpy()
mask = batch.trg == pad_idx # mask out padding
mask = mask.cpu().numpy()
correct = np.logical_or(mask,correct_pred)
correct = correct.all(0).tolist()
all_correct_trials += correct
accuracy = np.mean(all_correct_trials)
model.train()
return accuracy
| [
"torch.argmax",
"numpy.mean",
"torch.no_grad",
"numpy.logical_or"
] | [((714, 741), 'numpy.mean', 'np.mean', (['all_correct_trials'], {}), '(all_correct_trials)\n', (721, 741), True, 'import numpy as np\n'), ((136, 151), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (149, 151), False, 'import torch\n'), ((334, 358), 'torch.argmax', 'torch.argmax', (['out'], {'dim': '(2)'}), '(out, dim=2)\n', (346, 358), False, 'import torch\n'), ((577, 610), 'numpy.logical_or', 'np.logical_or', (['mask', 'correct_pred'], {}), '(mask, correct_pred)\n', (590, 610), True, 'import numpy as np\n')] |
import numpy as np
from scipy.linalg import lstsq
from qml.fchl import get_local_kernels
from qml.fchl import get_atomic_local_kernels
from qml.fchl import get_atomic_local_gradient_kernels
# Default fchl kernel values
KERNEL_ARGS = {
    "verbose": False,
    # Very large cut-off effectively disables distance truncation.
    "cut_distance": 1e6,
    "kernel": "gaussian",
    "kernel_args": {
        "sigma": [0.64],
    },
}
# Finite-difference displacement step used by the gradient kernels.
DX = 0.005
def get_kernel(
        representations_a,
        representations_b,
        displaced_representations_a,
        displaced_representations_b,
        kernel_args=KERNEL_ARGS,
        dx=DX):
    """Build the FCHL property kernel and its finite-difference gradient kernel.

    Bug fix: the `kernel_args` argument is now actually forwarded to the qml
    routines. Previously the module-level `KERNEL_ARGS` was always used, so
    any caller-supplied kernel settings were silently ignored. Behavior with
    the default argument is unchanged.

    Args:
        representations_a, representations_b: FCHL representation arrays.
        displaced_representations_a: unused here (kept for interface
            symmetry with the *_b argument).
        displaced_representations_b: displaced representations used for the
            gradient kernel.
        kernel_args (dict): keyword arguments for the qml kernel functions.
        dx (float): finite-difference step for the gradient kernel.

    Returns:
        (kernel_property, kernel_derivative) matrices.
    """
    kernel_property = get_atomic_local_kernels(
        representations_a, representations_b, **kernel_args)
    kernel_derivative = get_atomic_local_gradient_kernels(
        representations_a, displaced_representations_b, dx=dx, **kernel_args)
    return kernel_property, kernel_derivative
def get_alphas(
        kernel_property,
        kernel_derivative,
        energies_list,
        forces_list):
    """Solve the least-squares system stacking energy and force equations.

    The design matrix stacks the transposed property and derivative kernels;
    the target vector stacks the energies and the flattened forces. Solved
    with SciPy's gelsd driver and a small conditioning cut-off.

    Returns:
        The coefficient vector `alphas`.
    """
    all_forces = np.concatenate(forces_list)
    targets = np.concatenate((energies_list, all_forces.flatten()))
    design = np.concatenate((kernel_property.T, kernel_derivative.T))
    solution = lstsq(design, targets, cond=1e-9, lapack_driver="gelsd")
    return solution[0]
return alphas
| [
"qml.fchl.get_atomic_local_kernels",
"numpy.concatenate",
"qml.fchl.get_atomic_local_gradient_kernels",
"scipy.linalg.lstsq"
] | [((573, 650), 'qml.fchl.get_atomic_local_kernels', 'get_atomic_local_kernels', (['representations_a', 'representations_b'], {}), '(representations_a, representations_b, **KERNEL_ARGS)\n', (597, 650), False, 'from qml.fchl import get_atomic_local_kernels\n'), ((677, 784), 'qml.fchl.get_atomic_local_gradient_kernels', 'get_atomic_local_gradient_kernels', (['representations_a', 'displaced_representations_b'], {'dx': 'dx'}), '(representations_a,\n displaced_representations_b, dx=dx, **KERNEL_ARGS)\n', (710, 784), False, 'from qml.fchl import get_atomic_local_gradient_kernels\n'), ((947, 974), 'numpy.concatenate', 'np.concatenate', (['forces_list'], {}), '(forces_list)\n', (961, 974), True, 'import numpy as np\n'), ((1047, 1103), 'numpy.concatenate', 'np.concatenate', (['(kernel_property.T, kernel_derivative.T)'], {}), '((kernel_property.T, kernel_derivative.T))\n', (1061, 1103), True, 'import numpy as np\n'), ((1151, 1197), 'scipy.linalg.lstsq', 'lstsq', (['C', 'Y'], {'cond': '(1e-09)', 'lapack_driver': '"""gelsd"""'}), "(C, Y, cond=1e-09, lapack_driver='gelsd')\n", (1156, 1197), False, 'from scipy.linalg import lstsq\n')] |
# 2020.09.18
# finalized 2020.09.29
# @yifan
#
# plot and evl functions
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import cv2
import os
import copy
import pickle
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from utli import *
from evaluate import PSNR, SSIM
from main import main
# Repository-relative root for data/result paths.
root = '../'
# Per-dataset relative path and (height, width) of the source images.
myDataset = {'Kodak':{'path':'data/Kodak/', 'size':(512, 768)},
             'DIV2K':{'path':'data/DIV2K/', 'size':(512, 512)},
             'Duck':{'path':'data/Duck/', 'size':(1024, 1792)},
             'NB':{'path':'data/NormalizedBrodatz/', 'size':(640,640)}}
# compute PSNR, SSIM in one folder
def cal(pathraw, pathres, start, end, name='tmp', write=False, QF_s=10, QF_e=100, QF_step=10, verbose=True):
    """Average PSNR/SSIM of decoded images against references, per quality factor.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt) is
    narrowed to `except Exception:`; `== True` comparisons replaced by plain
    truth tests. The best-effort skip of missing images is preserved.

    Args:
        pathraw: directory holding reference images named '<i>.bmp'.
        pathres: result directory containing 'decode/<i>_<N>.bmp'.
        start, end: image index range [start, end).
        name: basename for the optional '../fig/<name>.pkl' dump.
        write: if truthy, pickle {'N','PSNR','SSIM'} after each quality factor.
        QF_s, QF_e, QF_step: quality-factor range passed to range().
        verbose: print per-image metrics when truthy.

    Returns:
        (Nl, psnr, ssim): quality factors with their mean PSNR and mean SSIM.
    """
    Nl, psnr, ssim = [], [], []
    for N in range(QF_s, QF_e, QF_step):
        ps, ss = [], []
        print('    Start @N='+str(N))
        for i in range(start, end):
            try:
                ref = cv2.imread(pathraw+str(i)+'.bmp')
                dec = cv2.imread(pathres+'decode/'+str(i)+'_'+str(N)+'.bmp')
                ps.append(PSNR(ref, dec))
                ss.append(SSIM(ref, dec))
                if verbose:
                    print('    <INFO> image '+str(i)+' @N='+str(N)+' PSNR='+str(ps[-1])+' SSIM='+str(ss[-1]))
            except Exception:
                # Best-effort: skip images that are missing or fail to decode.
                continue
        psnr.append(np.mean(ps))
        ssim.append(np.mean(ss))
        Nl.append(N)
        print('<Summary> PSNR='+str(psnr[-1])+' SSIM='+str(ssim[-1])+' @N='+str(Nl[-1]))
        if write:
            with open('../fig/'+name+'.pkl', 'wb') as f:
                pickle.dump({'N':Nl, 'PSNR':psnr, 'SSIM':ssim}, f)
            print('<INFO> Write to file: '+'../fig/'+name+'.pkl')
    return Nl, psnr, ssim
def load(dataset='kodak'):
    """Read cached metrics for `dataset` from '../fig/<dataset>.pkl'.

    Returns:
        Six numpy arrays: (N, Nt, PSNR, PSNRt, SSIM, SSIMt) — quality
        factors and metrics for the train and test splits.
    """
    with open('../fig/' + dataset + '.pkl', 'rb') as handle:
        cached = pickle.load(handle)
    keys = ('N', 'Nt', 'PSNR', 'PSNRt', 'SSIM', 'SSIMt')
    return tuple(np.array(cached[key]) for key in keys)
# opt K computed at Qf=k, images encoded at same Qf
# data[0] is for raw jpeg
def N_diag(N, data):
    """Pick entries where the kernel quality factor matches the encode one.

    Row 0 of both tables holds plain JPEG, so row i+1 corresponds to the
    kernel computed at column i's quality factor — the "diagonal".

    Returns:
        (N_values, data_values) lists for the matching rows/columns.
    """
    matching = range(1, N.shape[1])
    picked_N = [N[i + 1, i] for i in matching]
    picked_data = [data[i + 1, i] for i in matching]
    return picked_N, picked_data
def jpeg(N, data):
    """Return the plain-JPEG baseline: row 0 of both tables."""
    baseline_qf, baseline_metric = N[0], data[0]
    return baseline_qf, baseline_metric
# plt opt K computed at Qf=k, images encoded at same Qf
def plt_NvN(p_s_n_r=True, train=True):
    """Plot metric-vs-quality-factor curves (Kodak, then DIV2K) where the
    optimal inverse kernel was computed at the same quality factor the
    images were encoded with (the table "diagonal", see N_diag).

    Args:
        p_s_n_r: truthy -> plot PSNR; falsy -> plot SSIM.
        train: truthy -> training-split curves; falsy -> test-split curves.
    """
    N, Nt, psnr, psnrt, ssim, ssimt = load('kodak')
    if p_s_n_r == False:
        # Reuse the PSNR variable names to carry the SSIM series below.
        psnr = ssim
        psnrt = ssimt
    figure(num=None, figsize=(11, 8), dpi=200, facecolor='w', edgecolor='k')
    if train == True:
        x, y = jpeg(N, psnr)
        plt.plot(x[3:-1], y[3:-1], label='JPEG (Kodak train)', color='b', alpha=0.3)
        x, y = N_diag(N, psnr)
        plt.plot(x[1:-1], y[1:-1], label='(ours) (Kodak train)', color='r')
    else:
        x, y = jpeg(Nt, psnrt)
        plt.plot(x[2:], y[2:], label='JPEG', color='b', alpha=0.3)
        x1, y1 = N_diag(Nt, psnrt)
        plt.plot(x1[1:], y1[1:], label='Ours', color='r')
        # Gain of our decoder over plain JPEG at each quality factor.
        print(x1, y1-y[1:])
    plt.xlabel('Quality Factor')
    if p_s_n_r == False:
        plt.ylabel('SSIM')
    else:
        plt.ylabel('PSNR (dB)')
    plt.legend(prop={'size': 12})
    plt.show()
    # Second figure: the same comparison on DIV2K.
    figure(num=None, figsize=(11, 8), dpi=200, facecolor='w', edgecolor='k')
    N, Nt, psnr, psnrt, ssim, ssimt = load('div2k')
    if p_s_n_r == False:
        psnr = ssim
        psnrt = ssimt
    if train == True:
        plt.plot(N[0][1:-1], psnr[0][1:-1], label='JPEG (DIV2K train)',color='m', alpha=0.3)
        plt.plot(N[1][1:-1], psnr[1][1:-1], label='(ours) (DIV2K train)',color='g')
    else:
        plt.plot(N[0][1:], psnrt[0][1:], label='JPEG',color='g', alpha=0.3)
        plt.plot(N[1][1:], psnrt[1][1:], label='Ours',color='m')
        print(N[0][1:], psnrt[1][1:]-psnrt[0][1:])
    plt.xlabel('Quality Factor')
    if p_s_n_r == False:
        plt.ylabel('SSIM')
    else:
        plt.ylabel('PSNR (dB)')
    plt.legend(prop={'size': 12})
    plt.show()
# save images with PSNR larger than <th>
def plt_image_block(dataset, win, th=0.8, folder='kodak_opt_inv@N=50', s=10, num=23):
    """Dump image blocks where the optimized decoder beats raw JPEG by > `th` dB.

    For each image index in [s, num), compares the optimized decode against
    the raw-JPEG decode (both at quality factor 70); when the whole-image
    PSNR gap exceeds 0.1 dB, splits the images into non-overlapping
    win x win blocks and writes raw/jpeg/optimized versions of every block
    whose per-block PSNR gain exceeds `th` to '../result/tmp/'.

    Args:
        dataset: dataset subfolder under '../data/' and '../result/'.
        win (int): block side length in pixels.
        th (float): per-block PSNR-gain threshold (dB) for saving.
        folder: result subfolder with the optimized decodes.
        s, num: image index range [s, num).
    """
    for i in range(s, num):
        try:
            x = cv2.imread('../data/'+dataset+'/'+str(i)+'.bmp')
            y = cv2.imread('../result/'+dataset+'/'+folder+'/decode/'+str(i)+'_70.bmp')
            z = cv2.imread('../result/'+dataset+'/jpeg_raw/decode/'+str(i)+'_70.bmp')
            print(i, '----------------',PSNR(x,y) - PSNR(x,z))
            if abs(PSNR(x,z) - PSNR(x,y))>0.1:
                # Non-overlapping win x win RGB blocks (stride == window).
                x = view_as_windows(x, (win,win,3), (win,win,3))
                x = x.reshape(-1, win, win, 3)
                y = view_as_windows(y, (win,win,3), (win,win,3)).reshape(-1, win, win, 3)
                z = view_as_windows(z, (win,win,3), (win,win,3)).reshape(-1, win, win, 3)
                for j in range(x.shape[0]):
                    # Per-block PSNR gain of the optimized decode over JPEG.
                    c = PSNR(x[j],y[j]) - PSNR(x[j],z[j])
                    if c > th:
                        print(c)
                        cv2.imwrite('../result/tmp/'+str(i)+'_'+str(j)+'_opt'+str(c)+'.png', y[j])
                        cv2.imwrite('../result/tmp/'+str(i)+'_'+str(j)+'_jpg.png', z[j])
                        cv2.imwrite('../result/tmp/'+str(i)+'_'+str(j)+'_raw.png', x[j])
        # Best-effort: silently skip indices whose images are missing.
        except:
            continue
# draw correlation matrix between kerenl computed at different Qf
def draw_dist_png():
    """Surface-plot the kernel L2-distance matrix cached in '../fig/corr.pkl'.

    Axes are the two quality factors (5, 15, ..., 95) the compared kernels
    were computed at; height is their L2 distance.
    """
    with open('../fig/corr.pkl', 'rb') as handle:
        distances = pickle.load(handle)
    quality_factors = np.array(range(5, 100, 10))
    grid_x, grid_y = np.meshgrid(quality_factors, quality_factors)
    fig = plt.figure(figsize=(16, 8))
    axes = fig.gca(projection='3d')
    axes.plot_surface(grid_x, grid_y, distances, cmap=cm.coolwarm,
                      linewidth=0, antialiased=False)
    axes.zaxis.set_major_locator(LinearLocator(10))
    axes.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    axes.set_xlabel('Quality Factor')
    axes.set_ylabel('Quality Factor')
    axes.set_zlabel('L2 Distance')
    plt.show()
# one Qf for images encoded at all Qf
def plt_N_all(name='div2k_kodak@N=80', p_s_n_r=0):
    """Plot JPEG vs. our decoder when one kernel quality factor is applied
    to images encoded at all quality factors (hard-coded DIV2K numbers).

    Args:
        name: unused here; kept for interface compatibility.
        p_s_n_r: selects which hard-coded series is drawn.

    NOTE(review): mirrored from the original — when `p_s_n_r` is falsy the
    PSNR numbers are plotted while the y-label says 'SSIM' (and vice
    versa); the mapping looks inverted relative to the names — confirm.
    """
    figure(num=None, figsize=(16, 12), dpi=200, facecolor='w', edgecolor='k')
    ours_qf = [50, 60, 70, 80, 90]
    jpeg_qf = [50, 70, 90]
    jpeg_psnr = [33.814829041420815, 34.72220158638222, 37.28080544370594]
    ours_psnr = [33.88332856858257, 34.27231624600438, 34.850441813588006, 35.703367093531334, 37.336899920746575]
    jpeg_ssim = [0.9100433641797571, 0.9296664839477011, 0.9615681240089557]
    ours_ssim = [0.9114377837974508, 0.9210319443138413, 0.9313069519843874, 0.9455445405903505, 0.9625424889039493]
    if p_s_n_r == False:
        jpeg_series, ours_series = jpeg_psnr, ours_psnr
    else:
        jpeg_series, ours_series = jpeg_ssim, ours_ssim
    plt.plot(jpeg_qf, jpeg_series, label='JPEG (DIV2K)', color='m', alpha=0.3)
    plt.plot(ours_qf, ours_series, label='ours (DIV2K)', color='g')
    plt.xlabel('Quality Factor')
    if p_s_n_r == False:
        plt.ylabel('SSIM')
    else:
        plt.ylabel('PSNR (dB)')
    plt.legend()
    plt.show()
def run(dataset, opt):
    """Evaluate one result folder on the train and test splits; cache test metrics.

    Writes the test-split metrics to '../fig/<dataset>_<opt>tmp.pkl' and
    returns (0, 0, 0, Nlt, psnrt, ssimt) — the train-split slots are zeroed.
    """
    raw_dir = '../data/' + dataset + '/'
    result_dir = '../result/' + dataset + '/' + opt + '/'
    # The train-split run is kept for its console output only.
    cal(raw_dir, result_dir, start=0, end=100, name=opt + '_train',
        write=0, QF_s=10, QF_e=91, QF_step=20)
    Nlt, psnrt, ssimt = cal(raw_dir, result_dir, start=100, end=785,
                            name=opt + '_test', write=0,
                            QF_s=90, QF_e=91, QF_step=20)
    with open('../fig/' + dataset + '_' + opt + 'tmp.pkl', 'wb') as cache:
        pickle.dump({'N': 0, 'PSNR': 0, 'SSIM': 0,
                     'Nt': Nlt, 'PSNRt': psnrt, 'SSIMt': ssimt}, cache)
    return 0, 0, 0, Nlt, psnrt, ssimt
def multi_run(dataset='DIV2K'):
    """Run `run` for raw JPEG and the optimized decoder, caching all metrics
    to '../fig/div2k.pkl'."""
    keys = ('N', 'PSNR', 'SSIM', 'Nt', 'PSNRt', 'SSIMt')
    collected = {'N': [], 'PSNR': [], 'SSIM': [],
                 'Nt': [], 'PSNRt': [], 'SSIMt': []}
    def record(values):
        # Append each of the six returned values to its metric list.
        for key, value in zip(keys, values):
            collected[key].append(value)
    record(run(dataset, opt='jpeg_raw'))
    for qf in range(50, 51, 10):
        print(qf, '----------------------------------')
        record(run(dataset, opt='div2k'))
    with open('../fig/div2k.pkl', 'wb') as cache:
        pickle.dump(collected, cache)
if __name__ == "__main__":
    # Earlier experiments, kept commented out for reference:
    #plt_NvN(p_s_n_r=0, train=0)
    #plt_NvN(p_s_n_r=0, train=1)
    #plt_NvN(p_s_n_r=1, train=0)
    #plt_NvN(p_s_n_r=1, train=1)
    #plt_N_all(p_s_n_r=0)
    #plt_N_all(p_s_n_r=1)
    #draw_corr('Kodak')
    #plt_image_block('DIV2K', 32, th=0.6, folder='@N=70', s=100, num=200)
    #a,b,c,d,e,f = run('DIV2K', opt='@N=50')
    # Quality factor under evaluation; QF_e=a+2 makes range() yield only `a`.
    a = 90
    # NOTE(review): absolute, machine-specific paths — parameterize before reuse.
    cal(pathraw="/Users/alex/Desktop/proj/compression/data/Kodak/Kodak/",
        pathres="/Users/alex/Documents/GitHub/Optimal_Inverse/result/Kodak/jpeg_raw/",
        start=10, end=24, name='tmp', write=False, QF_s=a, QF_e=a+2, QF_step=10, verbose=0)
    cal(pathraw="/Users/alex/Desktop/proj/compression/data/Kodak/Kodak/",
        pathres="/Users/alex/Documents/GitHub/Optimal_Inverse/result/Kodak/@N="+str(a)+"/",
        start=10, end=24, name='tmp', write=False, QF_s=a, QF_e=a+2, QF_step=10, verbose=0)
| [
"evaluate.PSNR",
"pickle.dump",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.array",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"numpy.mean",
"evaluate.SSIM",
"matplotlib.pypl... | [((2701, 2773), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 8)', 'dpi': '(200)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(11, 8), dpi=200, facecolor='w', edgecolor='k')\n", (2707, 2773), False, 'from matplotlib.pyplot import figure\n'), ((3251, 3279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Quality Factor"""'], {}), "('Quality Factor')\n", (3261, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3407), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 12}"}), "(prop={'size': 12})\n", (3388, 3407), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3422), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3420, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3504), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(11, 8)', 'dpi': '(200)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(11, 8), dpi=200, facecolor='w', edgecolor='k')\n", (3438, 3504), False, 'from matplotlib.pyplot import figure\n'), ((4032, 4060), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Quality Factor"""'], {}), "('Quality Factor')\n", (4042, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4159, 4188), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 12}"}), "(prop={'size': 12})\n", (4169, 4188), True, 'import matplotlib.pyplot as plt\n'), ((4193, 4203), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4201, 4203), True, 'import matplotlib.pyplot as plt\n'), ((5665, 5692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (5675, 5692), True, 'import matplotlib.pyplot as plt\n'), ((6198, 6208), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6206, 6208), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6376), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(16, 12)', 'dpi': '(200)', 'facecolor': '"""w"""', 
'edgecolor': '"""k"""'}), "(num=None, figsize=(16, 12), dpi=200, facecolor='w', edgecolor='k')\n", (6309, 6376), False, 'from matplotlib.pyplot import figure\n'), ((7072, 7100), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Quality Factor"""'], {}), "('Quality Factor')\n", (7082, 7100), True, 'import matplotlib.pyplot as plt\n'), ((7199, 7211), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7209, 7211), True, 'import matplotlib.pyplot as plt\n'), ((7216, 7226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7224, 7226), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2056), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2053, 2056), False, 'import pickle\n'), ((2068, 2084), 'numpy.array', 'np.array', (["d['N']"], {}), "(d['N'])\n", (2076, 2084), True, 'import numpy as np\n'), ((2086, 2103), 'numpy.array', 'np.array', (["d['Nt']"], {}), "(d['Nt'])\n", (2094, 2103), True, 'import numpy as np\n'), ((2105, 2124), 'numpy.array', 'np.array', (["d['PSNR']"], {}), "(d['PSNR'])\n", (2113, 2124), True, 'import numpy as np\n'), ((2126, 2146), 'numpy.array', 'np.array', (["d['PSNRt']"], {}), "(d['PSNRt'])\n", (2134, 2146), True, 'import numpy as np\n'), ((2148, 2167), 'numpy.array', 'np.array', (["d['SSIM']"], {}), "(d['SSIM'])\n", (2156, 2167), True, 'import numpy as np\n'), ((2169, 2189), 'numpy.array', 'np.array', (["d['SSIMt']"], {}), "(d['SSIMt'])\n", (2177, 2189), True, 'import numpy as np\n'), ((2833, 2909), 'matplotlib.pyplot.plot', 'plt.plot', (['x[3:-1]', 'y[3:-1]'], {'label': '"""JPEG (Kodak train)"""', 'color': '"""b"""', 'alpha': '(0.3)'}), "(x[3:-1], y[3:-1], label='JPEG (Kodak train)', color='b', alpha=0.3)\n", (2841, 2909), True, 'import matplotlib.pyplot as plt\n'), ((2949, 3016), 'matplotlib.pyplot.plot', 'plt.plot', (['x[1:-1]', 'y[1:-1]'], {'label': '"""(ours) (Kodak train)"""', 'color': '"""r"""'}), "(x[1:-1], y[1:-1], label='(ours) (Kodak train)', color='r')\n", (2957, 3016), True, 'import matplotlib.pyplot as plt\n'), 
((3066, 3124), 'matplotlib.pyplot.plot', 'plt.plot', (['x[2:]', 'y[2:]'], {'label': '"""JPEG"""', 'color': '"""b"""', 'alpha': '(0.3)'}), "(x[2:], y[2:], label='JPEG', color='b', alpha=0.3)\n", (3074, 3124), True, 'import matplotlib.pyplot as plt\n'), ((3169, 3218), 'matplotlib.pyplot.plot', 'plt.plot', (['x1[1:]', 'y1[1:]'], {'label': '"""Ours"""', 'color': '"""r"""'}), "(x1[1:], y1[1:], label='Ours', color='r')\n", (3177, 3218), True, 'import matplotlib.pyplot as plt\n'), ((3313, 3331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSIM"""'], {}), "('SSIM')\n", (3323, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PSNR (dB)"""'], {}), "('PSNR (dB)')\n", (3360, 3373), True, 'import matplotlib.pyplot as plt\n'), ((3654, 3743), 'matplotlib.pyplot.plot', 'plt.plot', (['N[0][1:-1]', 'psnr[0][1:-1]'], {'label': '"""JPEG (DIV2K train)"""', 'color': '"""m"""', 'alpha': '(0.3)'}), "(N[0][1:-1], psnr[0][1:-1], label='JPEG (DIV2K train)', color='m',\n alpha=0.3)\n", (3662, 3743), True, 'import matplotlib.pyplot as plt\n'), ((3747, 3823), 'matplotlib.pyplot.plot', 'plt.plot', (['N[1][1:-1]', 'psnr[1][1:-1]'], {'label': '"""(ours) (DIV2K train)"""', 'color': '"""g"""'}), "(N[1][1:-1], psnr[1][1:-1], label='(ours) (DIV2K train)', color='g')\n", (3755, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3912), 'matplotlib.pyplot.plot', 'plt.plot', (['N[0][1:]', 'psnrt[0][1:]'], {'label': '"""JPEG"""', 'color': '"""g"""', 'alpha': '(0.3)'}), "(N[0][1:], psnrt[0][1:], label='JPEG', color='g', alpha=0.3)\n", (3852, 3912), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3977), 'matplotlib.pyplot.plot', 'plt.plot', (['N[1][1:]', 'psnrt[1][1:]'], {'label': '"""Ours"""', 'color': '"""m"""'}), "(N[1][1:], psnrt[1][1:], label='Ours', color='m')\n", (3928, 3977), True, 'import matplotlib.pyplot as plt\n'), ((4094, 4112), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSIM"""'], {}), "('SSIM')\n", (4104, 
4112), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4154), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PSNR (dB)"""'], {}), "('PSNR (dB)')\n", (4141, 4154), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5654), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5651, 5654), False, 'import pickle\n'), ((5839, 5850), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5847, 5850), True, 'import numpy as np\n'), ((5852, 5863), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (5860, 5863), True, 'import numpy as np\n'), ((6008, 6025), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (6021, 6025), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((6060, 6087), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (6078, 6087), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((6818, 6880), 'matplotlib.pyplot.plot', 'plt.plot', (['N1', 'psnr'], {'label': '"""JPEG (DIV2K)"""', 'color': '"""m"""', 'alpha': '(0.3)'}), "(N1, psnr, label='JPEG (DIV2K)', color='m', alpha=0.3)\n", (6826, 6880), True, 'import matplotlib.pyplot as plt\n'), ((6888, 6935), 'matplotlib.pyplot.plot', 'plt.plot', (['N', 'b'], {'label': '"""ours (DIV2K)"""', 'color': '"""g"""'}), "(N, b, label='ours (DIV2K)', color='g')\n", (6896, 6935), True, 'import matplotlib.pyplot as plt\n'), ((6953, 7012), 'matplotlib.pyplot.plot', 'plt.plot', (['N1', 'c'], {'label': '"""JPEG (DIV2K)"""', 'color': '"""m"""', 'alpha': '(0.3)'}), "(N1, c, label='JPEG (DIV2K)', color='m', alpha=0.3)\n", (6961, 7012), True, 'import matplotlib.pyplot as plt\n'), ((7020, 7068), 'matplotlib.pyplot.plot', 'plt.plot', (['N', 'cc'], {'label': '"""ours (DIV2K)"""', 'color': '"""g"""'}), "(N, cc, label='ours (DIV2K)', color='g')\n", (7028, 7068), True, 'import matplotlib.pyplot as plt\n'), ((7134, 7152), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SSIM"""'], {}), "('SSIM')\n", (7144, 7152), 
True, 'import matplotlib.pyplot as plt\n'), ((7171, 7194), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PSNR (dB)"""'], {}), "('PSNR (dB)')\n", (7181, 7194), True, 'import matplotlib.pyplot as plt\n'), ((7648, 7741), 'pickle.dump', 'pickle.dump', (["{'N': 0, 'PSNR': 0, 'SSIM': 0, 'Nt': Nlt, 'PSNRt': psnrt, 'SSIMt': ssimt}", 'f'], {}), "({'N': 0, 'PSNR': 0, 'SSIM': 0, 'Nt': Nlt, 'PSNRt': psnrt,\n 'SSIMt': ssimt}, f)\n", (7659, 7741), False, 'import pickle\n'), ((8353, 8453), 'pickle.dump', 'pickle.dump', (["{'N': Nl, 'PSNR': psnr, 'SSIM': ssim, 'Nt': Nlt, 'PSNRt': psnrt, 'SSIMt': ssimt\n }", 'f'], {}), "({'N': Nl, 'PSNR': psnr, 'SSIM': ssim, 'Nt': Nlt, 'PSNRt': psnrt,\n 'SSIMt': ssimt}, f)\n", (8364, 8453), False, 'import pickle\n'), ((1564, 1575), 'numpy.mean', 'np.mean', (['ps'], {}), '(ps)\n', (1571, 1575), True, 'import numpy as np\n'), ((1597, 1608), 'numpy.mean', 'np.mean', (['ss'], {}), '(ss)\n', (1604, 1608), True, 'import numpy as np\n'), ((1807, 1860), 'pickle.dump', 'pickle.dump', (["{'N': Nl, 'PSNR': psnr, 'SSIM': ssim}", 'f'], {}), "({'N': Nl, 'PSNR': psnr, 'SSIM': ssim}, f)\n", (1818, 1860), False, 'import pickle\n'), ((1217, 1231), 'evaluate.PSNR', 'PSNR', (['ref', 'dec'], {}), '(ref, dec)\n', (1221, 1231), False, 'from evaluate import PSNR, SSIM\n'), ((1259, 1273), 'evaluate.SSIM', 'SSIM', (['ref', 'dec'], {}), '(ref, dec)\n', (1263, 1273), False, 'from evaluate import PSNR, SSIM\n'), ((4652, 4662), 'evaluate.PSNR', 'PSNR', (['x', 'y'], {}), '(x, y)\n', (4656, 4662), False, 'from evaluate import PSNR, SSIM\n'), ((4664, 4674), 'evaluate.PSNR', 'PSNR', (['x', 'z'], {}), '(x, z)\n', (4668, 4674), False, 'from evaluate import PSNR, SSIM\n'), ((4694, 4704), 'evaluate.PSNR', 'PSNR', (['x', 'z'], {}), '(x, z)\n', (4698, 4704), False, 'from evaluate import PSNR, SSIM\n'), ((4706, 4716), 'evaluate.PSNR', 'PSNR', (['x', 'y'], {}), '(x, y)\n', (4710, 4716), False, 'from evaluate import PSNR, SSIM\n'), ((5082, 5098), 'evaluate.PSNR', 'PSNR', (['x[j]', 
'y[j]'], {}), '(x[j], y[j])\n', (5086, 5098), False, 'from evaluate import PSNR, SSIM\n'), ((5100, 5116), 'evaluate.PSNR', 'PSNR', (['x[j]', 'z[j]'], {}), '(x[j], z[j])\n', (5104, 5116), False, 'from evaluate import PSNR, SSIM\n')] |
"""All the layer functions go here.
"""
#<NAME>
#HW2
from __future__ import print_function, absolute_import
import numpy as np
class FullyConnected(object):
    """Fully connected layer 'y = Wx + b'.

    Arguments:
        d_in (int): input size.
        d_out (int): output size.
        weights_init (obj): an object instantiated using any initializer class
            in the "initializer" module; random-normal init when None.
        bias_init (obj): an object instantiated using any initializer class
            in the "initializer" module; random-normal init when None.
        name (str): the name of the layer.

    Attributes:
        W (np.array): the weights, shape (d_out, d_in).
        b (np.array): the biases, shape (d_out,).
        shape (tuple): (d_out, d_in).
        name (str): the name of the layer.
    """
    def __init__(
        self, d_in, d_out, weights_init=None, bias_init=None, name="FullyConnected"
    ):
        shape = (d_out, d_in)
        self.W = weights_init.initialize(shape) \
            if weights_init else np.random.randn(*shape).astype(np.float32)
        self.b = bias_init.initialize((shape[0])) \
            if bias_init else np.random.randn(shape[0]).astype(np.float32)
        self.shape = shape
        self.name = name

    def __repr__(self):
        return "{}({}, {})".format(self.name, self.shape[0], self.shape[1])

    def __call__(self, x):
        return self.forward(x)

    def forward(self, x):
        """Compute the layer output.

        Args:
            x (np.array): the input of the layer, shape (d_in,).

        Returns:
            The output of the layer, shape (d_out,).
        """
        return np.dot(self.W, x) + self.b

    def backward(self, x, dv_y):
        """Compute the gradients of weights and biases and the gradient with
        respect to the input.

        Cleanup: removed the dead `np.empty` pre-allocations (they were
        immediately overwritten) and replaced the reshape-and-dot outer
        product with `np.outer`; the returned values are unchanged.

        Args:
            x (np.array): the input of the layer, shape (d_in,).
            dv_y (np.array): derivative of the loss w.r.t. the output,
                shape (d_out,).

        Returns:
            dv_x (np.array): derivative of the loss w.r.t. the input.
            dv_W (np.array): derivative of the loss w.r.t. the weights.
            dv_b (np.array): derivative of the loss w.r.t. the biases.
        """
        dv_x = np.dot(dv_y, self.W)
        # d(Wx+b)/db is the identity, so dv_b is just dv_y (copied, matching
        # the fresh array the original np.dot(dv_y, 1) produced).
        dv_b = dv_y.copy()
        dv_W = np.outer(dv_y, x)
        # don't change the order of return values
        return dv_x, dv_W, dv_b
class Conv2D(object):
"""2D convolutional layer.
Arguments:
filter_size (tuple): the shape of the filter. It is a tuple = (
out_channels, in_channels, filter_height, filter_width).
strides (int or tuple): the strides of the convolution operation.
padding (int or tuple): number of zero paddings.
weights_init (obj): an object instantiated using any initializer class
in the "initializer" module.
bias_init (obj): an object instantiated using any initializer class
in the "initializer" module.
name (str): the name of the layer.
Attributes:
W (np.array): the weights of the layer. A 4D array of shape (
out_channels, in_channels, filter_height, filter_width).
b (np.array): the biases of the layer. A 1D array of shape (
in_channels).
filter_size (tuple): the shape of the filter. It is a tuple = (
out_channels, in_channels, filter_height, filter_width).
strides (tuple): the strides of the convolution operation. A tuple = (
height_stride, width_stride).
padding (tuple): the number of zero paddings along the height and
width. A tuple = (height_padding, width_padding).
name (str): the name of the layer.
"""
def __init__(
self, in_channel, out_channel, kernel_size, stride, padding,
weights_init=None, bias_init=None, name="Conv2D"):
filter_size = (out_channel, in_channel, *kernel_size)
self.W = weights_init.initialize(filter_size) \
if weights_init else np.random.randn(*filter_size).astype(np.float32)
self.b = bias_init.initialize((filter_size[0], 1)) \
if bias_init else np.random.randn(out_channel, 1).astype(np.float32)
self.kernel_size = kernel_size
self.stride = (stride, stride) if type(stride) == int else stride
self.padding = (padding, padding) if type(padding) == int else padding
self.name = name
def __repr__(self):
return "{}({}, {}, {})".format(
self.name, self.kernel_size, self.stride, self.padding
)
def __call__(self, x):
return self.forward(x)
def forward(self, x):
"""Compute the layer output.
Args:
x (np.array): the input of the layer. A 3D array of shape (
in_channels, in_heights, in_weights).
Returns:
The output of the layer. A 3D array of shape (out_channels,
out_heights, out_weights).
"""
p, s = self.padding, self.stride
x_padded = np.pad(
x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant'
)
# check dimensions
assert (x.shape[1] - self.W.shape[2] + 2 * p[0]) / s[0] + 1 > 0, \
'Height doesn\'t work'
assert (x.shape[2] - self.W.shape[3] + 2 * p[1]) / s[1] + 1 > 0, \
'Width doesn\'t work'
y_shape = (
self.W.shape[0],
int((x.shape[1] - self.W.shape[2] + 2 * p[0]) / s[0]) + 1,
int((x.shape[2] - self.W.shape[3] + 2 * p[1]) / s[1]) + 1,
)
y = np.empty(y_shape, dtype=np.float32)
for k in range(y.shape[0]):
for i in range(y.shape[1]):
for j in range(y.shape[2]):
y[k, i, j] = np.sum(
x_padded[
:,
i * s[0] : i * s[0] + self.W.shape[2],
j * s[1] : j * s[1] + self.W.shape[3]
] * self.W[k]
) + self.b[k]
return y
def backward(self, x, dv_y):
"""Compute the gradients of weights and biases and the gradient with
respect to the input.
Args:
x (np.array): the input of the layer. A 3D array of shape (
in_channels, in_heights, in_weights).
dv_y (np.array): The derivative of the loss with respect to the
output. A 3D array of shape (out_channels, out_heights,
out_weights).
Returns:
dv_x (np.array): The derivative of the loss with respect to the
input. It has the same shape as x.
dv_W (np.array): The derivative of the loss with respect to the
weights. It has the same shape as self.W
dv_b (np.array): The derivative of the loss with respect to the
biases. It has the same shape as self.b
"""
p, s = self.padding, self.stride
x_padded = np.pad(
x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant'
)
dv_W = np.empty(self.W.shape, dtype=np.float32)
dv_b = np.empty(self.b.shape, dtype=np.float32)
dv_x = np.empty(x.shape, dtype=np.float32)
dv_W.fill(0)
dv_b.fill(0)
dv_x.fill(0)
#https://www.jefkine.com/general/2016/09/05/backpropagation-in-convolutional-neural-networks/
#inspired by
C_in, W_in, H_in = x.shape
print("----C_in",C_in,"W_in",W_in,"H_in",H_in)
C_out, W_out, H_out = dv_y.shape
print("C_out",C_out,"W_out",W_in,"H_out",H_in)
C_out, C_in, K_h, K_w = dv_W.shape
print("C_out",C_out,"C_in",C_in,"K_h",K_h,"K_w",K_w)
s_w, s_h = s
print("s_w",s_w,"s_h",s_h)
p_w, p_h = p
print("p_w",s_w,"p_h",s_h)
for inn in range(C_in):
for outt in range(C_out):
for m in range(K_h):
for n in range(K_w):
for i in range(W_out):
for j in range(H_out):
dv_W[outt, inn, m, n] += dv_y[outt, i, j] * x_padded[inn, i * s_w + m, j * s_h + n]
for outt in range(C_out):
dv_b[outt] += np.sum(dv_y[outt, :, :])
for inn in range(C_in):
for outt in range(C_out):
for v in range(W_in):
for u in range(H_in):
for m in range(K_h):
for n in range(K_w):
ip = (v + p_w - m) / s_w
jp = (u + p_h - n) / s_h
if (ip >= 0 and jp >= 0 and ip < W_out and jp < H_out ):
dv_x[inn, v, u] += dv_y[outt, int(ip), int(jp)] * self.W[outt,inn, m, n]
return dv_x, dv_W, dv_b
class MaxPool2D:
def __init__(self, kernel_size, stride, padding, name="MaxPool2D"):
self.kernel_size = kernel_size
self.stride = (stride, stride) if type(stride) == int else stride
self.padding = (padding, padding) if type(padding) == int else padding
self.name = name
def __repr__(self):
return "{}({}, {}, {})".format(
self.name, self.kernel_size, self.stride, self.padding
)
def __call__(self, x):
return self.forward(x)
def forward(self, x):
"""Compute the layer output.
Arguments:
x {[np.array]} -- the input of the layer. A 3D array of shape (
in_channels, in_heights, in_weights).
Returns:
The output of the layer. A 3D array of shape (out_channels,
out_heights, out_weights).
"""
p, s = self.padding, self.stride
x_padded = np.pad(
x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant'
)
# check dimensions
assert (x.shape[1] - self.kernel_size[0] + 2 * p[0]) / s[0] + 1 > 0, \
'Height doesn\'t work'
assert (x.shape[2] - self.kernel_size[1] + 2 * p[1]) / s[1] + 1 > 0, \
'Width doesn\'t work'
y_shape = (
x.shape[0],
int((x.shape[1] - self.kernel_size[0] + 2 * p[0]) / s[0]) + 1,
int((x.shape[2] - self.kernel_size[1] + 2 * p[1]) / s[1]) + 1,
)
y = np.empty(y_shape, dtype=np.float32)
for i in range(y.shape[1]):
for j in range(y.shape[2]):
y[:, i, j] = np.max(x_padded[
:,
i * s[0]: i * s[0] + self.kernel_size[0],
j * s[1]: j * s[1] + self.kernel_size[1]
].reshape(-1, self.kernel_size[0] * self.kernel_size[1]),
axis=1
)
return y
def backward(self, x, dv_y):
"""Compute the gradients of weights and biases and the gradient with
respect to the input.
Args:
x (np.array): the input of the layer. A 3D array of shape (
in_channels, in_heights, in_weights).
dv_y (np.array): The derivative of the loss with respect to the
output. A 3D array of shape (out_channels, out_heights,
out_weights).
Returns:
dv_x (np.array): The derivative of the loss with respect to the
input. It has the same shape as x.
"""
p, s = self.padding, self.stride
x_padded = np.pad(
x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant'
)
dv_x = np.empty(x.shape, dtype=np.float32)
dv_x.fill(0)
dv_xp = np.empty(x_padded.shape, dtype=np.float32)
dv_xp.fill(0)
#https://leonardoaraujosantos.gitbooks.io/artificial-inteligence/content/pooling_layer.html
#inspired by
C_in, W_in, H_in = x.shape
C_out, W_out, H_out = dv_y.shape
K_w, K_h = self.kernel_size
s_w, s_h = s
p_w, p_h = p
def unpad(x, pad_width):
slices = []
for c in pad_width:
e = None if c[1] == 0 else -c[1]
slices.append(slice(c[0], e))
return x[tuple(slices)]
for t in range(C_in):
for v in range(W_out):
for u in range(H_out):
x_pool=x_padded[t,v*s_w:v*s_w+K_w,u*s_h:u*s_h+K_h]
mask=(x_pool==np.max(x_pool))
dv_xp[t, v*s_w:v*s_w+K_w,u*s_h:u*s_h+K_h] += dv_y[t, v,u] * mask
dv_x=unpad(dv_xp,((p[0], p[0]), (p[1], p[1])))
return dv_x
| [
"numpy.pad",
"numpy.sum",
"numpy.random.randn",
"numpy.empty",
"numpy.max",
"numpy.dot"
] | [((2631, 2666), 'numpy.empty', 'np.empty', (['x.shape'], {'dtype': 'np.float32'}), '(x.shape, dtype=np.float32)\n', (2639, 2666), True, 'import numpy as np\n'), ((2682, 2722), 'numpy.empty', 'np.empty', (['self.W.shape'], {'dtype': 'np.float32'}), '(self.W.shape, dtype=np.float32)\n', (2690, 2722), True, 'import numpy as np\n'), ((2738, 2778), 'numpy.empty', 'np.empty', (['self.b.shape'], {'dtype': 'np.float32'}), '(self.b.shape, dtype=np.float32)\n', (2746, 2778), True, 'import numpy as np\n'), ((2794, 2814), 'numpy.dot', 'np.dot', (['dv_y', 'self.W'], {}), '(dv_y, self.W)\n', (2800, 2814), True, 'import numpy as np\n'), ((2829, 2844), 'numpy.dot', 'np.dot', (['dv_y', '(1)'], {}), '(dv_y, 1)\n', (2835, 2844), True, 'import numpy as np\n'), ((5669, 5733), 'numpy.pad', 'np.pad', (['x', '((0, 0), (p[0], p[0]), (p[1], p[1]))'], {'mode': '"""constant"""'}), "(x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant')\n", (5675, 5733), True, 'import numpy as np\n'), ((6225, 6260), 'numpy.empty', 'np.empty', (['y_shape'], {'dtype': 'np.float32'}), '(y_shape, dtype=np.float32)\n', (6233, 6260), True, 'import numpy as np\n'), ((7653, 7717), 'numpy.pad', 'np.pad', (['x', '((0, 0), (p[0], p[0]), (p[1], p[1]))'], {'mode': '"""constant"""'}), "(x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant')\n", (7659, 7717), True, 'import numpy as np\n'), ((7757, 7797), 'numpy.empty', 'np.empty', (['self.W.shape'], {'dtype': 'np.float32'}), '(self.W.shape, dtype=np.float32)\n', (7765, 7797), True, 'import numpy as np\n'), ((7813, 7853), 'numpy.empty', 'np.empty', (['self.b.shape'], {'dtype': 'np.float32'}), '(self.b.shape, dtype=np.float32)\n', (7821, 7853), True, 'import numpy as np\n'), ((7869, 7904), 'numpy.empty', 'np.empty', (['x.shape'], {'dtype': 'np.float32'}), '(x.shape, dtype=np.float32)\n', (7877, 7904), True, 'import numpy as np\n'), ((10562, 10626), 'numpy.pad', 'np.pad', (['x', '((0, 0), (p[0], p[0]), (p[1], p[1]))'], {'mode': '"""constant"""'}), "(x, ((0, 0), 
(p[0], p[0]), (p[1], p[1])), mode='constant')\n", (10568, 10626), True, 'import numpy as np\n'), ((11121, 11156), 'numpy.empty', 'np.empty', (['y_shape'], {'dtype': 'np.float32'}), '(y_shape, dtype=np.float32)\n', (11129, 11156), True, 'import numpy as np\n'), ((12432, 12496), 'numpy.pad', 'np.pad', (['x', '((0, 0), (p[0], p[0]), (p[1], p[1]))'], {'mode': '"""constant"""'}), "(x, ((0, 0), (p[0], p[0]), (p[1], p[1])), mode='constant')\n", (12438, 12496), True, 'import numpy as np\n'), ((12535, 12570), 'numpy.empty', 'np.empty', (['x.shape'], {'dtype': 'np.float32'}), '(x.shape, dtype=np.float32)\n', (12543, 12570), True, 'import numpy as np\n'), ((12608, 12650), 'numpy.empty', 'np.empty', (['x_padded.shape'], {'dtype': 'np.float32'}), '(x_padded.shape, dtype=np.float32)\n', (12616, 12650), True, 'import numpy as np\n'), ((1790, 1807), 'numpy.dot', 'np.dot', (['self.W', 'x'], {}), '(self.W, x)\n', (1796, 1807), True, 'import numpy as np\n'), ((8976, 9000), 'numpy.sum', 'np.sum', (['dv_y[outt, :, :]'], {}), '(dv_y[outt, :, :])\n', (8982, 9000), True, 'import numpy as np\n'), ((1199, 1222), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1214, 1222), True, 'import numpy as np\n'), ((1324, 1349), 'numpy.random.randn', 'np.random.randn', (['shape[0]'], {}), '(shape[0])\n', (1339, 1349), True, 'import numpy as np\n'), ((4648, 4677), 'numpy.random.randn', 'np.random.randn', (['*filter_size'], {}), '(*filter_size)\n', (4663, 4677), True, 'import numpy as np\n'), ((4788, 4819), 'numpy.random.randn', 'np.random.randn', (['out_channel', '(1)'], {}), '(out_channel, 1)\n', (4803, 4819), True, 'import numpy as np\n'), ((6415, 6524), 'numpy.sum', 'np.sum', (['(x_padded[:, i * s[0]:i * s[0] + self.W.shape[2], j * s[1]:j * s[1] + self.\n W.shape[3]] * self.W[k])'], {}), '(x_padded[:, i * s[0]:i * s[0] + self.W.shape[2], j * s[1]:j * s[1] +\n self.W.shape[3]] * self.W[k])\n', (6421, 6524), True, 'import numpy as np\n'), ((13404, 13418), 'numpy.max', 
'np.max', (['x_pool'], {}), '(x_pool)\n', (13410, 13418), True, 'import numpy as np\n')] |
import numpy as np
from scipy.interpolate import interp1d
def _Gradient(x,y):
dydx = (y[1:] - y[:-1])/(x[:1] - x[:-1])
return dydx
def _TraceAlt(S,R,B,z,Bm):
Re = 6378.0
#calculate the indices for northern and southern bits of the field line
dRdS = _Gradient(S,R)
zc = 0.5*(z[1:] + z[:-1])
#north
indn = np.where((dRdS < 0) & (zc > 0))[0]
if indn.size > 0:
indn = indn[-1]
fn = interp1d(B[:indn][::-1],R[:indn][::-1],bounds_error=False,fill_value=np.nan)
Rn = fn(Bm)
else:
Rn = np.zeros(Bm.size,dtype='float32') + np.nan
inds = np.where((dRdS > 0) & (zc < 0))[0]
if inds.size > 0:
inds = inds[0]
fs = interp1d(B[inds:],R[inds:],bounds_error=False,fill_value=np.nan)
Rs = fs(Bm)
else:
Rs = np.zeros(Bm.size,dtype='float32') + np.nan
#convert to altitude in km
An = (Rn - 1.0)*Re
As = (Rs - 1.0)*Re
return An,As
def MirrorAlt(T,Bm,alpha):
'''
Calculate the mirror altitude in km for a bunch of traces.
'''
AltN = np.zeros(Bm.shape,dtype='float32') + np.nan
AltS = np.zeros(Bm.shape,dtype='float32') + np.nan
#loop through one trace at a time
nT = T.n
print('Calculating Mirror Altitudes')
for i in range(0,nT):
print('\rTrace {0} of {1}'.format(i+1,nT),end='')
B = np.sqrt(T.Bx[i]**2 + T.By[i]**2 + T.Bz[i]**2)[:np.int32(T.nstep[i])]
S = T.s[i][:np.int32(T.nstep[i])]
R = T.R[i][:np.int32(T.nstep[i])]
z = T.z[i][:np.int32(T.nstep[i])]
AltN[i],AltS[i] = _TraceAlt(S,R,B,z,Bm[i])
print()
#pitch angle < 90 means that the particle should be moving along
#the flux tube in the direction of the field, so should end up in
#the northern hemisphere
Alt = np.copy(AltN)
s = np.where(alpha > 90.0)[0]
Alt[:,s] = AltS[:,s]
return Alt
| [
"numpy.copy",
"numpy.zeros",
"numpy.where",
"numpy.int32",
"scipy.interpolate.interp1d",
"numpy.sqrt"
] | [((1632, 1645), 'numpy.copy', 'np.copy', (['AltN'], {}), '(AltN)\n', (1639, 1645), True, 'import numpy as np\n'), ((321, 352), 'numpy.where', 'np.where', (['((dRdS < 0) & (zc > 0))'], {}), '((dRdS < 0) & (zc > 0))\n', (329, 352), True, 'import numpy as np\n'), ((400, 479), 'scipy.interpolate.interp1d', 'interp1d', (['B[:indn][::-1]', 'R[:indn][::-1]'], {'bounds_error': '(False)', 'fill_value': 'np.nan'}), '(B[:indn][::-1], R[:indn][::-1], bounds_error=False, fill_value=np.nan)\n', (408, 479), False, 'from scipy.interpolate import interp1d\n'), ((562, 593), 'numpy.where', 'np.where', (['((dRdS > 0) & (zc < 0))'], {}), '((dRdS > 0) & (zc < 0))\n', (570, 593), True, 'import numpy as np\n'), ((640, 707), 'scipy.interpolate.interp1d', 'interp1d', (['B[inds:]', 'R[inds:]'], {'bounds_error': '(False)', 'fill_value': 'np.nan'}), '(B[inds:], R[inds:], bounds_error=False, fill_value=np.nan)\n', (648, 707), False, 'from scipy.interpolate import interp1d\n'), ((969, 1004), 'numpy.zeros', 'np.zeros', (['Bm.shape'], {'dtype': '"""float32"""'}), "(Bm.shape, dtype='float32')\n", (977, 1004), True, 'import numpy as np\n'), ((1021, 1056), 'numpy.zeros', 'np.zeros', (['Bm.shape'], {'dtype': '"""float32"""'}), "(Bm.shape, dtype='float32')\n", (1029, 1056), True, 'import numpy as np\n'), ((1651, 1673), 'numpy.where', 'np.where', (['(alpha > 90.0)'], {}), '(alpha > 90.0)\n', (1659, 1673), True, 'import numpy as np\n'), ((505, 539), 'numpy.zeros', 'np.zeros', (['Bm.size'], {'dtype': '"""float32"""'}), "(Bm.size, dtype='float32')\n", (513, 539), True, 'import numpy as np\n'), ((733, 767), 'numpy.zeros', 'np.zeros', (['Bm.size'], {'dtype': '"""float32"""'}), "(Bm.size, dtype='float32')\n", (741, 767), True, 'import numpy as np\n'), ((1232, 1283), 'numpy.sqrt', 'np.sqrt', (['(T.Bx[i] ** 2 + T.By[i] ** 2 + T.Bz[i] ** 2)'], {}), '(T.Bx[i] ** 2 + T.By[i] ** 2 + T.Bz[i] ** 2)\n', (1239, 1283), True, 'import numpy as np\n'), ((1279, 1299), 'numpy.int32', 'np.int32', (['T.nstep[i]'], {}), 
'(T.nstep[i])\n', (1287, 1299), True, 'import numpy as np\n'), ((1315, 1335), 'numpy.int32', 'np.int32', (['T.nstep[i]'], {}), '(T.nstep[i])\n', (1323, 1335), True, 'import numpy as np\n'), ((1351, 1371), 'numpy.int32', 'np.int32', (['T.nstep[i]'], {}), '(T.nstep[i])\n', (1359, 1371), True, 'import numpy as np\n'), ((1387, 1407), 'numpy.int32', 'np.int32', (['T.nstep[i]'], {}), '(T.nstep[i])\n', (1395, 1407), True, 'import numpy as np\n')] |
import os
from PIL import Image
import torch
from torch.utils import data
import numpy as np
from torchvision import transforms as T
import torchvision
import cv2
import sys
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pickle
import mxnet as mx
class Dataset(data.Dataset):
def __init__(self, root, data_list_file, phase='train', input_shape=(3, 112,112)):
self.phase = phase
self.input_shape = input_shape
with open(os.path.join(data_list_file), 'r') as fd:
imgs = fd.readlines()
self.imgs = [os.path.join(root, img[:-1]) for img in imgs]
#print(imgs)
#for img in imgs: img.convert('RGB')
#assert()
###imgs randomize
#self.imgs = np.random.permutation(imgs)
# normalize = T.Normalize(mean=[0.5, 0.5, 0.5],
# std=[0.5, 0.5, 0.5])
normalize = T.Normalize(mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5])
self.transforms = T.Compose([
#T.ToPILImage(),
#T.RandomHorizontalFlip(),
T.Resize((112,112)),
T.ToTensor(),
normalize
])
def __getitem__(self, index):
sample = self.imgs[index]
splits = sample.split()
img_path = splits[0]
#print(img_path)
try:
#with open(img_path,'rb') as f:
# bins, issame_list = pickle.load(f,encoding='bytes')
#img = mx.image.imdecode(bins[0])
#print(img.size)
#assert()
data = Image.open(img_path)
#print(data.format,data.size,data.mode)
#data = np.array(data)
#assert()
#data = data.convert('L')
data = data.convert('RGB')
data = self.transforms(data)
label = np.int32(splits[1])
#print("file found")
return data.float(), label
except FileNotFoundError:
#print("file not found")
pass
def __len__(self):
return len(self.imgs)
if __name__ == '__main__':
dataset = Dataset(root='/data/Datasets/fv/dataset_v1.1/dataset_mix_aligned_v1.1',
data_list_file='/data/Datasets/fv/dataset_v1.1/mix_20w.txt',
phase='test',
input_shape=(3, 128, 128))
trainloader = data.DataLoader(dataset, batch_size=10)
for i, (data, label) in enumerate(trainloader):
# imgs, labels = data
# print imgs.numpy().shape
# print data.cpu().numpy()
# if i == 0:
img = torchvision.utils.make_grid(data).numpy()
# print img.shape
# print label.shape
# chw -> hwc
img = np.transpose(img, (1, 2, 0))
# img *= np.array([0.229, 0.224, 0.225])
# img += np.array([0.485, 0.456, 0.406])
img += np.array([1, 1, 1])
img *= 127.5
img = img.astype(np.uint8)
img = img[:, :, [2, 1, 0]]
cv2.imshow('img', img)
cv2.waitKey()
# break
# dst.decode_segmap(labels.numpy()[0], plot=True)
| [
"torch.utils.data.float",
"torch.utils.data.DataLoader",
"cv2.waitKey",
"torch.utils.data.convert",
"numpy.transpose",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"torchvision.utils.make_grid",
"numpy.array",
"numpy.int32",
"torchvision.transforms.Normalize",
"cv2.imshow",
"os.path.... | [((2387, 2426), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dataset'], {'batch_size': '(10)'}), '(dataset, batch_size=10)\n', (2402, 2426), False, 'from torch.utils import data\n'), ((914, 968), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (925, 968), True, 'from torchvision import transforms as T\n'), ((2745, 2773), 'numpy.transpose', 'np.transpose', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (2757, 2773), True, 'import numpy as np\n'), ((2887, 2906), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (2895, 2906), True, 'import numpy as np\n'), ((3007, 3029), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (3017, 3029), False, 'import cv2\n'), ((3038, 3051), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3049, 3051), False, 'import cv2\n'), ((574, 602), 'os.path.join', 'os.path.join', (['root', 'img[:-1]'], {}), '(root, img[:-1])\n', (586, 602), False, 'import os\n'), ((1583, 1603), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1593, 1603), False, 'from PIL import Image\n'), ((1770, 1789), 'torch.utils.data.convert', 'data.convert', (['"""RGB"""'], {}), "('RGB')\n", (1782, 1789), False, 'from torch.utils import data\n'), ((1851, 1870), 'numpy.int32', 'np.int32', (['splits[1]'], {}), '(splits[1])\n', (1859, 1870), True, 'import numpy as np\n'), ((476, 504), 'os.path.join', 'os.path.join', (['data_list_file'], {}), '(data_list_file)\n', (488, 504), False, 'import os\n'), ((1084, 1104), 'torchvision.transforms.Resize', 'T.Resize', (['(112, 112)'], {}), '((112, 112))\n', (1092, 1104), True, 'from torchvision import transforms as T\n'), ((1117, 1129), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1127, 1129), True, 'from torchvision import transforms as T\n'), ((1923, 1935), 'torch.utils.data.float', 'data.float', ([], {}), '()\n', (1933, 
1935), False, 'from torch.utils import data\n'), ((2614, 2647), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['data'], {}), '(data)\n', (2641, 2647), False, 'import torchvision\n')] |
import numpy as np
from collections import Counter
from nar_module.nar.nar_model import get_list_id
class RecentlyPopularRecommender():
def __init__(self):
pass
def get_recent_popular_item_ids(self):
# get item recent buffer from google service
# recent_items_buffer = self.clicked_items_state.get_recent_clicks_buffer()
recent_items_buffer = get_list_id()
#recent_items_buffer_nonzero = recent_items_buffer[np.nonzero(recent_items_buffer)]
#Dealing with first batch, when there is no item in the buffer yet
#if len(recent_items_buffer_nonzero) == 0:
# recent_items_buffer_nonzero = [0]
item_counter = Counter(recent_items_buffer)
popular_item_ids, popular_items_count = zip(*item_counter.most_common())
#print(len(recent_items_buffer))
return popular_item_ids
def get_top_k_items_pop(self, popular_item_ids):
return popular_item_ids[:100]
def _get_top_n_valid_items(self, items, topk, valid_items):
count = 0
for item in items:
if count == topk:
break
if (item in valid_items) or (valid_items is None):
count += 1
yield item
def predict(self, users_ids, sessions_items, topk=100, valid_items=None):
popular_item_ids = self.get_recent_popular_item_ids()
session_predictions = np.zeros(dtype=np.int64,
shape=[sessions_items.shape[0],
sessions_items.shape[1],
topk])
for row_idx, session_items in enumerate(sessions_items):
for col_idx, item in enumerate(session_items):
if item != 0:
session_predictions[row_idx, col_idx] = list(
self._get_top_n_valid_items(popular_item_ids, topk, valid_items[row_idx, col_idx]))
return session_predictions
| [
"collections.Counter",
"numpy.zeros",
"nar_module.nar.nar_model.get_list_id"
] | [((387, 400), 'nar_module.nar.nar_model.get_list_id', 'get_list_id', ([], {}), '()\n', (398, 400), False, 'from nar_module.nar.nar_model import get_list_id\n'), ((689, 717), 'collections.Counter', 'Counter', (['recent_items_buffer'], {}), '(recent_items_buffer)\n', (696, 717), False, 'from collections import Counter\n'), ((1415, 1508), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'np.int64', 'shape': '[sessions_items.shape[0], sessions_items.shape[1], topk]'}), '(dtype=np.int64, shape=[sessions_items.shape[0], sessions_items.\n shape[1], topk])\n', (1423, 1508), True, 'import numpy as np\n')] |
import numpy as np
class Arena(object):
def __init__(self, n, h, t):
self.arena = np.array(np.zeros([n, n]), dtype=int)
self.size = n
self.humans = h
self.targets = t
self.action_space = 3
self.time = 0
self.state = 's'
def game_state(self):
if self.humans == 0:
self.state = 'l' # loss
elif self.targets == 0:
self.state = 'w' # win
else:
self.state = 's' # in progress
def print_arena(self):
print(self.arena)
return
def remove_agents(self):
self.arena[self.arena == 1] = 0
self.arena[self.arena == 2] = 0
return
| [
"numpy.zeros"
] | [((106, 122), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (114, 122), True, 'import numpy as np\n')] |
from gutfit.experimentalneutrinomassmatrix import ExperimentalNeutrinoMassMatrix
from gutfit.type1and2seesaw_v3 import Type1And2SeeSaw_v3
from gutfit import parameterlist
def overlap(A,B):
A.sort()
B.sort()
minA = min(A)
minB = min(B)
maxA = max(A)
maxB = max(B)
Ni = 0
if maxA < minB: return 0
elif maxB < minA: return 0
elif minA < minB and maxA > maxB: return 1
elif minA > minB and maxA < maxB: return 1
elif minA < minB and maxA < maxB:
i,j = 0,0
while(A[i]<minB): i+=1
while(B[j]<maxA): j+=1
return (len(A)-i + j)/float((len(A)+len(B)))
elif minA > minB and maxA > maxB:
i,j = 0,0
while(A[i]<maxB): i+=1
while(B[j]<minA): j+=1
return (len(A) + i - j)/float((len(A)+len(B)))
else:
print("whoopsiedoodles")
def sample(x,E,S,PL,N,pnames):
D = []
T = []
for _ in range(N):
p=PL()
for num, pn in enumerate(pnames):
p[pn] = x[num]
data = E(p)
p=PL()
for num, pn in enumerate(pnames):
p[pn] = x[num]
theo = S(p)
D.append(data)
T.append(theo)
return D, T
def measure(D, T):
OL = [
overlap([d[0][0] for d in D], [t[0][0] for t in T]),
overlap([d[0][1] for d in D], [t[0][1] for t in T]),
overlap([d[0][2] for d in D], [t[0][2] for t in T]),
overlap([d[1][2] for d in D], [t[1][2] for t in T]),
overlap([d[1][1] for d in D], [t[1][1] for t in T]),
overlap([d[2][2] for d in D], [t[2][2] for t in T])
]
# DS = [
# abs(np.mean([d[0][0] for d in D]) - np.mean([t[0][0] for t in T])),
# abs(np.mean([d[0][1] for d in D]) - np.mean([t[0][1] for t in T])),
# abs(np.mean([d[0][2] for d in D]) - np.mean([t[0][2] for t in T])),
# abs(np.mean([d[1][2] for d in D]) - np.mean([t[1][2] for t in T])),
# abs(np.mean([d[1][1] for d in D]) - np.mean([t[1][1] for t in T])),
# abs(np.mean([d[2][2] for d in D]) - np.mean([t[2][2] for t in T]))
# ]
DS = []
a = np.mean([d[0][0] for d in D])
b = np.mean([t[0][0] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
a = np.mean([d[0][1] for d in D])
b = np.mean([t[0][1] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
a = np.mean([d[0][2] for d in D])
b = np.mean([t[0][2] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
a = np.mean([d[1][2] for d in D])
b = np.mean([t[1][2] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
a = np.mean([d[1][1] for d in D])
b = np.mean([t[1][1] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
a = np.mean([d[2][2] for d in D])
b = np.mean([t[2][2] for t in T])
d = abs((a - b)/(a + b))
DS.append(d)
from functools import reduce
return sum([d*d for d,o in zip(DS,OL)]) # works technically
#https://stackoverflow.com/questions/13377046/scipy-fill-a-histogram-reading-from-a-db-event-by-event-in-a-loop
import numpy as np
if __name__=="__main__":
import optparse, os, sys
op = optparse.OptionParser(usage=__doc__)
op.add_option("-o", "--output", dest="OUTPUT", default="nestout", type=str, help="Prefix for outputs (default: %default)")
op.add_option("-v", "--debug", dest="DEBUG", default=False, action="store_true", help="Turn on some debug messages")
op.add_option("-q", "--quiet", dest="QUIET", default=False, action="store_true", help="Turn off messages")
op.add_option("--mn-seed", dest="SEED", default=-1, type=int, help="Multinest seed (default: %default)")
op.add_option("--mn-resume", dest="RESUME", default=False, action='store_true', help="Resume on previous run.")
op.add_option("--mn-multi-mod", dest="MULTIMODE", default=False, action='store_true', help="Set multimodal to true.")
op.add_option("--mn-update", dest="UPDATE", default=1000, type=int, help="Update inteval (default: %default iterations)")
op.add_option("--mn-tol", dest="TOL", default=0.5, type=float, help="Evidence tolerance (default: %default)")
op.add_option("--mn-eff", dest="EFF", default=0.8, type=float, help="Sampling efficiency (default: %default)")
op.add_option("--mn-points", dest="POINTS", default=40, type=int, help="Number of live points in PyMultinest (default: %default)")
op.add_option("--mn-imax", dest="ITMAX", default=0, type=int, help="Max number of iterations PyMultinest, 0 is infinite (default: %default)")
op.add_option("--mn-multimodal", dest="MULTIMODAL", default=False, action='store_true', help="Run in multimodal mode.")
op.add_option("--mn-no-importance",dest="NOIMPSAMP", default=False, action='store_true', help="Turn off importance sampling.")
opts, args = op.parse_args()
try:
os.makedirs(opts.OUTPUT)
except:
pass
import sys
E = ExperimentalNeutrinoMassMatrix()
S = Type1And2SeeSaw_v3()
PL = parameterlist.ParameterList.fromConfigFile(sys.argv[1])#"examples/param_card.dat")
N = 1000 # number of samples of PL
usethese = []
bounds, pnames = PL.getBox(usethese)
PMIN = [b[0] for b in bounds]
PMAX = [b[1] for b in bounds]
PLEN = [PMAX[i] - PMIN[i] for i in range(len(pnames))]
def scaleParam(p, idx):
return PMIN[idx] + p * PLEN[idx]
def myprior(cube, ndim, nparams):
for i in range(ndim):
cube[i] = scaleParam(cube[i], i)
def loglike(cube, ndim, nparams):
PP=[cube[j] for j in range(ndim)]
val = measure(*sample(PP,E,S,PL,N,pnames))
print(val)
if val == 0:
return -1e101
loglikelihood = -val # Ad-hoc
return loglikelihood
import pymultinest
pymultinest.run(loglike, myprior, len(pnames),
importance_nested_sampling = not opts.NOIMPSAMP,
verbose = False if opts.QUIET else True,
multimodal=opts.MULTIMODAL,
resume=opts.RESUME,
n_iter_before_update=opts.UPDATE,
evidence_tolerance=opts.TOL,
sampling_efficiency = opts.EFF,
init_MPI=False,
n_live_points = opts.POINTS,
max_iter=opts.ITMAX,
seed=opts.SEED,
outputfiles_basename='%s/GUTFIT'%opts.OUTPUT)
import json
json.dump(pnames, open('%s/GUTFITparams.json'%opts.OUTPUT, 'w'))
json.dump(pnames, open('%s/params.json'%opts.OUTPUT, 'w'))
NP = len(pnames)
print("Now analyzing output from {}/GUTFIT.txt".format(opts.OUTPUT))
a = pymultinest.Analyzer(n_params = NP, outputfiles_basename='%s/GUTFIT'%opts.OUTPUT)
a.get_data()
try:
s = a.get_stats()
except:
print("There was an error accumulating statistics. Try increasing the number of iterations, e.g. --mn-iterations -1")
sys.exit(1)
from collections import OrderedDict
resraw = a.get_best_fit()["parameters"]
D, T = sample(resraw ,E,S,PL,10*N, pnames)
bestval=measure(D,T)
PP=OrderedDict.fromkeys(pnames)
for num, pname in enumerate(pnames): PP[pname] = resraw[num]
out="# Best fit point (measure: {}):\n".format(bestval)
for k in PP: out+= "%s %.16f\n"%(k,PP[k])
with open("%sconfig.best"%a.outputfiles_basename, "w") as f: f.write(out)
print(out)
print("Measure at best fit point is {}".format(bestval))
from matplotlib import pyplot as plt
import matplotlib as mpl
plt.style.use('ggplot')
plt.clf()
fig, axes = plt.subplots(figsize=(10, 10), sharex=False, sharey=False, ncols=3, nrows=3)
plt.title("Measure: {}".format(bestval))
for i in range(3):
for j in range(3):
if i<j:
axes[i, j].axis('off')
else:
values = [d[i][j] for d in D]
theos = [t[i][j] for t in T]
axes[i, j].hist(values, bins=30)
axes[i, j].hist(theos, bins=30)
# axes[i, j].set_xscale("log")
# axes[i, j].set_yscale("log")
plt.savefig("{}/GUTFITplot.pdf".format(opts.OUTPUT))
| [
"pymultinest.Analyzer",
"gutfit.experimentalneutrinomassmatrix.ExperimentalNeutrinoMassMatrix",
"os.makedirs",
"optparse.OptionParser",
"matplotlib.pyplot.clf",
"gutfit.parameterlist.ParameterList.fromConfigFile",
"collections.OrderedDict.fromkeys",
"matplotlib.pyplot.style.use",
"numpy.mean",
"gu... | [((2176, 2205), 'numpy.mean', 'np.mean', (['[d[0][0] for d in D]'], {}), '([d[0][0] for d in D])\n', (2183, 2205), True, 'import numpy as np\n'), ((2217, 2246), 'numpy.mean', 'np.mean', (['[t[0][0] for t in T]'], {}), '([t[0][0] for t in T])\n', (2224, 2246), True, 'import numpy as np\n'), ((2306, 2335), 'numpy.mean', 'np.mean', (['[d[0][1] for d in D]'], {}), '([d[0][1] for d in D])\n', (2313, 2335), True, 'import numpy as np\n'), ((2347, 2376), 'numpy.mean', 'np.mean', (['[t[0][1] for t in T]'], {}), '([t[0][1] for t in T])\n', (2354, 2376), True, 'import numpy as np\n'), ((2436, 2465), 'numpy.mean', 'np.mean', (['[d[0][2] for d in D]'], {}), '([d[0][2] for d in D])\n', (2443, 2465), True, 'import numpy as np\n'), ((2477, 2506), 'numpy.mean', 'np.mean', (['[t[0][2] for t in T]'], {}), '([t[0][2] for t in T])\n', (2484, 2506), True, 'import numpy as np\n'), ((2566, 2595), 'numpy.mean', 'np.mean', (['[d[1][2] for d in D]'], {}), '([d[1][2] for d in D])\n', (2573, 2595), True, 'import numpy as np\n'), ((2607, 2636), 'numpy.mean', 'np.mean', (['[t[1][2] for t in T]'], {}), '([t[1][2] for t in T])\n', (2614, 2636), True, 'import numpy as np\n'), ((2696, 2725), 'numpy.mean', 'np.mean', (['[d[1][1] for d in D]'], {}), '([d[1][1] for d in D])\n', (2703, 2725), True, 'import numpy as np\n'), ((2737, 2766), 'numpy.mean', 'np.mean', (['[t[1][1] for t in T]'], {}), '([t[1][1] for t in T])\n', (2744, 2766), True, 'import numpy as np\n'), ((2826, 2855), 'numpy.mean', 'np.mean', (['[d[2][2] for d in D]'], {}), '([d[2][2] for d in D])\n', (2833, 2855), True, 'import numpy as np\n'), ((2867, 2896), 'numpy.mean', 'np.mean', (['[t[2][2] for t in T]'], {}), '([t[2][2] for t in T])\n', (2874, 2896), True, 'import numpy as np\n'), ((3243, 3279), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': '__doc__'}), '(usage=__doc__)\n', (3264, 3279), False, 'import optparse, os, sys\n'), ((5168, 5200), 
'gutfit.experimentalneutrinomassmatrix.ExperimentalNeutrinoMassMatrix', 'ExperimentalNeutrinoMassMatrix', ([], {}), '()\n', (5198, 5200), False, 'from gutfit.experimentalneutrinomassmatrix import ExperimentalNeutrinoMassMatrix\n'), ((5210, 5230), 'gutfit.type1and2seesaw_v3.Type1And2SeeSaw_v3', 'Type1And2SeeSaw_v3', ([], {}), '()\n', (5228, 5230), False, 'from gutfit.type1and2seesaw_v3 import Type1And2SeeSaw_v3\n'), ((5241, 5296), 'gutfit.parameterlist.ParameterList.fromConfigFile', 'parameterlist.ParameterList.fromConfigFile', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (5283, 5296), False, 'from gutfit import parameterlist\n'), ((6846, 6932), 'pymultinest.Analyzer', 'pymultinest.Analyzer', ([], {'n_params': 'NP', 'outputfiles_basename': "('%s/GUTFIT' % opts.OUTPUT)"}), "(n_params=NP, outputfiles_basename='%s/GUTFIT' % opts.\n OUTPUT)\n", (6866, 6932), False, 'import pymultinest\n'), ((7302, 7330), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['pnames'], {}), '(pnames)\n', (7322, 7330), False, 'from collections import OrderedDict\n'), ((7732, 7755), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (7745, 7755), True, 'from matplotlib import pyplot as plt\n'), ((7761, 7770), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7768, 7770), True, 'from matplotlib import pyplot as plt\n'), ((7789, 7865), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)', 'sharex': '(False)', 'sharey': '(False)', 'ncols': '(3)', 'nrows': '(3)'}), '(figsize=(10, 10), sharex=False, sharey=False, ncols=3, nrows=3)\n', (7801, 7865), True, 'from matplotlib import pyplot as plt\n'), ((5089, 5113), 'os.makedirs', 'os.makedirs', (['opts.OUTPUT'], {}), '(opts.OUTPUT)\n', (5100, 5113), False, 'import optparse, os, sys\n'), ((7126, 7137), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7134, 7137), False, 'import sys\n')] |
import numpy as np
import sonnet as snt
import tensorflow as tf
from sklearn import tree
import copy
import os
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
skew_rank = accu_rank = [24, 39, 2, 21, 31, 36, 19, 20, 18, 33, 25, 27, 1, 6, 8, 7, 32, 3, 11, 34, 5, 9, 12, 37, 23, 0, 28, 38, 29, 15, 16, 13, 30, 10, 35, 14, 26, 17, 22, 4]
def load_celeba(data_dir, restricted_degree, label_type, print_ratio=False):
"""Returns CelebA as (train_data, train_labels, test_data, test_labels)
Shapes are (162770, 64, 64, 3), (162770, 2), (19962, 64, 64, 3), (19962, 10)
Data is in [0,1] and labels are one-hot
Arg:
restricted_degree: only keep the instances with at least d selected attributes
"""
train_data = np.load(os.path.join(data_dir, 'celeba_train_imgs.npy'))
test_data = np.load(os.path.join(data_dir, 'celeba_test_imgs.npy'))
info_pak = np.load(os.path.join(data_dir, 'celeba_attr.npz'))
train_idxs = info_pak['train_idxs']
val_idxs = info_pak['val_idxs']
test_idxs = info_pak['test_idxs']
attribute_names = info_pak['attribute_names']
attributes = info_pak['attributes']
male_attr_idx = 20
def get_label(data, idxs):
def count_indicators(attr):
important_attributes_idx = [0, 1, 4, 9, 16, 18, 22, 24, 29, 30, 34, 36, 37, 38]
x = np.array([0] * attr.shape[0])
for i in important_attributes_idx:
x = x + attr[:, i]
return x
label = attributes[idxs]
sig = count_indicators(label) >= restricted_degree
label = label[sig]
data = data[sig]
if label_type == 'gender':
label = 1-label[:, male_attr_idx].reshape([-1, 1])
label = np.append(label, 1 - label, 1)
elif label_type == 'subattr':
# decission_tree_attr_idx = [1, 6, 34, 35, 36]
# decission_tree_attr_idx = [0, 1, 6, 7, 8, 9, 12, 18, 19, male_attr_idx, 24, 34, 36, 38, 39]
decission_tree_attr_idx = [i for i in range(label.shape[1])]
sub_attributes_idx = np.array(decission_tree_attr_idx)
label = label[:, sub_attributes_idx]
return data, label
train_data, train_label = get_label(train_data, train_idxs)
test_data, test_label = get_label(test_data, test_idxs)
if print_ratio:
print('\nCelebA restricted degree: {}'.format(restricted_degree))
train_ratio = sum(train_label[:, 1]) / train_label.shape[0]
test_ratio = sum(test_label[:, 1]) / test_label.shape[0]
print('Training set - Male: {:.2f}% ({}/{}), Not male: {:.2f}%'.format(train_ratio * 100,
sum(train_label[:, 1]),
train_label.shape[0],
100 - train_ratio * 100))
print('Testing set - Male: {:.2f}% ({}/{}), Not male: {:.2f}%'.format(test_ratio * 100,
sum(test_label[:, 1]),
test_label.shape[0],
100 - test_ratio * 100))
return train_data, train_label, test_data, test_label
train_data, train_labels, test_data, test_labels = load_celeba(
'H:\\CodeRange\\CelebA\\npy\\', restricted_degree=0, print_ratio=False, label_type='gender')
_, train_latent_labels, _, test_latent_labels = load_celeba(
'H:\\CodeRange\\CelebA\\npy\\', restricted_degree=0, print_ratio=False, label_type='subattr')
attributes_names = np.load('H:\\CodeRange\\CelebA\\npy\\celeba_attr.npz')['attribute_names']
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
def work(skewRankIdx):
path = './saved_model/SkewRank{:02d}_{}/'.format(skewRankIdx, attributes_names[skew_rank[skewRankIdx]])
print(path)
model=tf.saved_model.loader.load(session, [tf.saved_model.tag_constants.SERVING], path+'/saved_model')
def get_variables(model_meta):
graph = tf.get_default_graph()
sig_vars = copy.deepcopy(model_meta.signature_def['serving_default'])
sig_inputs = sig_vars.inputs
sig_outputs = sig_vars.outputs
output = dict()
for k in sig_inputs.keys():
print('{:20}, {}'.format(k,sig_inputs[k].name))
output[k] = graph.get_tensor_by_name(sig_inputs[k].name)
for k in sig_outputs.keys():
print('{:20}, {}'.format(k,sig_outputs[k].name))
output[k] = graph.get_tensor_by_name(sig_outputs[k].name)
return output
tensors = get_variables(model)
t_x = tensors['x']
t_latent_var = tensors['latent_var']
t_output = tensors['output']
epoch_size = 32
def get_latent_var_list(data):
test_data = data
test_data_len = test_data.shape[0]
epoch_num = test_data_len // epoch_size
# epoch_num = 60
instance_num = epoch_num * epoch_size
latent_var = []
for i in tqdm(range(epoch_num)):
epoch_beg = i*epoch_size
epoch_end = (i+1)*epoch_size
epoch_input = test_data[epoch_beg:epoch_end].astype('float32') / 255.0
# print(epoch_beg, epoch_end)
outputs = session.run([t_output, t_latent_var],
feed_dict={t_x:epoch_input})
latent_var.append(outputs[1])
return np.array(latent_var)
train_latent_var_lists = get_latent_var_list(train_data)
test_latent_var_lists = get_latent_var_list(test_data)
np.save(os.path.join(path, 'post_latent_var-train.npy'), train_latent_var_lists)
np.save(os.path.join(path, 'post_latent_var-test.npy'), test_latent_var_lists)
for i in range(40):
work(i)
| [
"numpy.load",
"copy.deepcopy",
"tensorflow.Session",
"tensorflow.ConfigProto",
"numpy.append",
"numpy.array",
"tensorflow.saved_model.loader.load",
"tensorflow.get_default_graph",
"os.path.join"
] | [((3866, 3882), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3880, 3882), True, 'import tensorflow as tf\n'), ((3932, 3957), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3942, 3957), True, 'import tensorflow as tf\n'), ((3782, 3836), 'numpy.load', 'np.load', (['"""H:\\\\CodeRange\\\\CelebA\\\\npy\\\\celeba_attr.npz"""'], {}), "('H:\\\\CodeRange\\\\CelebA\\\\npy\\\\celeba_attr.npz')\n", (3789, 3836), True, 'import numpy as np\n'), ((4116, 4218), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['session', '[tf.saved_model.tag_constants.SERVING]', "(path + '/saved_model')"], {}), "(session, [tf.saved_model.tag_constants.SERVING],\n path + '/saved_model')\n", (4142, 4218), True, 'import tensorflow as tf\n'), ((780, 827), 'os.path.join', 'os.path.join', (['data_dir', '"""celeba_train_imgs.npy"""'], {}), "(data_dir, 'celeba_train_imgs.npy')\n", (792, 827), False, 'import os\n'), ((853, 899), 'os.path.join', 'os.path.join', (['data_dir', '"""celeba_test_imgs.npy"""'], {}), "(data_dir, 'celeba_test_imgs.npy')\n", (865, 899), False, 'import os\n'), ((925, 966), 'os.path.join', 'os.path.join', (['data_dir', '"""celeba_attr.npz"""'], {}), "(data_dir, 'celeba_attr.npz')\n", (937, 966), False, 'import os\n'), ((4269, 4291), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4289, 4291), True, 'import tensorflow as tf\n'), ((4311, 4369), 'copy.deepcopy', 'copy.deepcopy', (["model_meta.signature_def['serving_default']"], {}), "(model_meta.signature_def['serving_default'])\n", (4324, 4369), False, 'import copy\n'), ((5667, 5687), 'numpy.array', 'np.array', (['latent_var'], {}), '(latent_var)\n', (5675, 5687), True, 'import numpy as np\n'), ((5826, 5873), 'os.path.join', 'os.path.join', (['path', '"""post_latent_var-train.npy"""'], {}), "(path, 'post_latent_var-train.npy')\n", (5838, 5873), False, 'import os\n'), ((5911, 5957), 'os.path.join', 'os.path.join', (['path', 
'"""post_latent_var-test.npy"""'], {}), "(path, 'post_latent_var-test.npy')\n", (5923, 5957), False, 'import os\n'), ((1371, 1400), 'numpy.array', 'np.array', (['([0] * attr.shape[0])'], {}), '([0] * attr.shape[0])\n', (1379, 1400), True, 'import numpy as np\n'), ((1768, 1798), 'numpy.append', 'np.append', (['label', '(1 - label)', '(1)'], {}), '(label, 1 - label, 1)\n', (1777, 1798), True, 'import numpy as np\n'), ((2108, 2141), 'numpy.array', 'np.array', (['decission_tree_attr_idx'], {}), '(decission_tree_attr_idx)\n', (2116, 2141), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
import pycocotools.mask as mask_util
import cv2
classes = [line.rstrip('\n') for line in open('coco_classes.txt')]
# You might need to adjust score threshold
def display_objdetect_image(image, boxes, labels, scores, masks, score_threshold=0.7):
# Resize boxes
ratio = 800.0 / min(image.size[0], image.size[1])
boxes /= ratio
_, ax = plt.subplots(1, figsize=(12,9))
image = np.array(image)
for mask, box, label, score in zip(masks, boxes, labels, scores):
# Showing boxes with score > 0.7
if score <= score_threshold:
continue
# Finding contour based on mask
mask = mask[0, :, :, None]
int_box = [int(i) for i in box]
mask = cv2.resize(mask, (int_box[2]-int_box[0]+1, int_box[3]-int_box[1]+1))
mask = mask > 0.5
im_mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
x_0 = max(int_box[0], 0)
x_1 = min(int_box[2] + 1, image.shape[1])
y_0 = max(int_box[1], 0)
y_1 = min(int_box[3] + 1, image.shape[0])
mask_y_0 = max(y_0 - box[1], 0)
mask_y_1 = mask_y_0 + y_1 - y_0
mask_x_0 = max(x_0 - box[0], 0)
mask_x_1 = mask_x_0 + x_1 - x_0
im_mask[y_0:y_1, x_0:x_1] = mask[
mask_y_0 : mask_y_1, mask_x_0 : mask_x_1
]
im_mask = im_mask[:, :, None]
# OpenCV version 4.x
contours, hierarchy = cv2.findContours(
im_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
image = cv2.drawContours(image, contours, -1, 25, 3)
rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor='b', facecolor='none')
ax.annotate(classes[label] + ':' + str(np.round(score, 2)), (box[0], box[1]), color='w', fontsize=12)
ax.add_patch(rect)
ax.imshow(image)
plt.savefig("annotated.png")
img = Image.open('../images/preprocessed.jpg')
labels = np.fromfile("../labels.data", dtype=np.int64)
NUM_CLASSES = labels.size
boxes = np.fromfile("../boxes.data", dtype=np.float32).reshape((NUM_CLASSES, 4))
scores = np.fromfile("../scores.data", dtype=np.float32).reshape((NUM_CLASSES))
# For model exported from pytorch use 800x800
masks = np.fromfile("../masks.data", dtype=np.float32).reshape((NUM_CLASSES, 1, 28, 28))
display_objdetect_image(img, boxes, labels, scores, masks) | [
"cv2.resize",
"cv2.findContours",
"matplotlib.patches.Rectangle",
"numpy.fromfile",
"numpy.round",
"numpy.zeros",
"PIL.Image.open",
"numpy.array",
"cv2.drawContours",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((2010, 2050), 'PIL.Image.open', 'Image.open', (['"""../images/preprocessed.jpg"""'], {}), "('../images/preprocessed.jpg')\n", (2020, 2050), False, 'from PIL import Image\n'), ((2061, 2106), 'numpy.fromfile', 'np.fromfile', (['"""../labels.data"""'], {'dtype': 'np.int64'}), "('../labels.data', dtype=np.int64)\n", (2072, 2106), True, 'import numpy as np\n'), ((463, 495), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 9)'}), '(1, figsize=(12, 9))\n', (475, 495), True, 'import matplotlib.pyplot as plt\n'), ((508, 523), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (516, 523), True, 'import numpy as np\n'), ((1971, 1999), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""annotated.png"""'], {}), "('annotated.png')\n", (1982, 1999), True, 'import matplotlib.pyplot as plt\n'), ((825, 901), 'cv2.resize', 'cv2.resize', (['mask', '(int_box[2] - int_box[0] + 1, int_box[3] - int_box[1] + 1)'], {}), '(mask, (int_box[2] - int_box[0] + 1, int_box[3] - int_box[1] + 1))\n', (835, 901), False, 'import cv2\n'), ((938, 996), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1])'], {'dtype': 'np.uint8'}), '((image.shape[0], image.shape[1]), dtype=np.uint8)\n', (946, 996), True, 'import numpy as np\n'), ((1526, 1591), 'cv2.findContours', 'cv2.findContours', (['im_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1542, 1591), False, 'import cv2\n'), ((1631, 1675), 'cv2.drawContours', 'cv2.drawContours', (['image', 'contours', '(-1)', '(25)', '(3)'], {}), '(image, contours, -1, 25, 3)\n', (1647, 1675), False, 'import cv2\n'), ((1692, 1811), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(box[0], box[1])', '(box[2] - box[0])', '(box[3] - box[1])'], {'linewidth': '(1)', 'edgecolor': '"""b"""', 'facecolor': '"""none"""'}), "((box[0], box[1]), box[2] - box[0], box[3] - box[1],\n linewidth=1, edgecolor='b', facecolor='none')\n", (1709, 1811), True, 'import 
matplotlib.patches as patches\n'), ((2141, 2187), 'numpy.fromfile', 'np.fromfile', (['"""../boxes.data"""'], {'dtype': 'np.float32'}), "('../boxes.data', dtype=np.float32)\n", (2152, 2187), True, 'import numpy as np\n'), ((2223, 2270), 'numpy.fromfile', 'np.fromfile', (['"""../scores.data"""'], {'dtype': 'np.float32'}), "('../scores.data', dtype=np.float32)\n", (2234, 2270), True, 'import numpy as np\n'), ((2348, 2394), 'numpy.fromfile', 'np.fromfile', (['"""../masks.data"""'], {'dtype': 'np.float32'}), "('../masks.data', dtype=np.float32)\n", (2359, 2394), True, 'import numpy as np\n'), ((1855, 1873), 'numpy.round', 'np.round', (['score', '(2)'], {}), '(score, 2)\n', (1863, 1873), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from numpy import (
array,
linspace,
argmin,
take,
isclose,
isin,
around,
all,
abs as np_abs,
)
from scipy import interpolate
def get_common_base(values1, values2, is_extrap=False, is_downsample=False):
"""Returns a common base for vectors values1 and values2
Parameters
----------
values1: list
values of the first axis
values2: list
values of the second axis
is_extrap: bool
Boolean indicating if we want to keep the widest vector and extrapolate the other one
is_downsample: bool
Boolean indicating if we want to keep the smallest number of points and downsample the other one
Returns
-------
list of the common axis values
"""
if is_extrap:
initial = min(values1[0], values2[0])
final = max(values1[-1], values2[-1])
else:
initial = max(values1[0], values2[0])
final = min(values1[-1], values2[-1])
if is_downsample:
number = min(
len([i for i in values1 if i >= initial and i <= final]),
len([i for i in values2 if i >= initial and i <= final]),
)
else:
length1 = len([i for i in values1 if i >= initial and i <= final])
length2 = len([i for i in values2 if i >= initial and i <= final])
if length1 > length2:
number = length1
if initial not in values1:
initial = values1[argmin(np_abs([i - initial for i in values1])) + 1]
if final not in values1:
final = values1[argmin(np_abs([i - final for i in values1])) - 1]
else:
number = length2
if initial not in values2:
initial = values2[argmin(np_abs([i - initial for i in values2])) + 1]
if final not in values2:
final = values2[argmin(np_abs([i - final for i in values2])) - 1]
return linspace(initial, final, int(number), endpoint=True)
def get_interpolation(values, axis_values, new_axis_values, is_step=False):
"""Returns the interpolated field along one axis, given the new axis
Parameters
----------
values: ndarray
1Darray of a field along one axis
axis_values: list
values of the original axis
new_axis_values: list
values of the new axis
Returns
-------
ndarray of the interpolated field
"""
if str(axis_values) == "whole": # Whole axis -> no interpolation
return values
elif len(new_axis_values) == 1: # Single point -> use argmin
idx = argmin(np_abs(axis_values - new_axis_values[0]))
return take(values, [idx])
elif len(axis_values) == len(new_axis_values) and all(
isclose(axis_values, new_axis_values, rtol=1e-03)
): # Same axes -> no interpolation
return values
elif isin(
around(new_axis_values, 5), around(axis_values, 5), assume_unique=True
).all(): # New axis is subset -> no interpolation
return values[
isin(around(axis_values, 5), around(new_axis_values, 5), assume_unique=True)
]
elif is_step:
if len(axis_values) == 1:
return array([values[0] for i in range(len(new_axis_values))])
else:
new_values = []
for i in range(len(new_axis_values)):
for j in range(len(axis_values) - 1):
if (
new_axis_values[i] == axis_values[j]
and new_axis_values[i] == axis_values[j + 1]
):
new_values.append((values[j] + values[j + 1]) / 2)
break
elif (
new_axis_values[i] >= axis_values[j]
and new_axis_values[i] < axis_values[j + 1]
):
new_values.append(values[j])
break
elif (
j == len(axis_values) - 2
and new_axis_values[i] == axis_values[j + 1]
):
new_values.append(values[j + 1])
break
return array(new_values)
else:
f = interpolate.interp1d(axis_values, values)
return f(new_axis_values)
| [
"numpy.abs",
"numpy.around",
"numpy.isclose",
"numpy.take",
"numpy.array",
"scipy.interpolate.interp1d"
] | [((2653, 2672), 'numpy.take', 'take', (['values', '[idx]'], {}), '(values, [idx])\n', (2657, 2672), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((2596, 2636), 'numpy.abs', 'np_abs', (['(axis_values - new_axis_values[0])'], {}), '(axis_values - new_axis_values[0])\n', (2602, 2636), True, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((2740, 2789), 'numpy.isclose', 'isclose', (['axis_values', 'new_axis_values'], {'rtol': '(0.001)'}), '(axis_values, new_axis_values, rtol=0.001)\n', (2747, 2789), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((4256, 4297), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['axis_values', 'values'], {}), '(axis_values, values)\n', (4276, 4297), False, 'from scipy import interpolate\n'), ((1474, 1514), 'numpy.abs', 'np_abs', (['[(i - initial) for i in values1]'], {}), '([(i - initial) for i in values1])\n', (1480, 1514), True, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((1595, 1633), 'numpy.abs', 'np_abs', (['[(i - final) for i in values1]'], {}), '([(i - final) for i in values1])\n', (1601, 1633), True, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((1761, 1801), 'numpy.abs', 'np_abs', (['[(i - initial) for i in values2]'], {}), '([(i - initial) for i in values2])\n', (1767, 1801), True, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((1882, 1920), 'numpy.abs', 'np_abs', (['[(i - final) for i in values2]'], {}), '([(i - final) for i in values2])\n', (1888, 1920), True, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((2875, 2901), 'numpy.around', 'around', (['new_axis_values', '(5)'], {}), '(new_axis_values, 5)\n', (2881, 2901), False, 'from numpy import array, 
linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((2903, 2925), 'numpy.around', 'around', (['axis_values', '(5)'], {}), '(axis_values, 5)\n', (2909, 2925), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((3041, 3063), 'numpy.around', 'around', (['axis_values', '(5)'], {}), '(axis_values, 5)\n', (3047, 3063), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((3065, 3091), 'numpy.around', 'around', (['new_axis_values', '(5)'], {}), '(new_axis_values, 5)\n', (3071, 3091), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n'), ((4216, 4233), 'numpy.array', 'array', (['new_values'], {}), '(new_values)\n', (4221, 4233), False, 'from numpy import array, linspace, argmin, take, isclose, isin, around, all, abs as np_abs\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# GriSPy Project (https://github.com/mchalela/GriSPy).
# Copyright (c) 2019, <NAME>
# License: MIT
# Full Text: https://github.com/mchalela/GriSPy/blob/master/LICENSE
import numpy as np
import matplotlib.pyplot as plt
from grispy import GriSPy
# Example 1. 2D Uniform Distribution ------------------------------------------
# This example generates a 2D random uniform distribution.
# Periodic conditions on y-axis, or axis=1.
# We search for neighbors within a given radius and n-nearest neighbors.
# Create random points and centres
Npoints = 10 ** 4
Ncentres = 2
dim = 2
Lbox = 100.0
np.random.seed(2)
data = np.random.uniform(0, Lbox, size=(Npoints, dim))
centres = np.random.uniform(0, Lbox, size=(Ncentres, dim))
# Grispy params
upper_radii = 15.0
lower_radii = 10.0
n_nearest = 100
periodic = {0: (0, Lbox), 1: (0, Lbox)}
# Build the grid with the data
gsp = GriSPy(data, periodic=periodic)
# Query for neighbors within upper_radii
bubble_dist, bubble_ind = gsp.bubble_neighbors(
centres, distance_upper_bound=upper_radii
)
# Query for neighbors in a shell within lower_radii and upper_radii
shell_dist, shell_ind = gsp.shell_neighbors(
centres, distance_lower_bound=lower_radii, distance_upper_bound=upper_radii
)
# Query for nth nearest neighbors
near_dist, near_ind = gsp.nearest_neighbors(centres, n=n_nearest)
# Plot results
plt.figure(4, figsize=(10, 3.2))
plt.subplot(1, 3, 1, aspect="equal")
plt.title("Bubble query")
plt.scatter(data[:, 0], data[:, 1], c="k", marker=".", s=3)
for ind in bubble_ind:
plt.scatter(data[ind, 0], data[ind, 1], c="C3", marker="o", s=5)
plt.plot(centres[:, 0], centres[:, 1], "ro", ms=10)
plt.subplot(1, 3, 2, aspect="equal")
plt.title("Shell query")
plt.scatter(data[:, 0], data[:, 1], c="k", marker=".", s=2)
for ind in shell_ind:
plt.scatter(data[ind, 0], data[ind, 1], c="C2", marker="o", s=5)
plt.plot(centres[:, 0], centres[:, 1], "ro", ms=10)
plt.subplot(1, 3, 3, aspect="equal")
plt.title("n-Nearest query")
plt.scatter(data[:, 0], data[:, 1], c="k", marker=".", s=2)
for ind in near_ind:
plt.scatter(data[ind, 0], data[ind, 1], c="C0", marker="o", s=5)
plt.plot(centres[:, 0], centres[:, 1], "ro", ms=10)
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.random.uniform",
"matplotlib.pyplot.subplot",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"grispy.GriSPy",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] | [((671, 688), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (685, 688), True, 'import numpy as np\n'), ((696, 743), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'Lbox'], {'size': '(Npoints, dim)'}), '(0, Lbox, size=(Npoints, dim))\n', (713, 743), True, 'import numpy as np\n'), ((754, 802), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'Lbox'], {'size': '(Ncentres, dim)'}), '(0, Lbox, size=(Ncentres, dim))\n', (771, 802), True, 'import numpy as np\n'), ((952, 983), 'grispy.GriSPy', 'GriSPy', (['data'], {'periodic': 'periodic'}), '(data, periodic=periodic)\n', (958, 983), False, 'from grispy import GriSPy\n'), ((1436, 1468), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {'figsize': '(10, 3.2)'}), '(4, figsize=(10, 3.2))\n', (1446, 1468), True, 'import matplotlib.pyplot as plt\n'), ((1470, 1506), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {'aspect': '"""equal"""'}), "(1, 3, 1, aspect='equal')\n", (1481, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1532), 'matplotlib.pyplot.title', 'plt.title', (['"""Bubble query"""'], {}), "('Bubble query')\n", (1516, 1532), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1592), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[:, 0]', 'data[:, 1]'], {'c': '"""k"""', 'marker': '"""."""', 's': '(3)'}), "(data[:, 0], data[:, 1], c='k', marker='.', s=3)\n", (1544, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1736), 'matplotlib.pyplot.plot', 'plt.plot', (['centres[:, 0]', 'centres[:, 1]', '"""ro"""'], {'ms': '(10)'}), "(centres[:, 0], centres[:, 1], 'ro', ms=10)\n", (1693, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1774), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {'aspect': '"""equal"""'}), "(1, 3, 2, aspect='equal')\n", (1749, 1774), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1799), 'matplotlib.pyplot.title', 'plt.title', (['"""Shell query"""'], {}), "('Shell query')\n", (1784, 1799), 
True, 'import matplotlib.pyplot as plt\n'), ((1800, 1859), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[:, 0]', 'data[:, 1]'], {'c': '"""k"""', 'marker': '"""."""', 's': '(2)'}), "(data[:, 0], data[:, 1], c='k', marker='.', s=2)\n", (1811, 1859), True, 'import matplotlib.pyplot as plt\n'), ((1951, 2002), 'matplotlib.pyplot.plot', 'plt.plot', (['centres[:, 0]', 'centres[:, 1]', '"""ro"""'], {'ms': '(10)'}), "(centres[:, 0], centres[:, 1], 'ro', ms=10)\n", (1959, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2040), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {'aspect': '"""equal"""'}), "(1, 3, 3, aspect='equal')\n", (2015, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2069), 'matplotlib.pyplot.title', 'plt.title', (['"""n-Nearest query"""'], {}), "('n-Nearest query')\n", (2050, 2069), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2129), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[:, 0]', 'data[:, 1]'], {'c': '"""k"""', 'marker': '"""."""', 's': '(2)'}), "(data[:, 0], data[:, 1], c='k', marker='.', s=2)\n", (2081, 2129), True, 'import matplotlib.pyplot as plt\n'), ((2220, 2271), 'matplotlib.pyplot.plot', 'plt.plot', (['centres[:, 0]', 'centres[:, 1]', '"""ro"""'], {'ms': '(10)'}), "(centres[:, 0], centres[:, 1], 'ro', ms=10)\n", (2228, 2271), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2291), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2289, 2291), True, 'import matplotlib.pyplot as plt\n'), ((2292, 2302), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2300, 2302), True, 'import matplotlib.pyplot as plt\n'), ((1620, 1684), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[ind, 0]', 'data[ind, 1]'], {'c': '"""C3"""', 'marker': '"""o"""', 's': '(5)'}), "(data[ind, 0], data[ind, 1], c='C3', marker='o', s=5)\n", (1631, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1950), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[ind, 0]', 
'data[ind, 1]'], {'c': '"""C2"""', 'marker': '"""o"""', 's': '(5)'}), "(data[ind, 0], data[ind, 1], c='C2', marker='o', s=5)\n", (1897, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2219), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data[ind, 0]', 'data[ind, 1]'], {'c': '"""C0"""', 'marker': '"""o"""', 's': '(5)'}), "(data[ind, 0], data[ind, 1], c='C0', marker='o', s=5)\n", (2166, 2219), True, 'import matplotlib.pyplot as plt\n')] |
import os
from config import *
import numpy as np
import tensorflow as tf
import scipy.io
import random
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
from skimage import util
import tensorflow.contrib.slim as slim
from tensorflow import initializers as tfinit
# DATASETs
def loadDatasetFromMat(dataset_dir, dataset_name):
"""
Load image and corresponding label image from original mat files.
"""
print('-'*70)
if (dataset_name == "ss"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas.mat'))['salinas']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_gt.mat'))['salinas_gt']
print("Salinal dataset is loaded")
elif (dataset_name == "ssc"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_corrected.mat'))['salinas_corrected']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_gt.mat'))['salinas_gt']
print("Salinal CORRECTED dataset is loaded")
elif (dataset_name == "ssc815"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_corrected.mat'))['salinas_corrected']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_gt_815.mat'))['salinas_gt_815']
print("Salinal CORRECTED 815 dataset is loaded")
elif (dataset_name == "ssc_pca"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_corrected_pca.mat'))['salinas_corrected_pca']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Salinas_gt.mat'))['salinas_gt']
print("Salinal CORRECTED PCA3 dataset is loaded")
elif (dataset_name == "ip"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines.mat'))['indian_pines']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines_gt.mat'))['indian_pines_gt']
print("Indian Pines dataset is loaded")
elif (dataset_name == "ipc"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines_corrected.mat'))['indian_pines_corrected']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines_gt.mat'))['indian_pines_gt']
print("Indian Pines CORRECTED dataset is loaded")
elif (dataset_name == "ipc_pca"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines_corrected_pca.mat'))['indian_pines_corrected_pca']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Indian_pines_gt.mat'))['indian_pines_gt']
print("Indian Pines CORRECTED PCA3 dataset is loaded")
elif (dataset_name == "pu"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'PaviaU.mat'))['paviaU']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'PaviaU_gt.mat'))['paviaU_gt']
print("Pavia University dataset is loaded")
elif (dataset_name == "pc"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Pavia.mat'))['pavia']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Pavia_gt.mat'))['pavia_gt']
print("Pavia Centre dataset is loaded")
elif (dataset_name == "ksc"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'KSC.mat'))['KSC']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'KSC_gt.mat'))['KSC_gt']
print("Kennedy Space Center dataset is loaded")
elif (dataset_name == "b"):
image = scipy.io.loadmat(os.path.join(dataset_dir, \
'Botswana.mat'))['Botswana']
label = scipy.io.loadmat(os.path.join(dataset_dir, \
'Botswana_gt.mat'))['Botswana_gt']
print("Botswana dataset is loaded")
return image, label
def getDatasetProperty(image, label):
"""
Get dataset properties such as height, width, depth of image and
maximum class number of corresponding label image
"""
if image.shape[0]==label.shape[0] and image.shape[1]==label.shape[1]:
height = image.shape[0]
width = image.shape[1]
depth = image.shape[2]
class_number = label.max()
return height, width, depth, class_number
# NORMALIZATIONs
def reluNormalization(data):
"""
Normalize data between 0 and 1
"""
max_minus_min = data.max() - data.min()
data = data.astype(float)
data -= data.min()
data /= max_minus_min
print('-'*70)
print("ReLU normalization")
print("min value =\t\t",data.min())
print("max value =\t\t",data.max())
return data
def tanhNormalization(data):
"""
Normalize data in range between -1 and 1
"""
max_minus_min = data.max() - data.min()
data = data.astype(float)
data -= data.min()
data /= max_minus_min
data *= 2
data -= 1
print('-'*70)
print("Tanh normalization")
print("min value =\t\t",data.min())
print("max value =\t\t",data.max())
return data
def meanNormalization(data):
"""
Mean Normalize (Standardize data)
"""
std = data.std()
data = data.astype(float)
data -= data.mean()
data /= std
print('-'*70)
print("Mean normalization")
print("mean value =\t\t",data.mean())
print("std value =\t\t",data.std())
return data
# DATA PREPARATION
def getMargin(patch_size):
"""
Return patch margin size from a center point.
"""
if (patch_size%2 == 0):
raise ValueError("patch_size should be odd!")
else:
margin = (patch_size-1) // 2
return margin
def patchCentered(data, pos_x, pos_y, patch_size):
"""
Patch input data of defined size centered at (pos_x, pos_y)
coordinates and return it in HWC.
"""
margin = (patch_size-1) // 2
x_top = pos_x - margin
x_bottom = pos_x + margin+1
y_left = pos_y - margin
y_right = pos_y + margin+1
patch = data[x_top:x_bottom, y_left:y_right, :]
return patch
# tr, vl, ev coords
def generateCoordsList(image, label, patch_size):
"""
Form lists of coordinates for each of the classes and stores
them in another one list.
"""
h, w, d, cl_num = getDatasetProperty(image, label)
m = getMargin(patch_size)
coords = []
for cl in range(cl_num):
coords.append([])
for x in range(m, h - m):
for y in range(m, w - m):
curr_tar = label[x,y]
if curr_tar > 0:
coords[curr_tar-1].append([x,y])
return coords
def printCoordsListInfo(coords):
"""
Outputs information about dataset based on the list of coordinates.
"""
print('-'*70)
print('\t\t\tlen')
cl_num = len(coords)
for cl in range(cl_num):
cur_coords = coords[cl]
print("Class "+str(cl+1).zfill(2)+"\t\t"+str(len(cur_coords)))
def splitCoordsListByFrac(coords, vl_frac, ev_frac):
    """Shuffle each class's coordinate list in place and split it into
    train / validation / evaluation sublists.

    `vl_frac` and `ev_frac` give the validation and evaluation fractions of
    each class; everything left over becomes training data.
    """
    tr_coords, vl_coords, ev_coords = [], [], []
    for cl_coords in coords:
        total = len(cl_coords)
        n_vl = int(total * vl_frac)
        n_ev = int(total * ev_frac)
        n_tr = total - n_vl - n_ev
        random.shuffle(cl_coords)
        tr_coords.append(cl_coords[:n_tr])
        vl_coords.append(cl_coords[n_tr:n_tr + n_vl])
        ev_coords.append(cl_coords[n_tr + n_vl:])
    return tr_coords, vl_coords, ev_coords
def splitCoordsByEvCount(image, label, coords, tr_frac, ev_count, patch_size):
    """
    Splits labeled part of image into train, validation and evaluation subsets.
    First forms evaluation subset with 'ev_count' coordinates for each class
    and guarantees that there would be no overlapping within evaluation patches.
    After that forms training and validation subsets of coordinates based on
    'tr_frac'. Based on them training and validation patches can intersect with
    each other.
    """
    h, w, d, cl_num = getDatasetProperty(image, label)
    # Patch half-width; evaluation centers are additionally kept 2*m away
    # (see ev_map_with_margin) so train/val patches cannot touch eval patches.
    m = getMargin(patch_size)
    # gt image
    # mat = plt.imshow(label,
    #                  cmap=mcolors.ListedColormap(COLORS_D17),
    #                  vmin = 0-.5,
    #                  vmax = len(COLORS_D17)-1+.5,
    #                  alpha=1)
    # cax = plt.colorbar(mat, ticks=np.unique(label))
    # plt.show()
    # ev_map: pixels covered by accepted evaluation patches.
    # ev_map_with_margin: same area grown by one extra margin on each side.
    # temp_map: scratch footprint of the currently proposed patch.
    ev_map = np.zeros((h, w), dtype=np.uint8)
    ev_map_with_margin = np.zeros((h, w), dtype=np.uint8)
    temp_map = np.zeros((h, w), dtype=np.uint8)
    # extract count evaluation points
    # Rejection sampling: draw random labeled coordinates until `ev_count`
    # non-overlapping evaluation patches have been accepted per class.
    # NOTE(review): this loops forever if a class cannot host `ev_count`
    # disjoint patches — consider a retry limit.
    ev_coords = []
    for cl in range(cl_num):
        ev_coords.append([])
        counter = 0
        cur_cl_coors = coords[cl]
        range_index = (0, len(cur_cl_coors))
        while counter < ev_count:
            temp_map = temp_map*0
            index = random.randrange(*range_index)
            x, y = cur_cl_coors[index]
            temp_map[x-m:x+m+1, y-m:y+m+1] = 1
            # Accept only if the proposed patch touches no accepted one.
            if np.sum(ev_map*temp_map) == 0:
                ev_coords[cl].append([x, y])
                ev_map[x-m:x+m+1, y-m:y+m+1] = 1
                ev_map_with_margin[x-2*m:x+2*m+1, y-2*m:y+2*m+1] = 1
                counter += 1
    # plt.imshow(ev_map,
    #            cmap=mcolors.ListedColormap(["white", "black"]),
    #            alpha=0.5)
    # plt.show()
    # Mask the label image so the grown evaluation area is excluded from
    # train/validation sampling. util.invert is presumably
    # skimage.util.invert (maps 1.0 -> 0.0 and 0.0 -> 1.0 here) — confirm
    # the import at the top of the file.
    ev_map_with_margin_inv = util.invert(ev_map_with_margin.astype(float))
    tr_vl_label = label*ev_map_with_margin_inv.astype(int)
    # Re-collect the remaining labeled coordinates per class.
    tr_vl_coords = []
    for i in range(cl_num):
        tr_vl_coords.append([])
    for x in range(m, h - m):
        for y in range(m, w - m):
            curr_tar = tr_vl_label[x,y]
            if curr_tar > 0:
                tr_vl_coords[curr_tar-1].append([x,y])
    # Shuffle and split what is left into train (tr_frac) and validation.
    tr_coords, vl_coords = [], []
    for cl in range(cl_num):
        cur_coords = tr_vl_coords[cl]
        cur_population = len(tr_vl_coords[cl])
        tr_split_size = int(cur_population*tr_frac)
        random.shuffle(cur_coords)
        tr_coords.append(cur_coords[:tr_split_size])
        vl_coords.append(cur_coords[tr_split_size:])
    return tr_coords, vl_coords, ev_coords
def printSplitInfo(tr_coords, vl_coords, ev_coords):
    """Tabulate per-class train / validation / evaluation counts."""
    print('-' * 70)
    print('\t\t\tlen(tr)\t\tlen(vl)\t\tlen(ev)\t\tlen(sum)')
    for idx, (tr, vl, ev) in enumerate(zip(tr_coords, vl_coords, ev_coords),
                                       start=1):
        print("Class%s \t\t %s \t\t %s \t\t %s \t\t" % (
            str(idx).zfill(2),
            str(len(tr)).zfill(5),
            str(len(vl)).zfill(5),
            str(len(ev)).zfill(5)))
def formArrayFromCoordsList(height, width, coords):
    """Rasterize per-class coordinate lists into a label array.

    Pixels listed for class index k receive value k+1; all other pixels
    stay 0 (unlabeled).
    """
    array = np.zeros((height, width), dtype=np.uint8)
    for cl, cl_coords in enumerate(coords):
        for x, y in cl_coords:
            array[x, y] = cl + 1
    return array
def splitChecker(height, width, tr_coords, vl_coords, ev_coords):
    """
    Checks whether there is some intersection between train, validation and
    evaluation coordinate lists. In case if there is one raises ValueError.
    """
    # Rasterize each split; a pixel is nonzero iff it belongs to that split.
    tr_arr = formArrayFromCoordsList(height, width, tr_coords)
    vl_arr = formArrayFromCoordsList(height, width, vl_coords)
    ev_arr = formArrayFromCoordsList(height, width, ev_coords)
    # Bug fix: the old check multiplied all three maps, which only fires
    # when a pixel appears in *every* split — a pixel shared by just two
    # splits (e.g. train and validation) slipped through. Any pairwise
    # overlap must fail.
    pairs = ((tr_arr, vl_arr), (tr_arr, ev_arr), (vl_arr, ev_arr))
    if any(np.sum(a * b) > 0 for a, b in pairs):
        raise ValueError('Something wrong with splitting. Intersection detected')
def saveCoords(coords_file, tr_coords, vl_coords, ev_coords):
    """
    Saves training, validation and evaluation coordinate lists into
    mat file. Dataset are saved as class 'cell'.
    """
    def as_cell(split):
        # Ragged per-class lists -> 1-D object array so scipy stores a
        # MATLAB cell. NOTE: `dtype=np.object` was removed in NumPy 1.24;
        # the builtin `object` is the supported spelling.
        cell = np.zeros((len(split),), dtype=object)
        for cl in range(len(split)):
            cell[cl] = split[cl]
        return cell

    dictionary = {"tr_coords": as_cell(tr_coords),
                  "vl_coords": as_cell(vl_coords),
                  "ev_coords": as_cell(ev_coords)}
    scipy.io.savemat(coords_file, dictionary)
    print('-'*70)
    print('Train, Validation and Evaluation coordinates are saved')
    print(coords_file)
def loadCoords(coords_file):
    """
    Loads train, validation and evaluation coordinate lists from a mat
    file written by saveCoords.

    savemat wraps each cell array in an extra (1, N) dimension; indexing
    with [0] unwraps it so the per-class sequence comes back directly.
    """
    mat = scipy.io.loadmat(coords_file)
    splits = tuple(mat[key][0] for key in ('tr_coords', 'vl_coords', 'ev_coords'))
    print('-' * 70)
    print('Train, Validation and Evaluation coordinates are loaded')
    print(coords_file)
    return splits
# tr, vl, ev patches
def loadPatches(image, patch_size, tr_coords, vl_coords, ev_coords):
    '''
    Loads centered patches based on train, validation and evaluation
    coordinate lists.

    Returns three structures (train, validation, evaluation), each a list
    with one patch list per class, in the same order as the coordinates.
    '''
    def extract(split):
        # One list of HWC patches per class.
        return [[patchCentered(image, x, y, patch_size) for x, y in cl_coords]
                for cl_coords in split]

    # The original repeated the same double loop three times verbatim;
    # a single helper keeps the three splits guaranteed-consistent.
    return extract(tr_coords), extract(vl_coords), extract(ev_coords)
def simpleAugmentation(patch_list):
    """
    Quadruple every class's patch list with 90/180/270-degree rotations
    (np.rot90 about axes (1, 2), as in the original implementation — it is
    faster than skimage.transform.rotate) and shuffle each augmented list.
    """
    augmented = []
    for cl_patches in patch_list:
        expanded = []
        for patch in cl_patches:
            expanded.append(patch)
            for quarter_turns in (1, 2, 3):
                expanded.append(np.rot90(patch, k=quarter_turns, axes=(1, 2)))
        random.shuffle(expanded)
        augmented.append(expanded)
    return augmented
def plotArray(array, cl_num, color_list, colorbar=True):
    """
    Display `array` as a discrete class map using the supplied colors.
    When `colorbar` is True, attach a colorbar with one tick per class
    value (0 .. cl_num).
    """
    plt.figure(figsize=(7, 12))
    image = plt.imshow(array,
                       cmap=mcolors.ListedColormap(color_list),
                       vmin=-0.5,
                       vmax=len(color_list) - 0.5,
                       alpha=1)
    if colorbar:
        plt.colorbar(image, ticks=list(range(cl_num + 1)))
    plt.show()
# patch, batch generation
def getRandPatch(patch_list, order):
    """
    Draw one patch at random: first pick a class uniformly, then a patch
    of that class. Returns (patch, target), each with a leading batch axis.

    'NHWC'  -> patch stays HWC (C = spectral depth of the input patch).
    'NDHWC' -> patch is transposed to DHW and given a trailing channel axis.
    """
    cl = random.randint(0, len(patch_list) - 1)
    idx = random.randint(0, len(patch_list[cl]) - 1)
    if order == 'NHWC':
        patch = patch_list[cl][idx]
    elif order == 'NDHWC':
        # HWC -> DHW -> DHW1
        patch = np.transpose(patch_list[cl][idx], (2, 0, 1))
        patch = np.expand_dims(patch, axis=3)
    return np.expand_dims(patch, 0), np.expand_dims(cl, 0)
def getRandBatch(patch_list, batch_size, order):
    """
    Assemble a random batch of `batch_size` patches and their targets.

    Each sample is drawn independently via getRandPatch; `order` selects
    the 2-D ('NHWC') or 3-D ('NDHWC') patch layout.
    """
    samples = [getRandPatch(patch_list, order=order) for _ in range(batch_size)]
    # Concatenate once at the end instead of growing the arrays inside the
    # loop — the old code re-copied the whole accumulated batch on every
    # iteration (quadratic in batch_size).
    image_batch = np.concatenate([s[0] for s in samples], axis=0)
    target_batch = np.concatenate([s[1] for s in samples], axis=0)
    return image_batch, target_batch
# evaluation
def getPredictionAcc(batch_prediction, targets):
    """
    Accuracy (percentage) of the rows in `batch_prediction` whose argmax
    matches the corresponding entry of `targets`.
    """
    predicted = np.argmax(batch_prediction, axis=1)
    hits = np.sum(predicted == targets)
    return 100. * (hits / batch_prediction.shape[0])
# placeholders
def declarePlaceholder2D(patch_size, depth, order='NHWC'):
    """
    Declare image and target placeholders for a 2D CNN. The input image
    placeholder is in NHWC order (C = spectral depth). The target
    placeholder has shape [None], since exactly one label is assigned per
    sample in this classification task.

    Raises ValueError for an unsupported `order` — the old code silently
    fell through the `if` and crashed with a NameError at the return.
    """
    if order != 'NHWC':
        raise ValueError("declarePlaceholder2D only supports order='NHWC', got %r" % (order,))
    x_input_shape = (None, patch_size, patch_size, depth)
    x_input = tf.placeholder(tf.float32,
                             shape=x_input_shape,
                             name='input_image')
    Y_target = tf.placeholder(tf.int32,
                              shape=[None],
                              name='target_label')
    return x_input, Y_target
def declarePlaceholder3D(patch_size, depth, order='NDHWC'):
    """
    Declare image and target placeholders for a 3D CNN. The input image
    placeholder is in NDHWC order (D = spectral depth, trailing channel
    dimension of 1). The target placeholder has shape [None], since exactly
    one label is assigned per sample in this classification task.

    Raises ValueError for an unsupported `order` — the old code silently
    fell through the `if` and crashed with a NameError at the return.
    """
    if order != 'NDHWC':
        raise ValueError("declarePlaceholder3D only supports order='NDHWC', got %r" % (order,))
    x_input_shape = (None, depth, patch_size, patch_size, 1)
    x_input = tf.placeholder(tf.float32,
                             shape=x_input_shape,
                             name='input_image')
    Y_target = tf.placeholder(tf.int32,
                              shape=[None],
                              name='target_label')
    return x_input, Y_target
| [
"matplotlib.pyplot.show",
"random.randint",
"numpy.sum",
"numpy.argmax",
"os.path.join",
"random.shuffle",
"numpy.zeros",
"numpy.expand_dims",
"numpy.transpose",
"numpy.equal",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"numpy.rot90",
"random.randrange",
"matplotlib.colors.Lis... | [((8688, 8720), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (8696, 8720), True, 'import numpy as np\n'), ((8746, 8778), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (8754, 8778), True, 'import numpy as np\n'), ((8794, 8826), 'numpy.zeros', 'np.zeros', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (8802, 8826), True, 'import numpy as np\n'), ((11134, 11175), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (11142, 11175), True, 'import numpy as np\n'), ((12296, 12339), 'numpy.zeros', 'np.zeros', (['(len_tr_coords,)'], {'dtype': 'np.object'}), '((len_tr_coords,), dtype=np.object)\n', (12304, 12339), True, 'import numpy as np\n'), ((12515, 12558), 'numpy.zeros', 'np.zeros', (['(len_vl_coords,)'], {'dtype': 'np.object'}), '((len_vl_coords,), dtype=np.object)\n', (12523, 12558), True, 'import numpy as np\n'), ((12734, 12777), 'numpy.zeros', 'np.zeros', (['(len_ev_coords,)'], {'dtype': 'np.object'}), '((len_ev_coords,), dtype=np.object)\n', (12742, 12777), True, 'import numpy as np\n'), ((15619, 15646), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 12)'}), '(figsize=(7, 12))\n', (15629, 15646), True, 'import matplotlib.pyplot as plt\n'), ((15938, 15948), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15946, 15948), True, 'import matplotlib.pyplot as plt\n'), ((16272, 16301), 'random.randint', 'random.randint', (['(0)', '(cl_num - 1)'], {}), '(0, cl_num - 1)\n', (16286, 16301), False, 'import random\n'), ((16628, 16652), 'numpy.expand_dims', 'np.expand_dims', (['patch', '(0)'], {}), '(patch, 0)\n', (16642, 16652), True, 'import numpy as np\n'), ((16666, 16691), 'numpy.expand_dims', 'np.expand_dims', (['target', '(0)'], {}), '(target, 0)\n', (16680, 16691), True, 'import numpy as np\n'), ((17383, 17418), 'numpy.argmax', 'np.argmax', 
(['batch_prediction'], {'axis': '(1)'}), '(batch_prediction, axis=1)\n', (17392, 17418), True, 'import numpy as np\n'), ((17930, 17997), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'x_input_shape', 'name': '"""input_image"""'}), "(tf.float32, shape=x_input_shape, name='input_image')\n", (17944, 17997), True, 'import tensorflow as tf\n'), ((18034, 18093), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""target_label"""'}), "(tf.int32, shape=[None], name='target_label')\n", (18048, 18093), True, 'import tensorflow as tf\n'), ((18529, 18596), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': 'x_input_shape', 'name': '"""input_image"""'}), "(tf.float32, shape=x_input_shape, name='input_image')\n", (18543, 18596), True, 'import tensorflow as tf\n'), ((18631, 18690), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""target_label"""'}), "(tf.int32, shape=[None], name='target_label')\n", (18645, 18690), True, 'import tensorflow as tf\n'), ((7505, 7531), 'random.shuffle', 'random.shuffle', (['cur_coords'], {}), '(cur_coords)\n', (7519, 7531), False, 'import random\n'), ((10250, 10276), 'random.shuffle', 'random.shuffle', (['cur_coords'], {}), '(cur_coords)\n', (10264, 10276), False, 'import random\n'), ((11920, 11931), 'numpy.sum', 'np.sum', (['mul'], {}), '(mul)\n', (11926, 11931), True, 'import numpy as np\n'), ((15422, 15451), 'random.shuffle', 'random.shuffle', (['augmented[cl]'], {}), '(augmented[cl])\n', (15436, 15451), False, 'import random\n'), ((17035, 17079), 'numpy.concatenate', 'np.concatenate', (['(image_batch, image)'], {'axis': '(0)'}), '((image_batch, image), axis=0)\n', (17049, 17079), True, 'import numpy as np\n'), ((17103, 17149), 'numpy.concatenate', 'np.concatenate', (['(target_batch, target)'], {'axis': '(0)'}), '((target_batch, target), axis=0)\n', (17117, 17149), True, 'import numpy as np\n'), ((17444, 17469), 
'numpy.equal', 'np.equal', (['logits', 'targets'], {}), '(logits, targets)\n', (17452, 17469), True, 'import numpy as np\n'), ((9134, 9164), 'random.randrange', 'random.randrange', (['*range_index'], {}), '(*range_index)\n', (9150, 9164), False, 'import random\n'), ((15700, 15734), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (['color_list'], {}), '(color_list)\n', (15722, 15734), True, 'from matplotlib import colors as mcolors\n'), ((16524, 16554), 'numpy.transpose', 'np.transpose', (['patch', '(2, 0, 1)'], {}), '(patch, (2, 0, 1))\n', (16536, 16554), True, 'import numpy as np\n'), ((16568, 16597), 'numpy.expand_dims', 'np.expand_dims', (['patch'], {'axis': '(3)'}), '(patch, axis=3)\n', (16582, 16597), True, 'import numpy as np\n'), ((539, 579), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas.mat"""'], {}), "(dataset_dir, 'Salinas.mat')\n", (551, 579), False, 'import os\n'), ((639, 682), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_gt.mat"""'], {}), "(dataset_dir, 'Salinas_gt.mat')\n", (651, 682), False, 'import os\n'), ((9266, 9291), 'numpy.sum', 'np.sum', (['(ev_map * temp_map)'], {}), '(ev_map * temp_map)\n', (9272, 9291), True, 'import numpy as np\n'), ((15246, 15279), 'numpy.rot90', 'np.rot90', (['patch'], {'k': '(1)', 'axes': '(1, 2)'}), '(patch, k=1, axes=(1, 2))\n', (15254, 15279), True, 'import numpy as np\n'), ((15313, 15346), 'numpy.rot90', 'np.rot90', (['patch'], {'k': '(2)', 'axes': '(1, 2)'}), '(patch, k=2, axes=(1, 2))\n', (15321, 15346), True, 'import numpy as np\n'), ((15380, 15413), 'numpy.rot90', 'np.rot90', (['patch'], {'k': '(3)', 'axes': '(1, 2)'}), '(patch, k=3, axes=(1, 2))\n', (15388, 15413), True, 'import numpy as np\n'), ((822, 872), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_corrected.mat"""'], {}), "(dataset_dir, 'Salinas_corrected.mat')\n", (834, 872), False, 'import os\n'), ((942, 985), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_gt.mat"""'], {}), 
"(dataset_dir, 'Salinas_gt.mat')\n", (954, 985), False, 'import os\n'), ((1138, 1188), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_corrected.mat"""'], {}), "(dataset_dir, 'Salinas_corrected.mat')\n", (1150, 1188), False, 'import os\n'), ((1258, 1305), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_gt_815.mat"""'], {}), "(dataset_dir, 'Salinas_gt_815.mat')\n", (1270, 1305), False, 'import os\n'), ((1467, 1521), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_corrected_pca.mat"""'], {}), "(dataset_dir, 'Salinas_corrected_pca.mat')\n", (1479, 1521), False, 'import os\n'), ((1595, 1638), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Salinas_gt.mat"""'], {}), "(dataset_dir, 'Salinas_gt.mat')\n", (1607, 1638), False, 'import os\n'), ((1792, 1837), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines.mat"""'], {}), "(dataset_dir, 'Indian_pines.mat')\n", (1804, 1837), False, 'import os\n'), ((1902, 1950), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines_gt.mat"""'], {}), "(dataset_dir, 'Indian_pines_gt.mat')\n", (1914, 1950), False, 'import os\n'), ((2100, 2155), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines_corrected.mat"""'], {}), "(dataset_dir, 'Indian_pines_corrected.mat')\n", (2112, 2155), False, 'import os\n'), ((2230, 2278), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines_gt.mat"""'], {}), "(dataset_dir, 'Indian_pines_gt.mat')\n", (2242, 2278), False, 'import os\n'), ((2442, 2501), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines_corrected_pca.mat"""'], {}), "(dataset_dir, 'Indian_pines_corrected_pca.mat')\n", (2454, 2501), False, 'import os\n'), ((2580, 2628), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Indian_pines_gt.mat"""'], {}), "(dataset_dir, 'Indian_pines_gt.mat')\n", (2592, 2628), False, 'import os\n'), ((2792, 2831), 'os.path.join', 'os.path.join', (['dataset_dir', '"""PaviaU.mat"""'], {}), "(dataset_dir, 
'PaviaU.mat')\n", (2804, 2831), False, 'import os\n'), ((2890, 2932), 'os.path.join', 'os.path.join', (['dataset_dir', '"""PaviaU_gt.mat"""'], {}), "(dataset_dir, 'PaviaU_gt.mat')\n", (2902, 2932), False, 'import os\n'), ((3079, 3117), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Pavia.mat"""'], {}), "(dataset_dir, 'Pavia.mat')\n", (3091, 3117), False, 'import os\n'), ((3175, 3216), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Pavia_gt.mat"""'], {}), "(dataset_dir, 'Pavia_gt.mat')\n", (3187, 3216), False, 'import os\n'), ((3359, 3395), 'os.path.join', 'os.path.join', (['dataset_dir', '"""KSC.mat"""'], {}), "(dataset_dir, 'KSC.mat')\n", (3371, 3395), False, 'import os\n'), ((3451, 3490), 'os.path.join', 'os.path.join', (['dataset_dir', '"""KSC_gt.mat"""'], {}), "(dataset_dir, 'KSC_gt.mat')\n", (3463, 3490), False, 'import os\n'), ((3637, 3678), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Botswana.mat"""'], {}), "(dataset_dir, 'Botswana.mat')\n", (3649, 3678), False, 'import os\n'), ((3739, 3783), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Botswana_gt.mat"""'], {}), "(dataset_dir, 'Botswana_gt.mat')\n", (3751, 3783), False, 'import os\n')] |
# pyrates imports
from pyrates.frontend import EdgeTemplate, CircuitTemplate
from pyrates.backend import ComputeGraph
from pyrates.ir import CircuitIR
# additional imports
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d
# General simulation parameters.
# NOTE(review): T is documented in ms while dt/dts said 's' — all values
# are presumably in ms; confirm against the solver/templates.
dt = 1e-1         # integration step size (comment originally said 's'; see note)
dts = 1.0         # variable-storage sub-sampling step (same unit as dt)
sub = int(dts/dt) # sub-sampling rate: one stored sample every `sub` solver steps
T = 8000          # total simulation time in ms
delay = 1000      # quiet period before any stimulation starts
reg_dur = 3500    # duration of the regular (sequential) stimulation phase
dur = 50.0        # length of one stimulation interval
ramp = 15.0       # on/off ramp carved out of each interval and used for Gaussian smoothing
# population numbers
N = 250        # total number of network nodes (one 'wc' circuit each)
N_c = 50       # size of the "c" subgroup — the one receiving external input
N_b = N - N_c  # remaining "b" subgroup
# connection probabilities (used as thresholds on random connectivity below)
p_ee_cc = 0.1  # E->E within c
p_ee_bb = 0.1  # E->E within b
p_ee_bc = 0.05 # E->E between b and c
p_ie_cc = 0.2  # E->I within c
p_ie_bb = 0.2  # E->I within b
# connection strengths: each nonzero connectivity row is normalized to sum
# to these values (see the normalization loop below)
k_e = 10.0     # total excitatory input weight per node
k_i = 1.0      # total inhibitory-population input weight per node
# connectivity matrices
# Each matrix starts as Gaussian noise and is binarized by thresholding at
# the int(p*len)-th order statistic of its flattened entries.
# NOTE(review): `C[np.eye(n, dtype=np.int32)] = 0` uses *integer* fancy
# indexing, so it zeroes rows 0 and 1 rather than the diagonal; a boolean
# eye (dtype=bool) was presumably intended to remove self-connections.
# NOTE(review): thresholding at index int(p*len) keeps the top (1-p)
# fraction of entries, not a fraction p — verify this matches the intended
# connection probabilities above.
C_ee_cc = np.random.randn(N_c, N_c)
C_ee_cc[np.eye(N_c, dtype=np.int32)] = 0
c_sorted = np.sort(C_ee_cc.flatten())
threshold = c_sorted[int(p_ee_cc*len(c_sorted))]
C_ee_cc[C_ee_cc < threshold] = 0.0
C_ee_cc[C_ee_cc >= threshold] = 1.0
C_ie_cc = np.random.randn(N_c, N_c)
C_ie_cc[np.eye(N_c, dtype=np.int32)] = 0
c_sorted = np.sort(C_ie_cc.flatten())
threshold = c_sorted[int(p_ie_cc*len(c_sorted))]
C_ie_cc[C_ie_cc < threshold] = 0.0
C_ie_cc[C_ie_cc >= threshold] = 1.0
C_ee_bb = np.random.randn(N_b, N_b)
C_ee_bb[np.eye(N_b, dtype=np.int32)] = 0
c_sorted = np.sort(C_ee_bb.flatten())
threshold = c_sorted[int(p_ee_bb*len(c_sorted))]
C_ee_bb[C_ee_bb < threshold] = 0.0
C_ee_bb[C_ee_bb >= threshold] = 1.0
C_ie_bb = np.random.randn(N_b, N_b)
C_ie_bb[np.eye(N_b, dtype=np.int32)] = 0
c_sorted = np.sort(C_ie_bb.flatten())
threshold = c_sorted[int(p_ie_bb*len(c_sorted))]
C_ie_bb[C_ie_bb < threshold] = 0.0
C_ie_bb[C_ie_bb >= threshold] = 1.0
# Full N x N E->E matrix: the off-diagonal b<->c blocks are thresholded
# here; the within-group blocks are overwritten with the matrices above.
C_ee_bc = np.random.randn(N, N)
C_ee_bc[0:N_c, 0:N_c] = 0.0
C_ee_bc[N_c:, N_c:] = 0.0
C_ee_bc[np.eye(N, dtype=np.int32)] = 0
c_sorted = np.sort(C_ee_bc.flatten())
threshold = c_sorted[int(p_ee_bc*len(c_sorted))]
C_ee_bc[C_ee_bc < threshold] = 0.0
C_ee_bc[C_ee_bc >= threshold] = 1.0
C_ee_bc[0:N_c, 0:N_c] = C_ee_cc
C_ee_bc[N_c:, N_c:] = C_ee_bb
# Full N x N E->I matrix: block-diagonal only (no cross-group inhibition).
C_ie_bc = np.zeros((N, N))
C_ie_bc[0:N_c, 0:N_c] = C_ie_cc
C_ie_bc[N_c:, N_c:] = C_ie_bb
# Row normalization: every nonzero row is rescaled so its entries sum to
# k_e (excitatory matrix) or k_i (inhibitory matrix).
for i in range(N):
    for C, c_scale in zip([C_ee_bc, C_ie_bc], [k_e, k_i]):
        c_max = np.max(C[i, :])
        if c_max > 0:
            C[i, :] /= np.sum(C[i, :])
            C[i, :] *= c_scale
# Run one simulation per entry of `ms`, where m is the number of distinct
# stimulated nodes within the "c" subgroup.
ms = [5]
results = []
for m in ms:
    # Stimulus matrix: one row per solver step, one column per node.
    inp = np.zeros((int(T/dt), N), dtype='float32')
    i = 0
    # The m nodes (all within the c subgroup) that can receive input.
    inp_nodes = np.random.randint(0, N_c, m)
    while (i+1)*dur < T:
        if i*dur > delay:
            # During the first reg_dur ms the m nodes are driven in fixed
            # sequential order; afterwards one of the m is picked at random.
            sequential = i*dur < delay+reg_dur
            i_tmp = i % m if sequential else int(np.random.uniform(0, m))
            # Mark one stimulation interval, leaving `ramp` ms free at both
            # ends for the Gaussian smoothing below.
            inp[int(((i+1)*dur+ramp)/dt):int(((i+2)*dur-ramp)/dt), inp_nodes[i_tmp]] = 1.0
        i += 1
    # Smooth the rectangular pulses in time so the input has soft edges.
    inp = gaussian_filter1d(inp, sigma=0.5*ramp/dt, axis=0)
    # circuit setup: N identical 'wc' circuits wired by the matrices above.
    circuit = CircuitIR()
    edge1 = EdgeTemplate.from_yaml("../config/wc_templates/EE_edge")
    edge2 = EdgeTemplate.from_yaml("../config/wc_templates/EI_edge")
    for idx in range(N):
        circuit.add_circuit(f'wc_{idx}', CircuitIR.from_yaml("../config/wc_templates/WC"))
    # E->E coupling weighted by C_ee_bc and E->I coupling weighted by C_ie_bc.
    circuit.add_edges_from_matrix(source_var="E/E_op/m", target_var="E/E_op/I_e", template=edge1,
                                  nodes=[f'wc_{idx}' for idx in range(N)], weight=C_ee_bc)
    circuit.add_edges_from_matrix(source_var="E/E_op/m", target_var="I/I_op/I_e", template=edge2,
                                  nodes=[f'wc_{idx}' for idx in range(N)], weight=C_ie_bc)
    # circuit compilation and simulation (Euler integration, numpy backend);
    # the per-node 'meg' output is stored every dts ms.
    compute_graph = circuit.compile(vectorization=True, backend='numpy', name='wc_net', step_size=dt, solver='euler')
    r, t = compute_graph.run(T,
                             inputs={"all/E/E_op/I_ext": inp},
                             outputs={"meg": "all/E/E_op/meg"},
                             sampling_step_size=dts,
                             profile=True,
                             verbose=True,
                             )
    results.append(r)
# visualization: node-averaged MEG trace for each run.
plt.figure()
for r in results:
    r.mean(axis=1).plot()
plt.legend([f"m = {m}" for m in ms])
# Bug fix: savefig must come before plt.show() — in non-interactive mode
# show() releases/clears the figure, so the old order saved a blank file.
# (Also dropped the f-prefix on a string with no placeholders.)
plt.savefig('EEG_reg_rand_avg.svg', dpi=600, format='svg')
plt.show()
| [
"pyrates.ir.CircuitIR",
"numpy.random.uniform",
"scipy.ndimage.filters.gaussian_filter1d",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"numpy.zeros",
"pyrates.frontend.EdgeTemplate.from_yaml",
"pyrates.ir.CircuitIR.from_yaml",
"matplotlib.pyplot.figur... | [((892, 917), 'numpy.random.randn', 'np.random.randn', (['N_c', 'N_c'], {}), '(N_c, N_c)\n', (907, 917), True, 'import numpy as np\n'), ((1128, 1153), 'numpy.random.randn', 'np.random.randn', (['N_c', 'N_c'], {}), '(N_c, N_c)\n', (1143, 1153), True, 'import numpy as np\n'), ((1364, 1389), 'numpy.random.randn', 'np.random.randn', (['N_b', 'N_b'], {}), '(N_b, N_b)\n', (1379, 1389), True, 'import numpy as np\n'), ((1600, 1625), 'numpy.random.randn', 'np.random.randn', (['N_b', 'N_b'], {}), '(N_b, N_b)\n', (1615, 1625), True, 'import numpy as np\n'), ((1836, 1857), 'numpy.random.randn', 'np.random.randn', (['N', 'N'], {}), '(N, N)\n', (1851, 1857), True, 'import numpy as np\n'), ((2182, 2198), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2190, 2198), True, 'import numpy as np\n'), ((4155, 4167), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4165, 4167), True, 'import matplotlib.pyplot as plt\n'), ((4212, 4248), 'matplotlib.pyplot.legend', 'plt.legend', (["[f'm = {m}' for m in ms]"], {}), "([f'm = {m}' for m in ms])\n", (4222, 4248), True, 'import matplotlib.pyplot as plt\n'), ((4249, 4259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4257, 4259), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4319), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""EEG_reg_rand_avg.svg"""'], {'dpi': '(600)', 'format': '"""svg"""'}), "(f'EEG_reg_rand_avg.svg', dpi=600, format='svg')\n", (4271, 4319), True, 'import matplotlib.pyplot as plt\n'), ((926, 953), 'numpy.eye', 'np.eye', (['N_c'], {'dtype': 'np.int32'}), '(N_c, dtype=np.int32)\n', (932, 953), True, 'import numpy as np\n'), ((1162, 1189), 'numpy.eye', 'np.eye', (['N_c'], {'dtype': 'np.int32'}), '(N_c, dtype=np.int32)\n', (1168, 1189), True, 'import numpy as np\n'), ((1398, 1425), 'numpy.eye', 'np.eye', (['N_b'], {'dtype': 'np.int32'}), '(N_b, dtype=np.int32)\n', (1404, 1425), True, 'import numpy as np\n'), ((1634, 1661), 'numpy.eye', 'np.eye', 
(['N_b'], {'dtype': 'np.int32'}), '(N_b, dtype=np.int32)\n', (1640, 1661), True, 'import numpy as np\n'), ((1920, 1945), 'numpy.eye', 'np.eye', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (1926, 1945), True, 'import numpy as np\n'), ((2581, 2609), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N_c', 'm'], {}), '(0, N_c, m)\n', (2598, 2609), True, 'import numpy as np\n'), ((2898, 2951), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['inp'], {'sigma': '(0.5 * ramp / dt)', 'axis': '(0)'}), '(inp, sigma=0.5 * ramp / dt, axis=0)\n', (2915, 2951), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((2983, 2994), 'pyrates.ir.CircuitIR', 'CircuitIR', ([], {}), '()\n', (2992, 2994), False, 'from pyrates.ir import CircuitIR\n'), ((3007, 3063), 'pyrates.frontend.EdgeTemplate.from_yaml', 'EdgeTemplate.from_yaml', (['"""../config/wc_templates/EE_edge"""'], {}), "('../config/wc_templates/EE_edge')\n", (3029, 3063), False, 'from pyrates.frontend import EdgeTemplate, CircuitTemplate\n'), ((3076, 3132), 'pyrates.frontend.EdgeTemplate.from_yaml', 'EdgeTemplate.from_yaml', (['"""../config/wc_templates/EI_edge"""'], {}), "('../config/wc_templates/EI_edge')\n", (3098, 3132), False, 'from pyrates.frontend import EdgeTemplate, CircuitTemplate\n'), ((2356, 2371), 'numpy.max', 'np.max', (['C[i, :]'], {}), '(C[i, :])\n', (2362, 2371), True, 'import numpy as np\n'), ((2417, 2432), 'numpy.sum', 'np.sum', (['C[i, :]'], {}), '(C[i, :])\n', (2423, 2432), True, 'import numpy as np\n'), ((3199, 3247), 'pyrates.ir.CircuitIR.from_yaml', 'CircuitIR.from_yaml', (['"""../config/wc_templates/WC"""'], {}), "('../config/wc_templates/WC')\n", (3218, 3247), False, 'from pyrates.ir import CircuitIR\n'), ((2757, 2780), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'm'], {}), '(0, m)\n', (2774, 2780), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os, sys, getopt, pdb
from numpy import *
from numpy.linalg import *
from numpy.random import *
import pylab
# Load the recording-block pickles for participant P1, stack them into a
# single frame, cache the joint frame, and map +/-inf to NaN so the
# cleaning steps below can treat them uniformly.
namelist = ['Data_P1_RB1.pkl', 'Data_P1_RB2.pkl', 'Data_P1_RB3.pkl']
list_of_dfs = [pd.read_pickle('MPIIMobileAttention/' + name) for name in namelist]
big_df = pd.concat(list_of_dfs)
big_df.to_pickle("JointPreProcessedPkls/Data_P1.pkl")
df = big_df.replace([np.inf, -np.inf], np.nan)
"""
df_num = df.select_dtypes(include=['number'])
df_bool = df.select_dtypes(include=['bool'])
df = pd.concat([df_num.mask(df_num >= 3E38, np.nan), df_bool], axis = 1)
"""
df = df.select_dtypes(include=['number', 'bool'])
df = df.mask(df >= 3E38, np.nan)
df = df.dropna(axis=1, how='all')
df.loc[:, df.ne(0).any()]
#df = df.dropna(axis=0)
df.fillna(df.mean())
# Phone / app / contextual sensor channels, selected by column prefix.
phone_prefixes = ("phone_", "app_", "touch_", "gps_", "screen", "disp",
                  "whatsapp", "temp", "distance_cam")
phone_mask = np.zeros(len(df.columns), dtype=bool)
for prefix in phone_prefixes:
    phone_mask |= df.columns.str.startswith(prefix)
phone_df = df.loc[:, phone_mask]
# Scene-camera (RGB) channels: detector/saliency maps plus the gaze and
# phone-corner coordinates in the undistorted camera frame.
rgb_prefixes = ("objectclass_", "objectness_", "saliency_", "segmentationclass_")
rgb_mask = np.zeros(len(df.columns), dtype=bool)
for prefix in rgb_prefixes:
    rgb_mask |= df.columns.str.startswith(prefix)
rgb_df1 = df.loc[:, rgb_mask]
rgb_columns = ['gaze_xundist', 'gaze_yundist', 'corner1_xundist', 'corner1_yundist',
               'corner2_xundist', 'corner2_yundist', 'corner3_xundist', 'corner3_yundist',
               'corner4_xundist', 'corner4_yundist', 'corner1_xundistext', 'corner1_yundistext',
               'corner2_xundistext', 'corner2_yundistext', 'corner3_xundistext', 'corner3_yundistext',
               'corner4_xundistext', 'corner4_yundistext', 'corner1ext_x', 'corner1ext_y',
               'corner2ext_x', 'corner2ext_y', 'corner3ext_x', 'corner3ext_y',
               'corner4ext_x', 'corner4ext_y', 'face_detections_world']
rgb_df2 = df[rgb_columns]
rgb_df = pd.concat([rgb_df1, rgb_df2], axis=1)
#'object_seg', 'sem_seg',
# Head-worn IMU channels plus the phone-corner tracks from the scene video.
# (As in the original selection, the corner*_x/_y prefixes also catch the
# *_undist variants that already appear in rgb_df2.)
headimu_prefixes = ("accelerometer_", "gyro_", "mobilephone_in_scene_vid",
                    "corner1_x", "corner1_y", "corner2_y", "corner2_x",
                    "corner3_x", "corner3_y", "corner4_x", "corner4_y")
headimu_mask = np.zeros(len(df.columns), dtype=bool)
for prefix in headimu_prefixes:
    headimu_mask |= df.columns.str.startswith(prefix)
headimu_df = df.loc[:, headimu_mask]
depth_df = df.loc[:, df.columns.str.startswith("depth_")]
gaze_df = df[['depth', 'pupil_x', 'pupil_y', 'gaze_x', 'gaze_y', 'diameter',
              'major', 'minor', 'angle']]
# Egocentric-only sensor view (as in the reference paper) and the full
# feature matrix used for classification below.
egocentric_df = pd.concat([headimu_df, rgb_df, depth_df], axis=1)
X = pd.concat([headimu_df, rgb_df, depth_df, phone_df, gaze_df], axis=1)
# Convert features/labels to arrays; X is cast to float32 once up front,
# so the train/test splits inherit that dtype (the old code re-cast them
# redundantly after splitting).
X = np.array(X, dtype=np.float32)
y = np.array(df['gaze_on_screen'])
print('X: \n', X.dtype, X)
print('y: \n', y.dtype, y)
from sklearn.model_selection import train_test_split
# Fixed 75/25 split with a seed so runs are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
print('X_train: \n', X_train.dtype, X_train)
print('X_test: \n', X_test.dtype, X_test)
# Standardize using statistics fitted on the training split only.
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.ensemble import RandomForestClassifier
# Create a random-forest classifier with 75 trees.
# (The original comment said "Gaussian Classifier", which was inaccurate.)
clf=RandomForestClassifier(n_estimators=75)
# Train the model on the standardized training split.
clf.fit(X_train,y_train)
# METRICS
from sklearn.metrics import (roc_curve, auc, precision_recall_curve,
                             precision_score, recall_score, f1_score,
                             average_precision_score)
# Probability of the positive class ("gaze on screen") for each test row.
probs = clf.predict_proba(X_test)
gaze_on_probs = probs[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, gaze_on_probs)
roc_auc = auc(fpr, tpr)
# ROC curve with the chance diagonal for reference.
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'y', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
y_pred = clf.predict(X_test)
# Bug fix: the old code did `f1_score = f1_score(...)`, shadowing the
# imported sklearn function; precision/recall were also computed but never
# shown. Bind the result to `f1` and report all three.
pre_score = precision_score(y_test, y_pred)
re_score = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
print('Precision: ', pre_score)
print('Recall: ', re_score)
print('F1 score: ', f1)
from proximityMatrix import proximityMatrix
proxMat = proximityMatrix(clf, X_train)
print("proxMat: \n", proxMat)
from sklearn.manifold import TSNE
embeddedModel = TSNE(n_components=2).fit_transform(proxMat)
print(embeddedModel.shape)
embeddedModel_ = TSNE(n_components=6).fit_transform(proxMat)
print(embeddedModel_.shape)
from mds import mds, square_points
Y, eigs = mds(proxMat)
pylab.figure(1)
pylab.plot(Y[:,0],Y[:,1],'.')
pylab.figure(2)
pylab.plot(points[:,0], points[:,1], '.')
pylab.show()
| [
"matplotlib.pyplot.title",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.f1_score",
"pylab.figure",
"proximityMatrix.proximityMatrix",
"pandas.concat",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yli... | [((393, 415), 'pandas.concat', 'pd.concat', (['list_of_dfs'], {}), '(list_of_dfs)\n', (402, 415), True, 'import pandas as pd\n'), ((2171, 2208), 'pandas.concat', 'pd.concat', (['[rgb_df1, rgb_df2]'], {'axis': '(1)'}), '([rgb_df1, rgb_df2], axis=1)\n', (2180, 2208), True, 'import pandas as pd\n'), ((3541, 3571), 'numpy.array', 'np.array', (["df['gaze_on_screen']"], {}), "(df['gaze_on_screen'])\n", (3549, 3571), True, 'import numpy as np\n'), ((3713, 3767), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(X, y, test_size=0.25, random_state=0)\n', (3729, 3767), False, 'from sklearn.model_selection import train_test_split\n'), ((3826, 3842), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3840, 3842), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4167, 4206), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(75)'}), '(n_estimators=75)\n', (4189, 4206), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4692, 4724), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'gaze_on_probs'], {}), '(y_test, gaze_on_probs)\n', (4701, 4724), False, 'from sklearn.metrics import roc_curve\n'), ((4735, 4748), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (4738, 4748), False, 'from sklearn.metrics import auc\n'), ((4749, 4795), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], {}), "('Receiver Operating Characteristic')\n", (4758, 4795), True, 'from matplotlib import pyplot as plt\n'), ((4796, 4850), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', '"""y"""'], {'label': "('AUC = %0.2f' % roc_auc)"}), "(fpr, tpr, 'y', label='AUC = %0.2f' % roc_auc)\n", (4804, 4850), True, 'from matplotlib import pyplot as plt\n'), ((4853, 4882), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), 
"(loc='lower right')\n", (4863, 4882), True, 'from matplotlib import pyplot as plt\n'), ((4885, 4916), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (4893, 4916), True, 'from matplotlib import pyplot as plt\n'), ((4916, 4932), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (4924, 4932), True, 'from matplotlib import pyplot as plt\n'), ((4933, 4949), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (4941, 4949), True, 'from matplotlib import pyplot as plt\n'), ((4950, 4982), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (4960, 4982), True, 'from matplotlib import pyplot as plt\n'), ((4983, 5016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (4993, 5016), True, 'from matplotlib import pyplot as plt\n'), ((5017, 5027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5025, 5027), True, 'from matplotlib import pyplot as plt\n'), ((5070, 5101), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5085, 5101), False, 'from sklearn.metrics import precision_score\n'), ((5112, 5140), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5124, 5140), False, 'from sklearn.metrics import recall_score\n'), ((5152, 5176), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5160, 5176), False, 'from sklearn.metrics import f1_score\n'), ((5802, 5831), 'proximityMatrix.proximityMatrix', 'proximityMatrix', (['clf', 'X_train'], {}), '(clf, X_train)\n', (5817, 5831), False, 'from proximityMatrix import proximityMatrix\n'), ((6127, 6139), 'mds.mds', 'mds', (['proxMat'], {}), '(proxMat)\n', (6130, 6139), False, 'from mds import mds, square_points\n'), ((6141, 6156), 'pylab.figure', 'pylab.figure', (['(1)'], {}), '(1)\n', 
(6153, 6156), False, 'import pylab\n'), ((6157, 6190), 'pylab.plot', 'pylab.plot', (['Y[:, 0]', 'Y[:, 1]', '"""."""'], {}), "(Y[:, 0], Y[:, 1], '.')\n", (6167, 6190), False, 'import pylab\n'), ((6188, 6203), 'pylab.figure', 'pylab.figure', (['(2)'], {}), '(2)\n', (6200, 6203), False, 'import pylab\n'), ((6204, 6247), 'pylab.plot', 'pylab.plot', (['points[:, 0]', 'points[:, 1]', '"""."""'], {}), "(points[:, 0], points[:, 1], '.')\n", (6214, 6247), False, 'import pylab\n'), ((6247, 6259), 'pylab.show', 'pylab.show', ([], {}), '()\n', (6257, 6259), False, 'import pylab\n'), ((310, 355), 'pandas.read_pickle', 'pd.read_pickle', (["('MPIIMobileAttention/' + name)"], {}), "('MPIIMobileAttention/' + name)\n", (324, 355), True, 'import pandas as pd\n'), ((3198, 3235), 'pandas.concat', 'pd.concat', (['[rgb_df, depth_df]'], {'axis': '(1)'}), '([rgb_df, depth_df], axis=1)\n', (3207, 3235), True, 'import pandas as pd\n'), ((3486, 3497), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3494, 3497), True, 'import numpy as np\n'), ((5916, 5936), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5920, 5936), False, 'from sklearn.manifold import TSNE\n'), ((6006, 6026), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(6)'}), '(n_components=6)\n', (6010, 6026), False, 'from sklearn.manifold import TSNE\n'), ((3382, 3420), 'pandas.concat', 'pd.concat', (['[phone_df, gaze_df]'], {'axis': '(1)'}), '([phone_df, gaze_df], axis=1)\n', (3391, 3420), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
import os, random
import numpy as np
from sklearn.utils import shuffle
from keras.preprocessing.image import ImageDataGenerator, NumpyArrayIterator
from skimage import transform
from keras_preprocessing.image.affine_transformations import apply_affine_transform
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from utils.utils import rgb2gray
class BatchGenerator:
"""Create a generator thay yelds batches of images B = P * K where P - number of persons (unique class),
K - number of examples per class. Classes are selected randomly and can be repeated from batch to batch.
"""
def __init__(
self,
images,
classes,
aug_gen=None,
p=20,
k=3,
seed=None,
equal_k=True,
to_gray=False,
n_poses=1,
dupl_labels=False,
rotate_poses=False,
flatten_batch=True,
perspective=False,
):
self.seed = seed
self.aug_gen = aug_gen
self.equal_k = equal_k
self.to_gray = to_gray
self.n_poses = n_poses
self.dupl_labels = dupl_labels
self.rotate_poses = rotate_poses
self.flatten_batch = flatten_batch
self.perspective = perspective
if self.dupl_labels:
print('Duplicating labels for network branches')
self.total_samples_seen = 0
self.unique_classes, counts = np.unique(classes, return_counts=True)
min_samples_per_class = min(counts)
print(
'Number of unique classes {}, min images per class {}'.format(
counts.shape[0], min_samples_per_class
)
)
if equal_k and min_samples_per_class < k:
self.k = min_samples_per_class
print('Number of samples per class is reduced to {}'.format(self.k))
else:
self.k = k
self.img_set = images
self.class_set = classes
if p > self.unique_classes.shape[0]:
self.p = self.unique_classes.shape[0]
print('Number of unique classes per batch is reduced to {}'.format(self.p))
else:
self.p = p
def __iter__(self):
return self
def set_seed(self):
if self.seed is not None:
local_seed = self.seed + self.total_samples_seen
else:
local_seed = self.total_samples_seen
np.random.seed(local_seed)
return local_seed
def _get_batches_of_transformed_samples(self):
# set seed
self.set_seed()
# initialise empty arrays for batches of images and classes
batch_img = np.zeros(
shape=(self.p * self.k, self.n_poses) + self.img_set.shape[1:],
dtype='float32',
)
batch_class = np.empty(shape=(self.p * self.k,), dtype=self.class_set.dtype)
# selected classes - random choice from an array of unique
sel_classes = np.random.choice(self.unique_classes, self.p, replace=False)
# select images
for i, sel_class in enumerate(sel_classes):
start_idx = i * self.k
end_idx = (i + 1) * self.k
num_img_sel_class = self.img_set[self.class_set == sel_class].shape[0]
if self.equal_k:
sel_idx = np.random.choice(num_img_sel_class, self.k, replace=False)
else:
sel_idx = np.random.choice(num_img_sel_class, self.k, replace=True)
for pose in range(self.n_poses):
batch_img[start_idx:end_idx, pose] = self.img_set[
self.class_set == sel_class
][sel_idx]
batch_class[start_idx:end_idx] = self.class_set[self.class_set == sel_class][
sel_idx
]
self.total_samples_seen = self.total_samples_seen + 1
self.set_seed()
# print(batch_img[0,0])
if self.perspective:
# Apply one perspective transform and then rotate the transformed image
angle_step = 360 // self.n_poses
# augment images if generator is defined
for j in range(batch_img.shape[0]):
if self.aug_gen is not None:
temp = self.aug_gen.random_transform(batch_img[j, 0], seed=None)
else:
temp = batch_img[j, 0]
for pose in range(batch_img.shape[1]):
projected = projective_transformation(temp.astype('uint8'), var=0.15)
angle = int(random.gauss(angle_step * pose, 10))
batch_img[j, pose] = apply_affine_transform(projected, theta=angle)
batch_img[j, pose] = self.aug_gen.preprocessing_function(
batch_img[j, pose] * 255
)
else:
rot_angle = 360 // self.n_poses
# augment images if generator is defined
if self.aug_gen is not None:
for j in range(batch_img.shape[0]):
if self.rotate_poses:
temp = self.aug_gen.random_transform(batch_img[j, 0], seed=None)
for pose in range(batch_img.shape[1]):
batch_img[j, pose] = apply_affine_transform(
temp, theta=rot_angle * pose
)
batch_img[j, pose] = self.aug_gen.preprocessing_function(
batch_img[j, pose]
)
else:
for pose in range(batch_img.shape[1]):
# In half cases convert to grayscale
if self.to_gray:
if np.random.random() > 0.5:
batch_img[j, pose] = rgb2gray(
batch_img[j, pose], 'float32'
)
temp = self.aug_gen.random_transform(
batch_img[j, pose], seed=None
)
batch_img[j, pose] = self.aug_gen.preprocessing_function(temp)
if self.dupl_labels:
return (
[batch_img[:, pose] for pose in range(batch_img.shape[1])],
[batch_class] * self.n_poses,
)
elif self.flatten_batch:
# Batch size = (#img, #poses, h,w,ch)
total_images = batch_img.shape[0] * batch_img.shape[1]
new_shape = (total_images,) + batch_img.shape[2:]
tiled_classes = np.reshape(
np.array([batch_class] * self.n_poses), (total_images,), 'C'
)
return np.reshape(batch_img, new_shape, order='F'), tiled_classes
else:
return np.squeeze(batch_img), batch_class
def __next__(self):
return self._get_batches_of_transformed_samples()
def randomProjection(variation, image_size, random_seed=None):
'''Generate geometrical projection by defining transformation of 4 points
------
Input:
variation: percentage (in decimal notation from 0 to 1)
relative size of a circle region where centre is projected
image_size: integer
size of image in pixels
random_seed: integer
initialize internal state of the random number generator
------
Return:
tform: object from skimage.transromf
'''
d = image_size * variation
if random_seed is not None:
random.seed(random_seed)
top_left = (
random.uniform(-0.5 * d, d),
random.uniform(-0.5 * d, d),
) # Top left corner
bottom_left = (
random.uniform(-0.5 * d, d),
random.uniform(-0.5 * d, d),
) # Bottom left corner
top_right = (
random.uniform(-0.5 * d, d),
random.uniform(-0.5 * d, d),
) # Top right corner
bottom_right = (
random.uniform(-0.5 * d, d),
random.uniform(-0.5 * d, d),
) # Bottom right corner
tform = transform.ProjectiveTransform()
tform.estimate(
np.array(
(
top_left,
(bottom_left[0], image_size - bottom_left[1]),
(image_size - bottom_right[0], image_size - bottom_right[1]),
(image_size - top_right[0], top_right[1]),
)
),
np.array(((0, 0), (0, image_size), (image_size, image_size), (image_size, 0))),
)
return tform
def projective_transformation(img, var=0.15, random_seed=None):
"""Additional preprocessing function for data augmentation: random projective transformations over input image.
Input:
img: 3D tensor, image (integers [0,255])
random_seed: integer
Returns:
img_transformed: 3D tensor, image (dtype float64, [0,1])
"""
projection = randomProjection(
var, min(img.shape[0], img.shape[1]), random_seed=random_seed
)
img_transformed = transform.warp(img, projection, mode='edge')
return img_transformed
class PairsImageDataGenerator(ImageDataGenerator):
"""Generate minibatches of image PAIRS data with real-time data augmentation.
"""
def flow(
self,
x,
y=None,
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
):
"""Generate minibatches of image pairs from a numpy array
"""
return PairsNumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
)
class PairsNumpyArrayIterator(NumpyArrayIterator):
"""Iterator yielding pairs of images from a Numpy array"""
def __init__(
self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
):
# self.y = y
self.seed = seed
self.classes = np.unique(y)
super(PairsNumpyArrayIterator, self).__init__(
x,
y,
image_data_generator,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
)
def _get_class_indices(self):
"""Indices of same class elements in X
______________________________________
Example:
y = [2,2,0,1,0]
classes = [0,1,2]
num_classes = 3
class_indices = { 0: [2,4], 1: [3], 2: [0,1]]
"""
class_indices = {cl: np.where(self.y == cl)[0] for cl in self.classes}
return class_indices
def _get_batches_of_transformed_samples(self, index_array, return_classes=False):
batch_pairs = []
batch_pair_labels = []
batch_classes = []
class_indices = self._get_class_indices()
# set seed
if self.seed is not None:
local_seed = self.seed + self.total_batches_seen
else:
local_seed = self.total_batches_seen
random.seed(local_seed)
for step, idx in enumerate(index_array):
# get an anchor image
x_a = self.x[idx]
x_a = self.image_data_generator.random_transform(x_a)
x_a = self.image_data_generator.preprocessing_function(x_a)
# Half positive and half negative pairs are generated for a batch
same_class = int(self.y[idx])
# get different class
while True:
diff_class = random.choice(self.classes)
if same_class != diff_class:
break
neg_pair_index = random.choice(class_indices[diff_class])
while True:
pos_pair_index = random.choice(class_indices[same_class])
if pos_pair_index != idx:
break
x_n = self.x[neg_pair_index]
# augment pair image
# TODO do I need to cast to float? x2.astype(K.floatx())
x_n = self.image_data_generator.random_transform(x_n)
x_n = self.image_data_generator.preprocessing_function(x_n)
x_p = self.x[pos_pair_index]
x_p = self.image_data_generator.random_transform(x_p)
x_p = self.image_data_generator.preprocessing_function(x_p)
batch_pairs += [[x_a, x_p]]
batch_pairs += [[x_a, x_n]]
batch_classes += [[same_class, same_class]]
batch_classes += [[same_class, diff_class]]
batch_pair_labels += [0, 1]
# Shuffle pairs and labels in unison
if self.shuffle == True:
batch_pairs, batch_pair_labels, batch_classes = shuffle(
batch_pairs, batch_pair_labels, batch_classes, random_state=local_seed
)
if return_classes:
return (
[
np.array(batch_pairs, dtype='float32')[:, 0],
np.array(batch_pairs, dtype='float32')[:, 1],
],
np.array(batch_pair_labels),
np.array(batch_classes),
)
else:
return (
[
np.array(batch_pairs, dtype='float32')[:, 0],
np.array(batch_pairs, dtype='float32')[:, 1],
],
np.array(batch_pair_labels),
)
def __next__(self):
"""Returns
The next batch with classes.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# print(index_array)
# The transformation of images is not under thread lock
# so it can be done in parallel
# class_indices = self._get_class_indices()
return self._get_batches_of_transformed_samples(index_array, return_classes=True)
| [
"numpy.random.seed",
"numpy.empty",
"random.gauss",
"utils.utils.rgb2gray",
"numpy.unique",
"random.seed",
"numpy.reshape",
"numpy.random.choice",
"keras_preprocessing.image.affine_transformations.apply_affine_transform",
"os.path.realpath",
"skimage.transform.warp",
"numpy.squeeze",
"random... | [((8171, 8202), 'skimage.transform.ProjectiveTransform', 'transform.ProjectiveTransform', ([], {}), '()\n', (8200, 8202), False, 'from skimage import transform\n'), ((9097, 9141), 'skimage.transform.warp', 'transform.warp', (['img', 'projection'], {'mode': '"""edge"""'}), "(img, projection, mode='edge')\n", (9111, 9141), False, 'from skimage import transform\n'), ((331, 357), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (347, 357), False, 'import os, random\n'), ((1435, 1473), 'numpy.unique', 'np.unique', (['classes'], {'return_counts': '(True)'}), '(classes, return_counts=True)\n', (1444, 1473), True, 'import numpy as np\n'), ((2420, 2446), 'numpy.random.seed', 'np.random.seed', (['local_seed'], {}), '(local_seed)\n', (2434, 2446), True, 'import numpy as np\n'), ((2657, 2750), 'numpy.zeros', 'np.zeros', ([], {'shape': '((self.p * self.k, self.n_poses) + self.img_set.shape[1:])', 'dtype': '"""float32"""'}), "(shape=(self.p * self.k, self.n_poses) + self.img_set.shape[1:],\n dtype='float32')\n", (2665, 2750), True, 'import numpy as np\n'), ((2804, 2866), 'numpy.empty', 'np.empty', ([], {'shape': '(self.p * self.k,)', 'dtype': 'self.class_set.dtype'}), '(shape=(self.p * self.k,), dtype=self.class_set.dtype)\n', (2812, 2866), True, 'import numpy as np\n'), ((2957, 3017), 'numpy.random.choice', 'np.random.choice', (['self.unique_classes', 'self.p'], {'replace': '(False)'}), '(self.unique_classes, self.p, replace=False)\n', (2973, 3017), True, 'import numpy as np\n'), ((7652, 7676), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (7663, 7676), False, 'import os, random\n'), ((7703, 7730), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7717, 7730), False, 'import os, random\n'), ((7740, 7767), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7754, 7767), False, 'import os, random\n'), ((7822, 7849), 'random.uniform', 'random.uniform', 
(['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7836, 7849), False, 'import os, random\n'), ((7859, 7886), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7873, 7886), False, 'import os, random\n'), ((7942, 7969), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7956, 7969), False, 'import os, random\n'), ((7979, 8006), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (7993, 8006), False, 'import os, random\n'), ((8063, 8090), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (8077, 8090), False, 'import os, random\n'), ((8100, 8127), 'random.uniform', 'random.uniform', (['(-0.5 * d)', 'd'], {}), '(-0.5 * d, d)\n', (8114, 8127), False, 'import os, random\n'), ((8231, 8413), 'numpy.array', 'np.array', (['(top_left, (bottom_left[0], image_size - bottom_left[1]), (image_size -\n bottom_right[0], image_size - bottom_right[1]), (image_size - top_right\n [0], top_right[1]))'], {}), '((top_left, (bottom_left[0], image_size - bottom_left[1]), (\n image_size - bottom_right[0], image_size - bottom_right[1]), (\n image_size - top_right[0], top_right[1])))\n', (8239, 8413), True, 'import numpy as np\n'), ((8514, 8592), 'numpy.array', 'np.array', (['((0, 0), (0, image_size), (image_size, image_size), (image_size, 0))'], {}), '(((0, 0), (0, image_size), (image_size, image_size), (image_size, 0)))\n', (8522, 8592), True, 'import numpy as np\n'), ((10377, 10389), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (10386, 10389), True, 'import numpy as np\n'), ((11556, 11579), 'random.seed', 'random.seed', (['local_seed'], {}), '(local_seed)\n', (11567, 11579), False, 'import os, random\n'), ((12170, 12210), 'random.choice', 'random.choice', (['class_indices[diff_class]'], {}), '(class_indices[diff_class])\n', (12183, 12210), False, 'import os, random\n'), ((13212, 13291), 'sklearn.utils.shuffle', 'shuffle', (['batch_pairs', 'batch_pair_labels', 
'batch_classes'], {'random_state': 'local_seed'}), '(batch_pairs, batch_pair_labels, batch_classes, random_state=local_seed)\n', (13219, 13291), False, 'from sklearn.utils import shuffle\n'), ((3308, 3366), 'numpy.random.choice', 'np.random.choice', (['num_img_sel_class', 'self.k'], {'replace': '(False)'}), '(num_img_sel_class, self.k, replace=False)\n', (3324, 3366), True, 'import numpy as np\n'), ((3411, 3468), 'numpy.random.choice', 'np.random.choice', (['num_img_sel_class', 'self.k'], {'replace': '(True)'}), '(num_img_sel_class, self.k, replace=True)\n', (3427, 3468), True, 'import numpy as np\n'), ((11070, 11092), 'numpy.where', 'np.where', (['(self.y == cl)'], {}), '(self.y == cl)\n', (11078, 11092), True, 'import numpy as np\n'), ((12041, 12068), 'random.choice', 'random.choice', (['self.classes'], {}), '(self.classes)\n', (12054, 12068), False, 'import os, random\n'), ((12269, 12309), 'random.choice', 'random.choice', (['class_indices[same_class]'], {}), '(class_indices[same_class])\n', (12282, 12309), False, 'import os, random\n'), ((13556, 13583), 'numpy.array', 'np.array', (['batch_pair_labels'], {}), '(batch_pair_labels)\n', (13564, 13583), True, 'import numpy as np\n'), ((13601, 13624), 'numpy.array', 'np.array', (['batch_classes'], {}), '(batch_classes)\n', (13609, 13624), True, 'import numpy as np\n'), ((13860, 13887), 'numpy.array', 'np.array', (['batch_pair_labels'], {}), '(batch_pair_labels)\n', (13868, 13887), True, 'import numpy as np\n'), ((4623, 4669), 'keras_preprocessing.image.affine_transformations.apply_affine_transform', 'apply_affine_transform', (['projected'], {'theta': 'angle'}), '(projected, theta=angle)\n', (4645, 4669), False, 'from keras_preprocessing.image.affine_transformations import apply_affine_transform\n'), ((6688, 6726), 'numpy.array', 'np.array', (['([batch_class] * self.n_poses)'], {}), '([batch_class] * self.n_poses)\n', (6696, 6726), True, 'import numpy as np\n'), ((6782, 6825), 'numpy.reshape', 'np.reshape', 
(['batch_img', 'new_shape'], {'order': '"""F"""'}), "(batch_img, new_shape, order='F')\n", (6792, 6825), True, 'import numpy as np\n'), ((6874, 6895), 'numpy.squeeze', 'np.squeeze', (['batch_img'], {}), '(batch_img)\n', (6884, 6895), True, 'import numpy as np\n'), ((4545, 4580), 'random.gauss', 'random.gauss', (['(angle_step * pose)', '(10)'], {}), '(angle_step * pose, 10)\n', (4557, 4580), False, 'import os, random\n'), ((13409, 13447), 'numpy.array', 'np.array', (['batch_pairs'], {'dtype': '"""float32"""'}), "(batch_pairs, dtype='float32')\n", (13417, 13447), True, 'import numpy as np\n'), ((13475, 13513), 'numpy.array', 'np.array', (['batch_pairs'], {'dtype': '"""float32"""'}), "(batch_pairs, dtype='float32')\n", (13483, 13513), True, 'import numpy as np\n'), ((13713, 13751), 'numpy.array', 'np.array', (['batch_pairs'], {'dtype': '"""float32"""'}), "(batch_pairs, dtype='float32')\n", (13721, 13751), True, 'import numpy as np\n'), ((13779, 13817), 'numpy.array', 'np.array', (['batch_pairs'], {'dtype': '"""float32"""'}), "(batch_pairs, dtype='float32')\n", (13787, 13817), True, 'import numpy as np\n'), ((5267, 5319), 'keras_preprocessing.image.affine_transformations.apply_affine_transform', 'apply_affine_transform', (['temp'], {'theta': '(rot_angle * pose)'}), '(temp, theta=rot_angle * pose)\n', (5289, 5319), False, 'from keras_preprocessing.image.affine_transformations import apply_affine_transform\n'), ((5783, 5801), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5799, 5801), True, 'import numpy as np\n'), ((5866, 5905), 'utils.utils.rgb2gray', 'rgb2gray', (['batch_img[j, pose]', '"""float32"""'], {}), "(batch_img[j, pose], 'float32')\n", (5874, 5905), False, 'from utils.utils import rgb2gray\n')] |
import numpy as np
import logging
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
from tensorflow.keras.callbacks import Callback
logging.getLogger("tensorflow").setLevel(logging.ERROR)
class CosineAnnealer:
def __init__(self, start, end, steps):
self.start = start
self.end = end
self.steps = steps
self.n = 0
def step(self):
cos = np.cos(np.pi * (self.n / self.steps)) + 1
self.n += 1
return self.end + (self.start - self.end) / 2.0 * cos
class OneCycleScheduler(LearningRateSchedule):
"""`LearningRateSchedule` that schedules the learning rate on a 1cycle policy as per <NAME>'s paper
(https://arxiv.org/pdf/1803.09820.pdf).
The implementation adopts additional improvements as per the fastai library:
https://docs.fast.ai/callbacks.one_cycle.html, where only two phases are used and the adaptation is done using
cosine annealing. In the warm-up phase the LR increases from `lr_max / div_factor` to `lr_max` and momentum
decreases from `mom_max` to `mom_min`. In the second phase the LR decreases from `lr_max` to `lr_max / final_div`
and momemtum from `mom_max` to `mom_min`. By default the phases are not of equal length, with the warm-up phase
controlled by the parameter `warmup_ratio`.
NOTE: The momentum is not controlled through this class. This class is intended to be used together with the
`MomentumOneCycleScheduler` callback defined below.
"""
def __init__(
self,
lr_max,
steps,
mom_min=0.85,
mom_max=0.95,
warmup_ratio=0.3,
div_factor=25.0,
final_div=100000.0,
name=None,
):
super(OneCycleScheduler, self).__init__()
lr_min = lr_max / div_factor
if final_div is None:
final_lr = lr_max / (div_factor * 1e4)
else:
final_lr = lr_max / (final_div)
phase_1_steps = int(steps * warmup_ratio)
phase_2_steps = steps - phase_1_steps
self.lr_max = lr_max
self.steps = steps
self.mom_min = mom_min
self.mom_max = mom_max
self.warmup_ratio = warmup_ratio
self.div_factor = div_factor
self.final_div = final_div
self.name = name
phases = [CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(lr_max, final_lr, phase_2_steps)]
step = 0
phase = 0
full_lr_schedule = np.zeros(int(steps))
for ii in np.arange(np.floor(steps), dtype=int):
step += 1
if step >= phase_1_steps:
phase = 1
full_lr_schedule[ii] = phases[phase].step()
self.full_lr_schedule = tf.convert_to_tensor(full_lr_schedule)
def __call__(self, step):
with ops.name_scope(self.name or "OneCycleScheduler"):
return self.full_lr_schedule[tf.cast(step, "int32") - 1]
def get_config(self):
return {
"lr_max": self.lr_max,
"steps": self.steps,
"mom_min": self.mom_min,
"mom_max": self.mom_max,
"warmup_ratio": self.warmup_ratio,
"div_factor": self.div_factor,
"final_div": self.final_div,
"name": self.name,
}
class MomentumOneCycleScheduler(Callback):
"""`Callback` that schedules the momentun according to the 1cycle policy as per <NAME>'s paper
(https://arxiv.org/pdf/1803.09820.pdf).
NOTE: This callback only schedules the momentum parameter, not the learning rate. It is intended to be used with the
KerasOneCycle learning rate scheduler above or similar.
"""
def __init__(self, steps, mom_min=0.85, mom_max=0.95, warmup_ratio=0.3):
super(MomentumOneCycleScheduler, self).__init__()
phase_1_steps = steps * warmup_ratio
phase_2_steps = steps - phase_1_steps
self.phase_1_steps = phase_1_steps
self.phase_2_steps = phase_2_steps
self.phase = 0
self.step = 0
self.phases = [CosineAnnealer(mom_max, mom_min, phase_1_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]
def on_train_begin(self, logs=None):
self.set_momentum(self.mom_schedule().step())
def on_train_batch_end(self, batch, logs=None):
self.step += 1
if self.step >= self.phase_1_steps:
self.phase = 1
self.set_momentum(self.mom_schedule().step())
def set_momentum(self, mom):
# In Adam, the momentum parameter is called beta_1
if isinstance(self.model.optimizer, tf.keras.optimizers.Adam):
tf.keras.backend.set_value(self.model.optimizer.beta_1, mom)
# In SDG, the momentum parameter is called momentum
elif isinstance(self.model.optimizer, tf.keras.optimizers.SGD):
tf.keras.backend.set_value(self.model.optimizer.momentum, mom)
else:
raise NotImplementedError(
"Only SGD and Adam are supported by MomentumOneCycleScheduler: {}".format(type(self.model.optimizer))
)
def mom_schedule(self):
return self.phases[self.phase]
| [
"tensorflow.keras.backend.set_value",
"tensorflow.convert_to_tensor",
"numpy.floor",
"tensorflow.cast",
"numpy.cos",
"tensorflow.python.framework.ops.name_scope",
"logging.getLogger"
] | [((223, 254), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (240, 254), False, 'import logging\n'), ((2792, 2830), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['full_lr_schedule'], {}), '(full_lr_schedule)\n', (2812, 2830), True, 'import tensorflow as tf\n'), ((477, 514), 'numpy.cos', 'np.cos', (['(np.pi * (self.n / self.steps))'], {}), '(np.pi * (self.n / self.steps))\n', (483, 514), True, 'import numpy as np\n'), ((2588, 2603), 'numpy.floor', 'np.floor', (['steps'], {}), '(steps)\n', (2596, 2603), True, 'import numpy as np\n'), ((2875, 2923), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (["(self.name or 'OneCycleScheduler')"], {}), "(self.name or 'OneCycleScheduler')\n", (2889, 2923), False, 'from tensorflow.python.framework import ops\n'), ((4685, 4745), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['self.model.optimizer.beta_1', 'mom'], {}), '(self.model.optimizer.beta_1, mom)\n', (4711, 4745), True, 'import tensorflow as tf\n'), ((4890, 4952), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['self.model.optimizer.momentum', 'mom'], {}), '(self.model.optimizer.momentum, mom)\n', (4916, 4952), True, 'import tensorflow as tf\n'), ((2966, 2988), 'tensorflow.cast', 'tf.cast', (['step', '"""int32"""'], {}), "(step, 'int32')\n", (2973, 2988), True, 'import tensorflow as tf\n')] |
"""
The viewer for rendering the gym-idsgame environment. Supports both agent-mode and manual-mode
"""
try:
import pyglet
except ImportError as e:
raise ImportError('''
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet'.
But if you really just want to install all Gym dependencies and not have to think about it,
'pip install -e .[all]' or 'pip install gym[all]' will do it.
''')
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError('''
Error occurred while running `from pyglet.gl import *`
HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
''')
import numpy as np
import time
import sys
from gym_idsgame.envs.rendering.frames.game_frame import GameFrame
from gym_idsgame.envs.dao.idsgame_config import IdsGameConfig
class Viewer():
    """
    Class representing a viewer for the IDS-game. Can be used for either agent-view, or manual-view (human player)
    """
    def __init__(self, idsgame_config: IdsGameConfig):
        """
        :param idsgame_config: configuration for the IdsGameEnv
        """
        self.idsgame_config = idsgame_config
        self.isopen = True
    def manual_start_attacker(self) -> None:
        """
        Starts the IDS-game app in a manual mode where the attacker is controlled with keyboard and mouse
        :return: None
        """
        # The flag must be set before the frame is built so the frame reads the right mode
        self.idsgame_config.game_config.manual_attacker = True
        self.gameframe = GameFrame(idsgame_config=self.idsgame_config)
        self.gameframe.on_close = self.window_closed_by_user
        self.isopen = True
        # Let pyglet drive updates at ~60 FPS; app.run() blocks until the window closes
        pyglet.clock.schedule_interval(self.gameframe.update, 1 / 60.)
        pyglet.app.run()
    def manual_start_defender(self) -> None:
        """
        Starts the IDS-game app in a manual mode where the defender is controlled with keyboard and mouse
        :return: None
        """
        # The flag must be set before the frame is built so the frame reads the right mode
        self.idsgame_config.game_config.manual_defender = True
        self.gameframe = GameFrame(idsgame_config=self.idsgame_config)
        self.gameframe.on_close = self.window_closed_by_user
        self.isopen = True
        # Let pyglet drive updates at ~60 FPS; app.run() blocks until the window closes
        pyglet.clock.schedule_interval(self.gameframe.update, 1 / 60.)
        pyglet.app.run()
    def agent_start(self) -> None:
        """
        Creates the IDS-game frame in agent-mode, where actions are taken programmatically rather than through
        moving mouse and keyboard.
        """
        # No pyglet event loop here: the agent calls render()/render_frame() itself
        self.idsgame_config.game_config.manual_attacker = False
        self.idsgame_config.game_config.manual_defender = False
        self.gameframe = GameFrame(idsgame_config=self.idsgame_config)
        self.gameframe.on_close = self.window_closed_by_user
        self.isopen = True
    def window_closed_by_user(self) -> None:
        """
        Callback when the frame is closed by the user
        :return: None
        """
        self.isopen = False
        self.gameframe.close()
        print("Window closed, exiting")
        # NOTE(review): terminates the whole process; appropriate for manual play,
        # presumably not meant to be reached in agent mode — confirm
        sys.exit(0)
    def close(self) -> None:
        """
        Closes the frame
        :return: None
        """
        self.gameframe.close()
    def render_frame(self, return_rgb_array: bool = False):
        """
        Renders a frame manually.
        Using pyglet together with openAI gym means that we have to integrate OpenGL's event-loop
        with the event-loop of the RL agent and the gym framework. That's why we render things manually and dispatch
        events manually rather than just calling pyglet.app.run().
        :param return_rgb_array: if this is true it returns the RGB array for the rendered frame (for recording)
        :return: RGB array or bool
        """
        self.gameframe.clear() # Clears the frame
        self.gameframe.switch_to() # Make this window the current OpenGL rendering context
        self.gameframe.dispatch_events() # Poll the OS for events and call related handlers for updating the frame
        self.gameframe.on_draw() # Draw the frame
        if return_rgb_array:
            # Capture must happen before flip(), while the back buffer holds the new frame
            arr = self.extract_rgb_array()
        self.gameframe.flip() # Swaps the OpenGL front and back buffers Updates the visible display with the back buffer
        return arr if return_rgb_array else self.isopen
    def render(self, return_rgb_array = False):
        """
        Renders a state of the IDS game. A single state might include many frames. For example if an attack or defense
        move was made, this will cause several frames to visualize the attack/defense.
        :param return_rgb_array: boolean whether to return rgb array or not
        :return: RGB array or bool
        """
        self.gameframe.unschedule_events()
        frames = []
        arr = self.render_frame(return_rgb_array)
        frames.append(arr)
        # One extra frame per "blink" while attack/defense events are pending
        for i in range(self.gameframe.idsgame_config.render_config.num_blinks):
            if len(self.gameframe.game_state.defense_events) > 0 or len(self.gameframe.game_state.attack_events) > 0:
                self.gameframe.simulate_events(i)
                arr = self.render_frame(return_rgb_array=return_rgb_array)
                frames.append(arr)
                # Pause between blinks so the attack/defense animation is visible
                time.sleep(self.gameframe.idsgame_config.render_config.blink_interval)
        self.gameframe.reset_events()
        return np.array(frames) if return_rgb_array else self.isopen
    def extract_rgb_array(self) -> np.ndarray:
        """
        Extract RGB array from pyglet, this can then be used to record video of the rendering through gym's API
        :return: RGB Array [height, width, 3]
        """
        buffer = pyglet.image.get_buffer_manager().get_color_buffer()
        image_data = buffer.get_image_data()
        # NOTE(review): np.fromstring is deprecated in newer numpy in favor of
        # np.frombuffer — worth migrating when the numpy version allows
        arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep='')
        # In https://github.com/openai/gym-http-api/issues/2, we
        # discovered that someone using Xmonad on Arch was having
        # a window of size 598 x 398, though a 600 x 400 window
        # was requested. (Guess Xmonad was preserving a pixel for
        # the boundary.) So we use the buffer height/width rather
        # than the requested one.
        arr = arr.reshape(buffer.height, buffer.width, 4)
        # Flip vertically (OpenGL's origin is bottom-left) and drop the alpha channel
        arr = arr[::-1, :, 0:3]
return arr | [
"pyglet.app.run",
"time.sleep",
"numpy.array",
"gym_idsgame.envs.rendering.frames.game_frame.GameFrame",
"pyglet.image.get_buffer_manager",
"sys.exit",
"pyglet.clock.schedule_interval"
] | [((1690, 1735), 'gym_idsgame.envs.rendering.frames.game_frame.GameFrame', 'GameFrame', ([], {'idsgame_config': 'self.idsgame_config'}), '(idsgame_config=self.idsgame_config)\n', (1699, 1735), False, 'from gym_idsgame.envs.rendering.frames.game_frame import GameFrame\n'), ((1832, 1895), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.gameframe.update', '(1 / 60.0)'], {}), '(self.gameframe.update, 1 / 60.0)\n', (1862, 1895), False, 'import pyglet\n'), ((1903, 1919), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (1917, 1919), False, 'import pyglet\n'), ((2207, 2252), 'gym_idsgame.envs.rendering.frames.game_frame.GameFrame', 'GameFrame', ([], {'idsgame_config': 'self.idsgame_config'}), '(idsgame_config=self.idsgame_config)\n', (2216, 2252), False, 'from gym_idsgame.envs.rendering.frames.game_frame import GameFrame\n'), ((2349, 2412), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.gameframe.update', '(1 / 60.0)'], {}), '(self.gameframe.update, 1 / 60.0)\n', (2379, 2412), False, 'import pyglet\n'), ((2420, 2436), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (2434, 2436), False, 'import pyglet\n'), ((2796, 2841), 'gym_idsgame.envs.rendering.frames.game_frame.GameFrame', 'GameFrame', ([], {'idsgame_config': 'self.idsgame_config'}), '(idsgame_config=self.idsgame_config)\n', (2805, 2841), False, 'from gym_idsgame.envs.rendering.frames.game_frame import GameFrame\n'), ((3184, 3195), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3192, 3195), False, 'import sys\n'), ((5472, 5488), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (5480, 5488), True, 'import numpy as np\n'), ((5347, 5417), 'time.sleep', 'time.sleep', (['self.gameframe.idsgame_config.render_config.blink_interval'], {}), '(self.gameframe.idsgame_config.render_config.blink_interval)\n', (5357, 5417), False, 'import time\n'), ((5774, 5807), 'pyglet.image.get_buffer_manager', 'pyglet.image.get_buffer_manager', ([], {}), '()\n', 
(5805, 5807), False, 'import pyglet\n')] |
#!/usr/bin/env python2
import numpy as np
from setpoint_bitmasks import *
from tf.transformations import euler_from_quaternion
def pose2yaw(this_pose):
    """Return the yaw angle (rad) extracted from the orientation quaternion of
    a message exposing ``.pose.orientation`` with x/y/z/w fields."""
    q = this_pose.pose.orientation
    roll, pitch, yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])
    return yaw
def get_bitmask(xy_type, z_type, yaw_type):
    """Map per-axis control types to a setpoint type bitmask.

    :param xy_type: horizontal control: 'pos', 'vel' or 'pos_with_vel'
    :param z_type: vertical control: 'pos' or 'vel'
    :param yaw_type: heading control: 'pos' (angle) or 'vel' (rate)
    :raises TypeError: if the combination has no bitmask defined

    Fix: the original chain contained a second branch with a condition
    identical to the first ('pos'/'pos'/'pos'), which was unreachable dead
    code; the (pos, pos, vel) case it apparently intended is handled by the
    MASK_XY_POS__Z_POS_YAW_RATE branch below. The dead branch is removed;
    behavior is unchanged for every input.
    """
    # all pos control
    if xy_type == 'pos' and z_type == 'pos' and yaw_type == 'pos':
        bitmask = MASK_XY_POS__Z_POS_YAW_POS
    # all pos control with limited vel (only available in horizontal motion at the moment)
    elif xy_type == 'pos_with_vel':
        bitmask = MASK_XY_POS_XY_VEL_Z_POS_YAW_POS
    # todo, raise if this condition is met but an unsupported variant
    # xyz pos, yaw rate
    elif xy_type == 'pos' and z_type == 'pos' and yaw_type == 'vel':
        bitmask = MASK_XY_POS__Z_POS_YAW_RATE
    # xy velocity control
    elif xy_type == 'vel' and z_type == 'pos' and yaw_type == 'pos':
        bitmask = MASK_XY_VEL__Z_POS__YAW_POS
    elif xy_type == 'vel' and z_type == 'pos' and yaw_type == 'vel':
        bitmask = MASK_XY_VEL__Z_POS__YAW_RATE
    # xyz velocity control
    elif xy_type == 'vel' and z_type == 'vel' and yaw_type == 'pos':
        bitmask = MASK_XY_VEL__Z_VEL_YAW_POS
    elif xy_type == 'vel' and z_type == 'vel' and yaw_type == 'vel':
        bitmask = MASK_XY_VEL__Z_VEL_YAW_RATE
    else:
        raise TypeError(('this control combination is not yet implemented xy_type {} z_type {} yaw_type {}'.format(xy_type, z_type, yaw_type)))
return np.uint16(bitmask) | [
"numpy.uint16",
"tf.transformations.euler_from_quaternion"
] | [((219, 314), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]'], {}), '([orientation_q.x, orientation_q.y, orientation_q.z,\n orientation_q.w])\n', (240, 314), False, 'from tf.transformations import euler_from_quaternion\n'), ((1695, 1713), 'numpy.uint16', 'np.uint16', (['bitmask'], {}), '(bitmask)\n', (1704, 1713), True, 'import numpy as np\n')] |
from __future__ import division
import warnings
import numpy as np
import ROOT
from matplotlib import pyplot as plt
def single_fit(
        spec,
        fix=(),
        values=None,
        scales=None,
        randomize=False,
        max_retries=100,
        max_calls=1000000,
        tol=1e-2):
    """
    Perform a single fit using TMinuit.

    Fix: the original used mutable default arguments (``fix=list()``,
    ``values=dict()``, ``scales=dict()``), which are created once at function
    definition time and shared between calls. They are replaced with an empty
    tuple and ``None`` sentinels (backward compatible: callers passing lists
    or dicts behave exactly as before).

    :param spec: ParSpec
        sepectrum object to use in the fit
    :param fix: [str]
        name of parameters to fix in the fit
    :param values: {str: float}
        custom central values
    :param scales: {str: (float, float)}
        map parameter names to alternate down, up scales
    :param randomize: bool
        randomize initial starting parameter values
    :param max_retries: int
        maximum number of failed TMinuit attempts before raising
    :param max_calls: int
        maximum number of function calls allowed to the minimizer
    :param tol: float
        minimizer tolerance
    :return: [float], float, ROOT.TMinimizer
        fit values, ll and minimizer object used to fit
    :raises RuntimeError: if minimization fails max_retries times
    """
    values = {} if values is None else values
    scales = {} if scales is None else scales
    # Do a vanilla fit (don't randomize parameters)
    minimizer = spec.build_minimizer()
    minimizer.SetTolerance(tol)
    minimizer.SetMaxFunctionCalls(max_calls)
    # copy so they can be edited
    lows = np.array(spec.lows)
    highs = np.array(spec.highs)
    central = np.array(spec.central)
    constraints = spec.constraints
    # Revised scales for all parameters
    for par, bounds in scales.items():
        ipar = spec.ipar(par)
        lows[ipar] = central[ipar] - abs(bounds[0])
        highs[ipar] = central[ipar] + abs(bounds[1])
        scale = .5*(abs(bounds[1])+abs(bounds[0]))
        minimizer.SetVariableStepSize(ipar, scale)
    # Build mask for parameters to fix, and indicate in minimizer
    ifixs = np.zeros(spec.npars, dtype=bool)
    for par in fix:
        ipar = spec.ipar(par)
        minimizer.FixVariable(ipar)
        ifixs[ipar] = True
    # Revised central values for all parameters
    for par, value in values.items():
        ipar = spec.ipar(par)
        minimizer.SetVariableValue(ipar, value)
        central[ipar] = value
    if randomize:
        x = spec.randomize_parameters(
            central, central, lows, highs, constraints)
        # Fixed parameters keep their central values even when randomizing
        x[ifixs] = central[ifixs]
        for i in range(spec.npars):
            minimizer.SetVariableValue(i, x[i])
    # Attempt the fit, TMinuit will fail sometimes
    nfails = 0  # keep track of failed fits
    while not minimizer.Minimize():
        nfails += 1
        if nfails >= max_retries:
            minimizer.Clear()  # note: ROOT leaks memory if not clearing
            raise RuntimeError("Failed minimization")
        if randomize:
            # Retry from a fresh random starting point
            x = spec.randomize_parameters(
                central, central, lows, highs, constraints)
            x[ifixs] = central[ifixs]
            for i in range(spec.npars):
                minimizer.SetVariableValue(i, x[i])
    minx = [minimizer.X()[i] for i in range(spec.npars)]
    ll = spec.ll(minx)
    return minx, ll, minimizer
def global_fit(spec, nfits=10, max_retries=100, **kwargs):
    """
    Perform multiple randomized fits and keep the best minimum found.

    Accepts the same keyword arguments as `single_fit` (except `randomize`
    and `max_retries`, which this function controls itself).

    :param spec: ParSpec
        sepectrum object to use in the fit
    :param nfits: int
        number of successfull fit attempts from which to find a global minimum
    :param max_retries: int
        maximum number of failed fit attempts before giving up
    :return: [float], float, ROOT.TMinimizer
        fit parameters, ll and minimizer object used to fit
    :raises RuntimeError: if max_retries fits fail before nfits succeed
    """
    # Randomization is owned by this function; drop any caller-supplied flag.
    kwargs.pop('randomize', None)

    opt_x = None            # parameter values at the best minimum so far
    opt_ll = float('-inf')  # log likelihood at the best minimum so far
    opt_min = None          # minimizer object that reached the best minimum

    n_done = 0
    n_failed = 0
    while n_done < nfits:
        try:
            x, ll, fitter = single_fit(spec, randomize=True, max_retries=1, **kwargs)
        except RuntimeError:
            n_failed += 1
            if n_failed >= max_retries:
                raise RuntimeError("Failed global minimization")
            # Try again from a different random starting point
            continue
        n_done += 1  # only successful fits are counted
        if ll > opt_ll:
            opt_x = x
            opt_ll = ll
            # Release the previous best minimizer before keeping the new one
            if opt_min is not None:
                opt_min.Clear()
            opt_min = fitter
        else:
            fitter.Clear()  # ROOT leaks memory unless cleared explicitly
    return opt_x, opt_ll, opt_min
def run_minos(spec, minimizer, pars=(), verbose=False):
    """
    Find the points along each parameter value where the log likelihood is
    halved. For a normal distribution, this is the 1-sigma interval containing
    68.27% of the distribution.

    Fix: the original used the mutable default argument ``pars=list()``; it is
    replaced with an empty tuple (backward compatible — the value is only
    tested for emptiness and iterated).

    :param spec: ParSpec
        spectrum whose spectrum parameters are to be profiled
    :param minimizer: ROOT.TMinimizer
        minimier object which has found a minimum
    :param pars: [str]
        list of parameters on which to run Minos, or all if list is empty
    :param verbose: bool
        print information about parameter evaluations
    :return: [float], [float], [bool]
        distance to subtract and add to halve the log likelihood, and success
    """
    if len(pars) == 0:
        pars = spec.pars
    # Lower and upper bounds for the parameters
    npars = len(pars)
    fit_down = [0] * npars
    fit_up = [0] * npars
    success = [True] * npars
    # Declare ROOT doubles which minos can write to by reference
    down = ROOT.Double(0)
    up = ROOT.Double(0)
    for i, par in enumerate(pars):
        ipar = spec.ipar(par)
        if not minimizer.GetMinosError(ipar, down, up):
            success[i] = False
        # Record the values even on failure; they hold whatever Minos computed
        fit_down[i] = float(down)
        fit_up[i] = float(up)
        if verbose:
            print('...%s: %+.2e, %+.2e' % (par, float(down), float(up)))
    return fit_down, fit_up, success
def find_minima(spec, nsample=100, tol=1e-2, verbose=False, **kwargs):
    """
    Find individual local minima in the space.
    :param spec: ParSpec
        spectrum object to use in the fits
    :param nsample: int
        number of samples of the likelihood space to explore minima
    :param tol: float
        consider two log likelihoods belong to different minima if they differ
        by more than this value
    :param verbose: bool
        print information about fit success
    :return: [float], [[float]], [[float]], float
        log likelihood at each minimum
        fit values of parameters at each minimum
        difference of fit values to global minimum, scaled by uncertainty
        probability of finding the global minimum given a random initial point
    """
    if nsample <= 0:
        raise ValueError("Invalid number of samples")
    xs = list()  # minimized parameters of each fit
    lls = list()  # log likelihood of each fit
    best_ll = float('-inf')
    best_min = None  # keep track of the minimizer that reaches global
    isample = 0
    nfails = 0
    while isample < nsample:
        try:
            # First attempt starts from the nominal point; all later attempts
            # (and retries after failures) start from randomized parameters
            minx, ll, minimizer = single_fit(
                spec,
                randomize=True if nfails+isample>0 else False,
                max_retries=1,
                tol=tol,
                **kwargs)
            if verbose:
                print("...sample %d: %.2e" % (isample, ll))
        except RuntimeError:
            if verbose:
                print("...fit failed")
            nfails += 1
            continue
        isample += 1
        xs.append(minx)
        lls.append(ll)
        if ll > best_ll:
            # Release the previous best minimizer (ROOT leaks otherwise)
            if best_min is not None:
                best_min.Clear()
            best_min = minimizer
            best_ll = ll
        else:
            minimizer.Clear()
    if verbose:
        print("...failure rate: %.2e" % (nfails/float(nsample+nfails)))
    # Compute the error for each parameter
    # (the "marix" typo below is in the emitted warning text; left unchanged)
    if not best_min.Hesse():
        warnings.warn("Failed to compute error marix", RuntimeWarning)
    errs = [best_min.Errors()[i] for i in range(spec.npars)]
    # Sort samples by decreasing log likelihood (best minimum first)
    xs = np.array(xs)
    lls = np.array(lls)
    isort = np.argsort(lls)[::-1]
    xs = xs[isort]
    lls = lls[isort]
    # Build array of booleans, True if the log likelihood difference between
    # consecutive (sorted) minima exceeds the tolerance, False otherwise
    mins = np.fabs(lls[1:]-lls[:-1]) > tol
    # Convert to a list of indices of individual local minima from the samples
    imins = np.arange(1, nsample)[mins]
    imins = [0] + list(imins)
    # The number of samples that landed in the global minimum
    nglobal = nsample if len(imins) == 1 else imins[1]
    min_ll = [lls[i] for i in imins]
    min_x = [list(xs[i]) for i in imins]
    min_rel = [list((xs[i]-xs[0])/errs) for i in imins]
    return min_ll, min_x, min_rel, nglobal/nsample
def _make_bounds(spec, ipar, low, high):
if low is None:
low = spec.central[ipar]-spec.scales[ipar]*2
if high is None:
high = spec.central[ipar]+spec.scales[ipar]*2
if low == high:
low -= 1
high += 1
return low, high
def slice1d(spec, x, par, low=None, high=None, nsteps=100):
    """Evaluate the log likelihood along one parameter axis.

    All parameters other than *par* stay fixed at the values given in *x*.

    :return: numpy array of shape (nsteps, 2) with rows (value, ll)
    """
    idx = spec.ipar(par)
    low, high = _make_bounds(spec, idx, low, high)
    point = np.array(x)  # copy so the caller's point is not mutated
    out = list()
    for value in np.linspace(low, high, nsteps):
        point[idx] = value
        out.append((value, spec.ll(point)))
    return np.array(out)
def slice2d(
        spec,
        x,
        par1,
        par2,
        low1=None,
        high1=None,
        low2=None,
        high2=None,
        nsteps=100):
    """Evaluate the log likelihood on an nsteps x nsteps grid of two parameters.

    All parameters other than *par1* and *par2* stay fixed at the values
    given in *x*. Rows are ordered with par1 as the slow axis.

    Fix: removed a dead local (`vals`, a list of all (v1, v2) pairs) that was
    built with O(nsteps**2) work and never used.

    :return: numpy array of shape (nsteps**2, 3) with rows (v1, v2, ll)
    """
    ipar1 = spec.ipar(par1)
    ipar2 = spec.ipar(par2)
    low1, high1 = _make_bounds(spec, ipar1, low1, high1)
    vals1 = np.linspace(low1, high1, nsteps)
    low2, high2 = _make_bounds(spec, ipar2, low2, high2)
    vals2 = np.linspace(low2, high2, nsteps)
    lls = list()
    x = np.array(x)  # copy so the caller's point is not mutated
    for v1 in vals1:
        x[ipar1] = v1
        for v2 in vals2:
            x[ipar2] = v2
            lls.append((v1, v2, spec.ll(x)))
    return np.array(lls)
def profile(
        spec,
        par,
        low=None,
        high=None,
        nsteps=100,
        **kwargs):
    """Profile the likelihood along *par*: at each scan value the parameter is
    fixed and all other parameters are re-minimized.

    Keyword arguments are forwarded to `single_fit` for the initial fit.

    :return: numpy array of (value, likelihood) points, where the second
        column is np.exp(ll) — the likelihood, not the log likelihood
    """
    ipar = spec.ipar(par)
    low, high = _make_bounds(spec, ipar, low, high)
    vals = np.linspace(low, high, nsteps)
    # Initial unconstrained fit; the same minimizer is then reused with the
    # profiled parameter fixed at each scan value
    best, best_ll, minimizer = single_fit(spec, **kwargs)
    minimizer.FixVariable(ipar)
    lls = list()
    # NOTE(review): new_ll/new_x track an improved minimum found during the
    # scan but are never returned or used — possibly leftover code; confirm
    new_ll = best_ll
    new_x = best
    for i, val in enumerate(vals):
        minimizer.SetVariableValue(ipar, val)
        if not minimizer.Minimize():
            # Failed points are silently skipped (no row is appended)
            continue
        minx = [minimizer.X()[i] for i in range(spec.npars)]
        ll = spec.ll(minx)
        if ll > new_ll:
            new_ll = ll
            new_x = minx
        # NOTE(review): debug print left in place; consider removing or gating
        print(val, ll)
        lls.append((val, np.exp(ll)))
    minimizer.Clear()
    # NOTE(review): the first scan point is dropped here — presumably
    # deliberate, but the reason is not documented; confirm
    return np.array(lls[1:])
def covm(minimizer):
    """Return the minimizer's covariance matrix as an (npars, npars) numpy array."""
    n = int(minimizer.NDim())
    rows = []
    for row in range(n):
        rows.append([minimizer.CovMatrix(row, col) for col in range(n)])
    return np.array(rows)
def corrcoef(covm):
    """Convert a covariance matrix into a correlation matrix.

    Diagonal entries of *covm* are the variances. Rows/columns with zero
    variance are left un-normalized (divided by 1 instead of 0).
    """
    cov = np.asarray(covm)
    stdevs = np.diag(cov)**0.5
    # Replace zero stdevs by 1 so parameters with no variance don't divide by 0
    denom = np.where(stdevs > 0, stdevs, 1)
    return cov / np.outer(denom, denom)
def smallcorr(covm, ipois, threshold=0.01):
    """Return indices of parameters whose correlation with every parameter of
    interest (POI) is below *threshold* in absolute value.

    :param covm: covariance matrix
    :param ipois: indices of the POI rows in the covariance matrix
    :param threshold: absolute correlation below which a parameter is "small"
    :return: numpy array of column indices uncorrelated with all POIs
    """
    corr = corrcoef(covm)
    # NOTE(review): mutates numpy's *global* print options as a side effect
    np.set_printoptions(precision=3, suppress=True)
    # Select rows for the POIs only
    corr = corr[ipois]
    # Array of True where the correlation between a POI (row) and another
    # parameter (col) is below threshold
    below = np.fabs(corr) < threshold
    # Find columns that are always below threshold: parameters that don't
    # correlate strongly to any POI
    below = np.all(below, axis=0)
    # NOTE(review): the print assumes exactly two POI rows (corr[0], corr[1]);
    # fails or misreports for other POI counts — confirm intended usage
    for ipar in range(len(below)):
        print("%+.3f %+.3f %s" % (corr[0][ipar], corr[1][ipar], below[ipar]))
    return np.arange(len(below))[below]
| [
"numpy.diag",
"numpy.set_printoptions",
"numpy.copy",
"numpy.asarray",
"numpy.zeros",
"numpy.argsort",
"ROOT.Double",
"numpy.fabs",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.exp",
"warnings.warn",
"numpy.all"
] | [((1056, 1075), 'numpy.array', 'np.array', (['spec.lows'], {}), '(spec.lows)\n', (1064, 1075), True, 'import numpy as np\n'), ((1088, 1108), 'numpy.array', 'np.array', (['spec.highs'], {}), '(spec.highs)\n', (1096, 1108), True, 'import numpy as np\n'), ((1124, 1146), 'numpy.array', 'np.array', (['spec.central'], {}), '(spec.central)\n', (1132, 1146), True, 'import numpy as np\n'), ((1578, 1610), 'numpy.zeros', 'np.zeros', (['spec.npars'], {'dtype': 'bool'}), '(spec.npars, dtype=bool)\n', (1586, 1610), True, 'import numpy as np\n'), ((5411, 5425), 'ROOT.Double', 'ROOT.Double', (['(0)'], {}), '(0)\n', (5422, 5425), False, 'import ROOT\n'), ((5435, 5449), 'ROOT.Double', 'ROOT.Double', (['(0)'], {}), '(0)\n', (5446, 5449), False, 'import ROOT\n'), ((7877, 7889), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (7885, 7889), True, 'import numpy as np\n'), ((7900, 7913), 'numpy.array', 'np.array', (['lls'], {}), '(lls)\n', (7908, 7913), True, 'import numpy as np\n'), ((9058, 9088), 'numpy.linspace', 'np.linspace', (['low', 'high', 'nsteps'], {}), '(low, high, nsteps)\n', (9069, 9088), True, 'import numpy as np\n'), ((9115, 9126), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9123, 9126), True, 'import numpy as np\n'), ((9234, 9247), 'numpy.array', 'np.array', (['lls'], {}), '(lls)\n', (9242, 9247), True, 'import numpy as np\n'), ((9549, 9581), 'numpy.linspace', 'np.linspace', (['low1', 'high1', 'nsteps'], {}), '(low1, high1, nsteps)\n', (9560, 9581), True, 'import numpy as np\n'), ((9652, 9684), 'numpy.linspace', 'np.linspace', (['low2', 'high2', 'nsteps'], {}), '(low2, high2, nsteps)\n', (9663, 9684), True, 'import numpy as np\n'), ((9766, 9777), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9774, 9777), True, 'import numpy as np\n'), ((9964, 9977), 'numpy.array', 'np.array', (['lls'], {}), '(lls)\n', (9972, 9977), True, 'import numpy as np\n'), ((10189, 10219), 'numpy.linspace', 'np.linspace', (['low', 'high', 'nsteps'], {}), '(low, high, nsteps)\n', 
(10200, 10219), True, 'import numpy as np\n'), ((10766, 10783), 'numpy.array', 'np.array', (['lls[1:]'], {}), '(lls[1:])\n', (10774, 10783), True, 'import numpy as np\n'), ((10994, 11010), 'numpy.asarray', 'np.asarray', (['covm'], {}), '(covm)\n', (11004, 11010), True, 'import numpy as np\n'), ((11073, 11086), 'numpy.copy', 'np.copy', (['sigs'], {}), '(sigs)\n', (11080, 11086), True, 'import numpy as np\n'), ((11374, 11421), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (11393, 11421), True, 'import numpy as np\n'), ((11761, 11782), 'numpy.all', 'np.all', (['below'], {'axis': '(0)'}), '(below, axis=0)\n', (11767, 11782), True, 'import numpy as np\n'), ((7743, 7805), 'warnings.warn', 'warnings.warn', (['"""Failed to compute error marix"""', 'RuntimeWarning'], {}), "('Failed to compute error marix', RuntimeWarning)\n", (7756, 7805), False, 'import warnings\n'), ((7927, 7942), 'numpy.argsort', 'np.argsort', (['lls'], {}), '(lls)\n', (7937, 7942), True, 'import numpy as np\n'), ((8151, 8178), 'numpy.fabs', 'np.fabs', (['(lls[1:] - lls[:-1])'], {}), '(lls[1:] - lls[:-1])\n', (8158, 8178), True, 'import numpy as np\n'), ((8274, 8295), 'numpy.arange', 'np.arange', (['(1)', 'nsample'], {}), '(1, nsample)\n', (8283, 8295), True, 'import numpy as np\n'), ((11022, 11035), 'numpy.diag', 'np.diag', (['covm'], {}), '(covm)\n', (11029, 11035), True, 'import numpy as np\n'), ((11613, 11626), 'numpy.fabs', 'np.fabs', (['corr'], {}), '(corr)\n', (11620, 11626), True, 'import numpy as np\n'), ((10718, 10728), 'numpy.exp', 'np.exp', (['ll'], {}), '(ll)\n', (10724, 10728), True, 'import numpy as np\n')] |
import numpy as np
import math as ma

# Compare the sine of the same input computed by numpy and by the standard
# library's math module; the two results should agree to machine precision.
x = 1.0
y_numpy = np.sin(x)
y_math = ma.sin(x)
for label, value in (('numpy:', y_numpy), ('math: ', y_math)):
    print(label, value)
| [
"numpy.sin",
"math.sin"
] | [((55, 64), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (61, 64), True, 'import numpy as np\n'), ((74, 83), 'math.sin', 'ma.sin', (['x'], {}), '(x)\n', (80, 83), True, 'import math as ma\n')] |
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF metric accumulators."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Any, Callable, List, Optional, Text, Tuple, Union
import numpy as np
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.utils import size_estimator
class TFMetricsAccumulator(object):
  """Accumulator for TF metrics.
  Attributes:
    inputs: Accumulated batch of inputs. The inputs are stored in a
      multi-dimensional list. The first dimension is used to index the
      associated output (for single-output models this will only have one item).
      The second dimension is used to store the args used by the combiner. For
      example the args might be a tf.Example if feeding a model or they might be
      (y_true, y_pred, example_weight) for calling update_state directly.
      Batching is done on the last dimension.
    weights: Accumulated weights. The weights are stored in a multi-dimensional
      list where the first dimension is used to index the associated output (for
      single-output models this will only have one item). The second dimension
      is used to store the accumulated weights for each metric associated with
      the output dimension.
    size_estimator: Batch size estimator.
    desired_batch_size: Desired batch size.
  """
  # We really want the batch size to be adaptive like it is in
  # beam.BatchElements(), but there isn't an easy way to make it so. For now
  # we will limit stored inputs to a max overall byte size.
  # TODO(b/73789023): Figure out how to make this batch size dynamic.
  _TOTAL_INPUT_BYTE_SIZE_THRESHOLD = 16 << 20  # 16MiB
  _DEFAULT_DESIRED_BATCH_SIZE = 1000
  # __slots__ avoids a per-instance __dict__; accumulators are created in bulk
  __slots__ = ['_inputs', '_weights', '_size_estimator', '_desired_batch_size']
  def __init__(self,
               input_counts: List[int],
               metric_counts: List[int],
               size_estimator_fn: Callable[[Any], int],
               desired_batch_size: Optional[int] = None):
    """Initializes accumulator using a list of metric counts per output.
    Args:
      input_counts: Number of inputs associated with each output index.
      metric_counts: Number of metrics associated with each output index.
      size_estimator_fn: Function to use for estimating the size of the inputs.
      desired_batch_size: FOR TESTING ONLY.
    """
    # Inputs have shape (num_outputs, num_metrics, num_accumulated_inputs)
    self._inputs = []
    # Weights have shape (num_outputs, num_metrics)
    self._weights = []  # type: List[List[Optional[np.ndarray]]]
    for input_count in input_counts:
      self._inputs.append(tuple([] for _ in range(input_count)))
    for output_metric_count in metric_counts:
      self._weights.append([None] * output_metric_count)
    self._size_estimator = size_estimator.SizeEstimator(
        size_threshold=self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD,
        size_fn=size_estimator_fn)
    if desired_batch_size and desired_batch_size > 0:
      self._desired_batch_size = desired_batch_size
    else:
      self._desired_batch_size = self._DEFAULT_DESIRED_BATCH_SIZE
  def len_inputs(self) -> int:
    """Returns length of inputs."""
    # All input lists grow in lockstep, so the first one gives the batch size
    return len(self._inputs[0][0])
  def add_input(self, output_index: int, *args):
    """Adds new inputs to the lists of input args stored at output_index."""
    for i, v in enumerate(args):
      self._inputs[output_index][i].append(v)
      if v is not None:
        self._size_estimator.update(v)
  def get_inputs(self, output_index: int) -> Any:
    """Returns input args for output at given offset."""
    return self._inputs[output_index]
  def clear_inputs(self):
    """Clears currently stored inputs."""
    for output_index in range(len(self._inputs)):
      for i in range(len(self._inputs[output_index])):
        # del list[:] empties in place, preserving the outer tuple structure
        del self._inputs[output_index][i][:]
    self._size_estimator.clear()
  def add_weights(self, output_index: int, metric_index: int,
                  weights: np.ndarray):
    """Adds weights for metric at given metric_index and output_index."""
    cur_weights = self._weights[output_index][metric_index]
    if cur_weights is None:
      self._weights[output_index][metric_index] = weights
    else:
      # np.add returns a new array; the stored weights are never mutated
      self._weights[output_index][metric_index] = np.add(cur_weights, weights)
  def get_weights(self, output_index: int,
                  metric_index: int) -> Optional[np.ndarray]:
    """Gets currently stored weights for given metric_index and output_index."""
    return self._weights[output_index][metric_index]
  def should_flush(self) -> bool:
    """Returns true if size estimator indicates flush is needed."""
    return (self.len_inputs() >= self._desired_batch_size or
            self._size_estimator.should_flush())
  def get_size_estimate(self) -> int:
    """Returns size estimator associated with accumulator."""
    return self._size_estimator.get_estimate()
def _numpy_array_size_fn(array: np.ndarray) -> int:
"""Size estimator for numpy arrays."""
return array.nbytes
class TFCompilableMetricsAccumulator(TFMetricsAccumulator):
  """Accumulator for compilable TF metrics.
  Attributes:
    inputs: Accumulated batch of inputs. The inputs are stored in a
      multi-dimensional list. The first dimension is used to index the
      associated output (for single-output models this will only have one item).
      The second dimension is used to store the args passed to update_state
      (i.e. (y_true, y_pred, example_weight)). Batching is done on the last
      dimension.calling update_state directly. Batching is done on the last
      dimension.
    weights: Accumulated weights. The weights are stored in a multi-dimensional
      list where the first dimension is used to index the associated output (for
      single-output models this will only have one item). The second dimension
      is used to store the accumulated weights for each metric associated with
      the output dimension.
    pad: True if padding needed.
    last_dim: Max size of the last dimension of labels or predictions (used with
      padding).
    size_estimator: Batch size estimator.
    desired_batch_size: Desired batch size.
  """
  # Must re-declare the parent's slots because __slots__ is not inherited
  __slots__ = [
      '_inputs', '_weights', '_pad', '_pad_to_dim', '_label_padding',
      '_prediction_padding', '_size_estimator', '_desired_batch_size'
  ]
  def __init__(self,
               padding_options: Optional[config_pb2.PaddingOptions],
               metric_counts: List[int],
               desired_batch_size: Optional[int] = None):
    """Initializes accumulator using a list of metric counts per output."""
    super(TFCompilableMetricsAccumulator, self).__init__(
        # Input args of labels, predictions, example_weights for each output.
        input_counts=[3] * len(metric_counts),
        metric_counts=metric_counts,
        size_estimator_fn=_numpy_array_size_fn,
        desired_batch_size=desired_batch_size)
    self._pad = False
    if padding_options is not None:
      def get_padding_value(oneof_name):
        # Returns the configured padding value, or None if the oneof is unset
        oneof = padding_options.WhichOneof(oneof_name)
        return None if oneof is None else getattr(padding_options, oneof)
      self._pad = True
      self._label_padding = get_padding_value('label_padding')
      self._prediction_padding = get_padding_value('prediction_padding')
    self._pad_to_dim = 0
  def add_input(self, output_index: int, label: np.ndarray,
                prediction: np.ndarray, example_weight: np.ndarray):
    """Adds label, prediction, and example weight to output_index."""
    super(TFCompilableMetricsAccumulator,
          self).add_input(output_index, label, prediction, example_weight)
    if self._pad:
      # Track the widest last dimension seen so far; all stored arrays are
      # padded out to this width when read back in get_inputs
      self._pad_to_dim = max(self._pad_to_dim, label.shape[-1],
                             prediction.shape[-1])
  def get_inputs(
      self, output_index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Returns labels, predictions, and weights for output at given offset."""
    labels, preds, example_weights = super(TFCompilableMetricsAccumulator,
                                           self).get_inputs(output_index)
    if self._pad:
      def pad_value(
          name: Text, a: np.ndarray,
          configured_value: Optional[Union[float, int]]) -> Union[int, float]:
        # Defaults to 0 (int arrays) or 0.0 (float arrays); a configured value
        # must match the array's dtype kind or configuration is rejected
        if configured_value is None:
          return 0 if a.dtype.kind == 'i' else .0
        if isinstance(configured_value, int) and a.dtype.kind == 'i':
          return configured_value
        if isinstance(configured_value, float) and a.dtype.kind == 'f':
          return configured_value
        raise ValueError('%s padding is configured to be %s but data is %s' %
                         (name, type(configured_value), a.dtype))
      labels = [
          metric_util.pad(l, self._pad_to_dim,
                          pad_value('label', l, self._label_padding))
          for l in labels
      ]
      preds = [
          metric_util.pad(p, self._pad_to_dim,
                          pad_value('prediction', p, self._prediction_padding))
          for p in preds
      ]
    return (np.array(labels), np.array(preds), np.array(example_weights))
  def clear_inputs(self):
    """Clears currently stored inputs."""
    super(TFCompilableMetricsAccumulator, self).clear_inputs()
    # Reset the padding width along with the inputs it was derived from
    self._pad_to_dim = 0
| [
"numpy.add",
"numpy.array",
"tensorflow_model_analysis.utils.size_estimator.SizeEstimator"
] | [((3520, 3634), 'tensorflow_model_analysis.utils.size_estimator.SizeEstimator', 'size_estimator.SizeEstimator', ([], {'size_threshold': 'self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD', 'size_fn': 'size_estimator_fn'}), '(size_threshold=self.\n _TOTAL_INPUT_BYTE_SIZE_THRESHOLD, size_fn=size_estimator_fn)\n', (3548, 3634), False, 'from tensorflow_model_analysis.utils import size_estimator\n'), ((4982, 5010), 'numpy.add', 'np.add', (['cur_weights', 'weights'], {}), '(cur_weights, weights)\n', (4988, 5010), True, 'import numpy as np\n'), ((9771, 9787), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (9779, 9787), True, 'import numpy as np\n'), ((9789, 9804), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (9797, 9804), True, 'import numpy as np\n'), ((9806, 9831), 'numpy.array', 'np.array', (['example_weights'], {}), '(example_weights)\n', (9814, 9831), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import argparse
from time import gmtime, strftime
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def run_validation(model, valid_dataloader):
model.eval()
loss_func = nn.CrossEntropyLoss()
acc_list, loss_list = [], []
with torch.no_grad():
for i, (inputs, labels) in enumerate(tqdm(valid_dataloader)):
inputs, labels = inputs.float().to(device), labels.to(device)
preds= model(inputs)
pred_idx = preds.max(1).indices
acc = (pred_idx == labels).sum().item() / labels.size(0)
acc_list.append(acc)
loss = loss_func(preds, labels).item()
loss_list.append(loss)
valid_loss = np.array(loss_list).mean()
valid_acc = np.array(acc_list).mean()
return valid_loss, valid_acc
def run_pretrain(args):
print(args)
torch.set_num_threads(args.n_workers)
model_type = 'mobilenet_v2_torchhub'
pretrained = True # load imagenet weight
experiment_dir = 'pretrained_{}'.format(model_type) if args.experiment_dir is None else args.experiment_dir
os.mkdir(experiment_dir)
checkpoint = None
input_size = 224
n_classes = 120
log = open(experiment_dir + '/pretrain.log', 'w')
model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes,
input_size=input_size, checkpoint=checkpoint)
model = model.to(device)
print(model)
# count_flops(model, device=device)
train_dataset = TrainDataset('./data/stanford-dogs/Processed/train')
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
valid_dataset = EvalDataset('./data/stanford-dogs/Processed/valid')
valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
best_valid_acc = 0.0
for epoch in range(args.n_epochs):
print('Start training epoch {}'.format(epoch))
loss_list = []
# train
model.train()
for i, (inputs, labels) in enumerate(tqdm(train_dataloader)):
optimizer.zero_grad()
inputs, labels = inputs.float().to(device), labels.to(device)
preds = model(inputs)
loss = criterion(preds, labels)
loss_list.append(loss.item())
loss.backward()
optimizer.step()
# validation
valid_loss, valid_acc = run_validation(model, valid_dataloader)
train_loss = np.array(loss_list).mean()
print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format
(epoch, train_loss, valid_loss, valid_acc))
log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}\n'.format
(epoch, train_loss, valid_loss, valid_acc))
# save
if valid_acc > best_valid_acc:
best_valid_acc = valid_acc
torch.save(model.state_dict(), experiment_dir + '/checkpoint_best.pt')
log.close()
def parse_args():
parser = argparse.ArgumentParser(description='Example code for pruning MobileNetV2')
parser.add_argument('--experiment_dir', type=str, default=None,
help='directory containing the pretrained model')
parser.add_argument('--checkpoint_name', type=str, default='checkpoint_best.pt',
help='checkpoint of the pretrained model')
# finetuning parameters
parser.add_argument('--n_workers', type=int, default=16,
help='number of threads')
parser.add_argument('--n_epochs', type=int, default=180,
help='number of epochs to train the model')
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training and inference')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
run_pretrain(args)
| [
"os.mkdir",
"tqdm.tqdm",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.set_num_threads",
"torch.cuda.is_available",
"numpy.array",
"torch.no_grad"
] | [((427, 448), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (446, 448), True, 'import torch.nn as nn\n'), ((1088, 1125), 'torch.set_num_threads', 'torch.set_num_threads', (['args.n_workers'], {}), '(args.n_workers)\n', (1109, 1125), False, 'import torch\n'), ((1357, 1381), 'os.mkdir', 'os.mkdir', (['experiment_dir'], {}), '(experiment_dir)\n', (1365, 1381), False, 'import os\n'), ((1855, 1922), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True)\n', (1865, 1922), False, 'from torch.utils.data import DataLoader\n'), ((2018, 2086), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(valid_dataset, batch_size=args.batch_size, shuffle=False)\n', (2028, 2086), False, 'from torch.utils.data import DataLoader\n'), ((2104, 2125), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2123, 2125), True, 'import torch.nn as nn\n'), ((3475, 3550), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Example code for pruning MobileNetV2"""'}), "(description='Example code for pruning MobileNetV2')\n", (3498, 3550), False, 'import argparse\n'), ((305, 330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (328, 330), False, 'import torch\n'), ((491, 506), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (504, 506), False, 'import torch\n'), ((553, 575), 'tqdm.tqdm', 'tqdm', (['valid_dataloader'], {}), '(valid_dataloader)\n', (557, 575), False, 'from tqdm import tqdm\n'), ((935, 954), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (943, 954), True, 'import numpy as np\n'), ((978, 996), 'numpy.array', 'np.array', (['acc_list'], {}), '(acc_list)\n', (986, 996), True, 'import numpy as np\n'), ((2474, 2496), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), 
'(train_dataloader)\n', (2478, 2496), False, 'from tqdm import tqdm\n'), ((2911, 2930), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (2919, 2930), True, 'import numpy as np\n')] |
import unittest
import numpy
import cupy
from cupy import cuda
from cupy import testing
import cupyx.scipy.linalg
if cupyx.scipy._scipy_available:
import scipy.linalg
@testing.gpu
@testing.parameterize(*testing.product({
'shape': [(1, 1), (2, 2), (3, 3), (5, 5)],
}))
@testing.fix_random()
@unittest.skipUnless(
cuda.cusolver_enabled, 'Only cusolver in CUDA 8.0 is supported')
@testing.with_requires('scipy')
class TestLUFactor(unittest.TestCase):
@testing.for_float_dtypes(no_float16=True)
def test_lu_factor(self, dtype):
array = numpy.random.randn(*self.shape)
a_cpu = numpy.asarray(array, dtype=dtype)
a_gpu = cupy.asarray(array, dtype=dtype)
result_cpu = scipy.linalg.lu_factor(a_cpu)
result_gpu = cupyx.scipy.linalg.lu_factor(a_gpu)
self.assertEqual(len(result_cpu), len(result_gpu))
self.assertEqual(result_cpu[0].dtype, result_gpu[0].dtype)
self.assertEqual(result_cpu[1].dtype, result_gpu[1].dtype)
cupy.testing.assert_allclose(result_cpu[0], result_gpu[0], atol=1e-5)
cupy.testing.assert_array_equal(result_cpu[1], result_gpu[1])
@testing.gpu
@testing.parameterize(*testing.product({
'trans': [0, 1, 2],
'shapes': [((4, 4), (4,)), ((5, 5), (5, 2))],
}))
@testing.fix_random()
@unittest.skipUnless(
cuda.cusolver_enabled, 'Only cusolver in CUDA 8.0 is supported')
@testing.with_requires('scipy')
class TestLUSolve(unittest.TestCase):
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_lu_solve(self, xp, scp, dtype):
a_shape, b_shape = self.shapes
A = testing.shaped_random(a_shape, xp, dtype=dtype)
b = testing.shaped_random(b_shape, xp, dtype=dtype)
lu = scp.linalg.lu_factor(A)
return scp.linalg.lu_solve(lu, b, trans=self.trans)
| [
"cupy.testing.fix_random",
"cupy.asarray",
"numpy.random.randn",
"numpy.asarray",
"cupy.testing.assert_array_equal",
"unittest.skipUnless",
"cupy.testing.with_requires",
"cupy.testing.numpy_cupy_allclose",
"cupy.testing.product",
"cupy.testing.shaped_random",
"cupy.testing.for_float_dtypes",
"... | [((280, 300), 'cupy.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (298, 300), False, 'from cupy import testing\n'), ((302, 390), 'unittest.skipUnless', 'unittest.skipUnless', (['cuda.cusolver_enabled', '"""Only cusolver in CUDA 8.0 is supported"""'], {}), "(cuda.cusolver_enabled,\n 'Only cusolver in CUDA 8.0 is supported')\n", (321, 390), False, 'import unittest\n'), ((393, 423), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy"""'], {}), "('scipy')\n", (414, 423), False, 'from cupy import testing\n'), ((1279, 1299), 'cupy.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (1297, 1299), False, 'from cupy import testing\n'), ((1301, 1389), 'unittest.skipUnless', 'unittest.skipUnless', (['cuda.cusolver_enabled', '"""Only cusolver in CUDA 8.0 is supported"""'], {}), "(cuda.cusolver_enabled,\n 'Only cusolver in CUDA 8.0 is supported')\n", (1320, 1389), False, 'import unittest\n'), ((1392, 1422), 'cupy.testing.with_requires', 'testing.with_requires', (['"""scipy"""'], {}), "('scipy')\n", (1413, 1422), False, 'from cupy import testing\n'), ((469, 510), 'cupy.testing.for_float_dtypes', 'testing.for_float_dtypes', ([], {'no_float16': '(True)'}), '(no_float16=True)\n', (493, 510), False, 'from cupy import testing\n'), ((1467, 1508), 'cupy.testing.for_float_dtypes', 'testing.for_float_dtypes', ([], {'no_float16': '(True)'}), '(no_float16=True)\n', (1491, 1508), False, 'from cupy import testing\n'), ((1514, 1571), 'cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'atol': '(1e-05)', 'scipy_name': '"""scp"""'}), "(atol=1e-05, scipy_name='scp')\n", (1541, 1571), False, 'from cupy import testing\n'), ((564, 595), 'numpy.random.randn', 'numpy.random.randn', (['*self.shape'], {}), '(*self.shape)\n', (582, 595), False, 'import numpy\n'), ((612, 645), 'numpy.asarray', 'numpy.asarray', (['array'], {'dtype': 'dtype'}), '(array, dtype=dtype)\n', (625, 645), False, 'import numpy\n'), ((662, 694), 'cupy.asarray', 
'cupy.asarray', (['array'], {'dtype': 'dtype'}), '(array, dtype=dtype)\n', (674, 694), False, 'import cupy\n'), ((1004, 1074), 'cupy.testing.assert_allclose', 'cupy.testing.assert_allclose', (['result_cpu[0]', 'result_gpu[0]'], {'atol': '(1e-05)'}), '(result_cpu[0], result_gpu[0], atol=1e-05)\n', (1032, 1074), False, 'import cupy\n'), ((1082, 1143), 'cupy.testing.assert_array_equal', 'cupy.testing.assert_array_equal', (['result_cpu[1]', 'result_gpu[1]'], {}), '(result_cpu[1], result_gpu[1])\n', (1113, 1143), False, 'import cupy\n'), ((210, 270), 'cupy.testing.product', 'testing.product', (["{'shape': [(1, 1), (2, 2), (3, 3), (5, 5)]}"], {}), "({'shape': [(1, 1), (2, 2), (3, 3), (5, 5)]})\n", (225, 270), False, 'from cupy import testing\n'), ((1667, 1714), 'cupy.testing.shaped_random', 'testing.shaped_random', (['a_shape', 'xp'], {'dtype': 'dtype'}), '(a_shape, xp, dtype=dtype)\n', (1688, 1714), False, 'from cupy import testing\n'), ((1727, 1774), 'cupy.testing.shaped_random', 'testing.shaped_random', (['b_shape', 'xp'], {'dtype': 'dtype'}), '(b_shape, xp, dtype=dtype)\n', (1748, 1774), False, 'from cupy import testing\n'), ((1182, 1269), 'cupy.testing.product', 'testing.product', (["{'trans': [0, 1, 2], 'shapes': [((4, 4), (4,)), ((5, 5), (5, 2))]}"], {}), "({'trans': [0, 1, 2], 'shapes': [((4, 4), (4,)), ((5, 5), (5,\n 2))]})\n", (1197, 1269), False, 'from cupy import testing\n')] |
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from prettytable import PrettyTable
headers = PrettyTable(['№', 'x1', 'x2', 'function(x1,x2)', 'dgrad'])
def table(count, x1, x2, delta_grad, function):
Tablelist = {
'№': count,
'x1': round(x1, 7),
'x2': round(x2, 7),
'function(x1,x2)': round(function, 8),
'dgrad': round(delta_grad, 8),
}
headers.add_row(Tablelist.values())
def output(x1, x2, count, eps_grad):
return f'Число шагов = {count}\nx1 = {x1}, x2 = {x2}\nfunction(x1,x2) = {function(x1, x2)}\neps_grad = {eps_grad}'
def function(x1, x2):
return 10 * x1 * x1 + 2 * x2 * x2 - 2 * x1 - 2 * x2 + 1 - 4 * x1 * x2
#return 22 * x1 + 0.1 * x2 + np.exp(4.84 * x1 * x1 + 1.2 * x2 * x2)
def grad_function(x1, x2, delta):
def derivative(x1, x2, delta_x1, delta_x2):
der = (function(x1 + delta_x1, x2 + delta_x2) - function(x1 - delta_x1, x2 - delta_x2)) / (
2 * delta)
return der
gradient = ([-1 * derivative(x1, x2, delta, 0), -1 * derivative(x1, x2, 0, delta)])
return gradient
def delta_grad(gradient):
d_g = sqrt(gradient[0] ** 2 + gradient[1] ** 2)
return d_g
def ort(grad0, grad1):
ortog = grad0[0] * grad1[0] + grad0[1] * grad1[1]
return ortog
def gss(a, b, gradient, x1, x2, eps, s):
interval = (b - a)
a1 = a + interval * (1 - s)
b1 = a + interval * s
fa1 = function(x1 + a1 * gradient[0], x2 + a1 * gradient[1])
fb1 = function(x1 + b1 * gradient[0], x2 + b1 * gradient[1])
while abs(interval) >= eps:
if fa1 <= fb1: # <= - минимум, >= - максимум
b = b1
b1 = a1
fb1 = fa1
interval = interval * s
a1 = a + interval * (1 - s)
fa1 = function(x1 + a1 * gradient[0], x2 + a1 * gradient[1])
else:
a = a1
a1 = b1
fa1 = fb1
interval = interval * s
b1 = a + interval * s
fb1 = function(x1 + b1 * gradient[0], x2 + b1 * gradient[1])
L = (a + b) / 2
return L
def grad_move(x10, x20, lam, gradient):
x1 = (x10 + lam * gradient[0])
x2 = (x20 + lam * gradient[1])
old_gradient = gradient
gradient = grad_function(x1, x2, delta)
new_function = function(x1, x2)
return [x1, x2, old_gradient, gradient, new_function]
def asc(x1, x2, delta):
count = 0
points_x = [x1]
points_y = [x2]
func = [function(x1, x2)]
delta_gradient = 1
eps_grad = 0.02
a, b = 0, 1
eps = (1 - a) / 100000
while delta_gradient > eps_grad:
count += 1
gradient = grad_function(x1, x2, delta)
lam = gss(a, b, gradient, x1, x2, eps, s)
(x1, x2, old_gradient,
gradient, new_function) = grad_move(x1, x2, lam, gradient)
nf = function(x1, x2)
delta_gradient = delta_grad(gradient)
check = ort(old_gradient, gradient)
eps_ort = sqrt(ort(gradient, gradient)) / 1000
points_x.append(x1)
points_y.append(x2)
func.append(nf)
if abs(check) >= abs(eps_ort):
delta /= 10
table(count, x1, x2, delta_gradient, function(x1, x2))
return output(x1, x2, count, eps_grad), [points_x, points_y], func
if __name__ == '__main__':
s = ((sqrt(5) - 1) / 2)
x1 = 1
x2 = 1
delta = 0.000001
info, points_coord, coord_func = asc(x1, x2, delta)
print(headers)
print(info)
x_axis = y_axis = np.arange(0, 2, 0.001)
X, Y = np.meshgrid(x_axis, y_axis)
Zs = np.array(function(np.ravel(X), np.ravel(Y)))
Z = Zs.reshape(X.shape)
sorted_coord_func = sorted(coord_func)
cs = plt.contour(X, Y, Z, levels=sorted_coord_func)
plt.clabel(cs)
plt.xlabel('x1')
plt.ylabel('x2')
plt.plot(points_coord[0], points_coord[1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, zorder=2)
ax.plot(points_coord[0], points_coord[1], coord_func, color='red',
zorder=1)
plt.show()
| [
"matplotlib.pyplot.clabel",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.plot",
"numpy.ravel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.arange",
"prettytable.PrettyTable",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((121, 179), 'prettytable.PrettyTable', 'PrettyTable', (["['№', 'x1', 'x2', 'function(x1,x2)', 'dgrad']"], {}), "(['№', 'x1', 'x2', 'function(x1,x2)', 'dgrad'])\n", (132, 179), False, 'from prettytable import PrettyTable\n'), ((1162, 1203), 'math.sqrt', 'sqrt', (['(gradient[0] ** 2 + gradient[1] ** 2)'], {}), '(gradient[0] ** 2 + gradient[1] ** 2)\n', (1166, 1203), False, 'from math import sqrt\n'), ((3514, 3536), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.001)'], {}), '(0, 2, 0.001)\n', (3523, 3536), True, 'import numpy as np\n'), ((3548, 3575), 'numpy.meshgrid', 'np.meshgrid', (['x_axis', 'y_axis'], {}), '(x_axis, y_axis)\n', (3559, 3575), True, 'import numpy as np\n'), ((3710, 3756), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z'], {'levels': 'sorted_coord_func'}), '(X, Y, Z, levels=sorted_coord_func)\n', (3721, 3756), True, 'import matplotlib.pyplot as plt\n'), ((3761, 3775), 'matplotlib.pyplot.clabel', 'plt.clabel', (['cs'], {}), '(cs)\n', (3771, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3780, 3796), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {}), "('x1')\n", (3790, 3796), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x2"""'], {}), "('x2')\n", (3811, 3817), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3864), 'matplotlib.pyplot.plot', 'plt.plot', (['points_coord[0]', 'points_coord[1]'], {}), '(points_coord[0], points_coord[1])\n', (3830, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3875, 3887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3885, 3887), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4079, 4081), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3345), 'math.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (3342, 3345), False, 'from math import sqrt\n'), ((3603, 3614), 'numpy.ravel', 'np.ravel', (['X'], {}), '(X)\n', (3611, 3614), True, 'import 
numpy as np\n'), ((3616, 3627), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (3624, 3627), True, 'import numpy as np\n')] |
"""
DSACALIB/UTILS.PY
<NAME>, <EMAIL>, 10/2019
Modified for python3 from DSA-10 routines written by <NAME>, <NAME>.
Routines to interact w/ fits visibilities recorded by DSA-10, hdf5 visibilities
recorded by DSA-110, and visibility in CASA measurement sets.
"""
# TODO: Update source class
# Always import scipy before importing casatools.
import traceback
from scipy.ndimage.filters import median_filter
import numpy as np
from antpos.utils import get_itrf
import astropy.units as u
from astropy.coordinates import Angle
from dsacalib import constants as ct
import casatools as cc
def exception_logger(logger, task, exception, throw):
"""Logs exception traceback to syslog using the dsa_syslog module.
Parameters
----------
logger : dsa_syslog.DsaSyslogger() instance
The logger used for within the reduction pipeline.
task : str
A short description of where in the pipeline the error occured.
exception : Exception
The exception that occured.
throw : boolean
If set to True, the exception is raised after the traceback is written
to syslogs.
"""
error_string = 'During {0}, {1} occurred:\n{2}'.format(
task, type(exception).__name__, ''.join(
traceback.format_tb(exception.__traceback__)
)
)
if logger is not None:
logger.error(error_string)
else:
print(error_string)
if throw:
raise exception
class src():
"""Simple class for holding source parameters.
"""
def __init__(self, name, ra, dec, I=1., epoch='J2000', pa=None,
maj_axis=None, min_axis=None):
"""Initializes the src class.
Parameters
----------
name : str
Identifier for the source.
ra : str
The right ascension of the source. e.g. "12h00m19.21s".Astropy
quantity also accepted.
dec : str
The declination of the source. e.g. "+73d00m45.7s". Astropy
quantity also accepted.
I : float
The flux of the source in Jy. Defaults 1.
epoch : str
The epoch of `ra` and `dec`. Defaults "J2000".
pa : float
The position angle in degrees. Defaults ``None``.
maj_axis : float
The major axis in arcseconds. Defaults ``None``.
min_axis : float
The minor axis in arcseconds. Defaults ``None``.
"""
self.name = name
self.I = I
assert epoch == 'J2000'
self.epoch = 'J2000'
if isinstance(ra, str):
ra = to_deg(ra)
if isinstance(dec, str):
dec = to_deg(dec)
self.ra = ra
self.dec = dec
self.direction = direction(
'J2000',
ra.to_value(u.rad),
dec.to_value(u.rad)
)
self.pa = pa
if maj_axis is None:
self.maj_axis = None
else:
self.maj_axis = maj_axis*u.arcsecond
if min_axis is None:
self.min_axis = None
else:
self.min_axis = min_axis*u.arcsecond
def to_deg(string):
"""Converts a string representation of RA or DEC to degrees.
Parameters
----------
string : str
RA or DEC in string format e.g. "12h00m19.21s" or "+73d00m45.7s".
Returns
-------
deg : astropy quantity
The angle in degrees.
"""
return Angle(string).to(u.deg)
def get_autobl_indices(nant, casa=False):
"""Returns a list of the indices containing the autocorrelations.
Can return the index for either correlator-ordered visibilities (`casa` set
to ``False``) or CASA-ordered visibilities (`casa` set to ``True``).
Parameters
----------
nant : int
The number of antennas in the visibility set.
casa : boolean
Whether the visibilities follow CASA ordering standards (`casa` set to
``True``) or DSA-10/DSA-110 correlator ordering standards (`casa` set
to ``False``). Defaults to ``False``, or correlator ordering standards.
Returns
-------
auto_bls : list
The list of indices in the visibilities corresponding to
autocorrelations.
"""
auto_bls = []
i = -1
for j in range(1, nant+1):
i += j
auto_bls += [i]
if casa:
nbls = (nant*(nant+1))//2
auto_bls = [(nbls-1)-aidx for aidx in auto_bls]
auto_bls = auto_bls[::-1]
return auto_bls
def get_antpos_itrf(antpos):
"""Reads and orders antenna positions from a text or csv file.
Parameters
----------
antpos : str
The path to the text or csv file containing the antenna positions.
Returns
-------
anum : list(int)
The antenna numbers, in numerical order.
xx, yy, zz : list(float)
The ITRF coordinates of the antennas, in meters.
"""
if antpos[-4:] == '.txt':
anum, xx, yy, zz = np.loadtxt(antpos).transpose()
anum = anum.astype(int)+1
anum, xx, yy, zz = zip(*sorted(zip(anum, xx, yy, zz)))
elif antpos[-4:] == '.csv':
df = get_itrf(antpos)
anum = np.array(df.index)
xx = np.array(df[['dx_m']])
yy = np.array(df[['dy_m']])
zz = np.array(df[['dz_m']])
return anum, xx, yy, zz
def mask_bad_bins(vis, axis, thresh=6.0, medfilt=False, nmed=129):
"""Masks bad channels or time bins in visibility data.
Parameters
----------
vis : ndarray
The visibility array, with dimensions (baselines, time, frequency,
polarization)
axis : int
The axis to flag along. `axis` set to 1 will flag bad time bins. `axis`
set to 2 will flag bad frequency bins.
thresh : float
The threshold above which to flag data. Anything that deviates from the
median by more than `thresh` multiplied by the standard deviation is
flagged.
medfilt : Boolean
Whether to median filter to remove an average trend. If ``True``, will
median filter. If ``False``, will subtract the median for the
baseline/pol pair.
nmed : int
The size of the median filter to use. Only used in medfilt is ``True``.
Must be an odd integer.
Returns
-------
good_bins : ndarray
Has a value of 1 where the bin is good, and 0 where the bin should be
flagged. If `axis` is 2, the dimensions are (baselines, 1, frequency,
polarization). If `axis` is 1, the dimensions are (baselines, time, 1,
polarization).
fraction_flagged : ndarray
The fraction of data flagged for each baseline/polarization pair.
Dimensions (baselines, polarization).
"""
# TODO: Update medfilt to use the correct axis
assert not medfilt
assert axis in (1, 2)
avg_axis = 1 if axis == 2 else 2
# Average over time (or frequency) first.
vis_avg = np.abs(np.mean(vis, axis=avg_axis, keepdims=True))
# Median filter over frequency (or time) and remove the median trend or
# remove the median.
if medfilt:
vis_avg_mf = median_filter(vis_avg.real, size=(1, nmed, 1))
vis_avg -= vis_avg_mf
else:
vis_avg -= np.median(vis_avg, axis=1, keepdims=True)
# Calculate the standard deviation along the frequency (or time) axis.
vis_std = np.std(vis_avg, axis=1, keepdims=True)
# Get good channels.
good_bins = np.abs(vis_avg) < thresh*vis_std
fraction_flagged = (
1-good_bins.sum(axis=axis)/good_bins.shape[axis]
).squeeze()
return good_bins, fraction_flagged
def mask_bad_pixels(vis, thresh=6.0, mask=None):
r"""Masks pixels with values above a SNR threshold within each visibility.
Parameters
----------
vis : ndarray
The complex visibilities. Dimensions (baseline, time, frequency,
polarization).
thresh : float
The threshold above which to flag data. Data above `thresh`\*the
standard deviation in each channel of each visiblity is flagged.
Defaults 6.
mask : ndarray
A mask for data that is already flagged. Should be 0 where data has
been flagged, 1 otherwise. Same dimensions as `vis`. Data previously
flagged is not used in the calculation of the channel standard
deviations.
Returns
-------
good_pixels : ndarray
Whether a given pixel in `vis` is good (1 or ``True``) or bad (i.e.
above the threshold: 0 or ``False``). Same dimensions as ``vis``.
fraction_flagged : array
The ratio of the flagged data to the total number of pixels for each
baseline/polarization.
"""
(nbls, nt, nf, npol) = vis.shape
vis = np.abs(vis.reshape(nbls, -1, npol))
vis = vis-np.median(vis, axis=1, keepdims=True)
if mask is not None:
vis = vis*mask.reshape(nbls, -1, npol)
std = np.std(np.abs(vis), axis=1, keepdims=True)
good_pixels = np.abs(vis) < thresh*std
fraction_flagged = 1 - good_pixels.sum(1)/good_pixels.shape[1]
good_pixels = good_pixels.reshape(nbls, nt, nf, npol)
return good_pixels, fraction_flagged
def daz_dha(dec, daz=None, dha=None, lat=ct.OVRO_LAT):
"""Converts an offset between azimuth and hour angle.
Assumes that the offset in azimuth or hour angle from an azimuth of pi or
hour angle of 0 is small. One of `daz` or `dha` must be provided, the
other is calculated.
Parameters
----------
dec : float
The pointing declination of the antenna in radians.
daz : float
The azimuth offset in radians. ``None`` may also be passed, in which
case the azimuth offset is calculated and returned. Defaults to
``None``.
dha : float
The hour angle offset in radians. ``None`` may also be passed, in which
case the hour angle offset is calculated and returned. Defaults to
``None``.
lat : float
The latitude of the antenna in radians. Defaults to the value of
``ovro_lat`` defined in ``dsacalib.constants``.
Returns
-------
float
The converted offset. If the value of `daz` passed was not ``None``,
this is the hour angle offset corresponding to the azimuth offset
`daz`. If the value of `dha` passed was not ``None``, this is the
azimuth offset corresonding to the hour angle offset `dha`.
Raises
------
RuntimeError
If neither `daz or `dha` is defined.
"""
factor = -1*(np.sin(lat)-np.tan(dec)*np.cos(lat))
if daz is not None:
assert dha is None, "daz and dha cannot both be defined."
ans = daz*factor
elif dha is not None:
ans = dha/factor
else:
raise RuntimeError('One of daz or dha must be defined')
return ans
class direction():
"""Class for holding sky coordinates and converting between ICRS and FK5.
Parameters
----------
epoch : str
'J2000' (for ICRS or J2000 coordinates) or 'HADEC' (for FK5 coordinates
at an equinox of obstime)
lon : float
The longitude (right ascension or hour angle) in radians
lat : float
The latitude (declination) in radians
obstime : float
The observation time in mjd.
observatory : str
The name of the observatory
"""
def __init__(self, epoch, lon, lat, obstime=None, observatory='OVRO_MMA'):
assert epoch in ['J2000', 'HADEC']
if epoch == 'HADEC':
assert obstime is not None
self.epoch = epoch
self.lon = lon
self.lat = lat
self.obstime = obstime
self.observatory = observatory
def J2000(self, obstime=None, observatory=None):
"""Provides direction in J2000 coordinates.
Parameters
----------
obstime : float
Time of observation in mjd.
location : str
Name of the observatory.
Returns
-------
tuple
ra, dec at J2000 in units of radians.
"""
if self.epoch == 'J2000':
return self.lon, self.lat
assert self.epoch == 'HADEC'
if obstime is None:
assert self.obstime is not None
obstime = self.obstime
if observatory is None:
assert self.observatory is not None
observatory = self.observatory
me = cc.measures()
epoch = me.epoch(
'UTC',
'{0}d'.format(obstime)
)
location = me.observatory(observatory)
source = me.direction(
'HADEC',
'{0}rad'.format(self.lon),
'{0}rad'.format(self.lat)
)
me.doframe(epoch)
me.doframe(location)
output = me.measure(source, 'J2000')
assert output['m0']['unit'] == 'rad'
assert output['m1']['unit'] == 'rad'
return output['m0']['value'], output['m1']['value']
def hadec(self, obstime=None, observatory=None):
"""Provides direction in HADEC (FK5) at `obstime`.
Parameters
----------
obstime : float
Time of observation in mjd.
location : str
Name of the observatory.
Returns
-------
tuple
ha, dec at obstime in units of radians.
"""
if self.epoch == 'HADEC':
assert obstime is None
return self.lon, self.lat
assert self.epoch == 'J2000'
if obstime is None:
assert self.obstime is not None
obstime = self.obstime
if observatory is None:
assert self.observatory is not None
observatory = self.observatory
me = cc.measures()
epoch = me.epoch(
'UTC',
'{0}d'.format(obstime)
)
location = me.observatory(observatory)
source = me.direction(
'J2000',
'{0}rad'.format(self.lon),
'{0}rad'.format(self.lat)
)
me.doframe(epoch)
me.doframe(location)
output = me.measure(source, 'HADEC')
assert output['m0']['unit'] == 'rad'
assert output['m1']['unit'] == 'rad'
return output['m0']['value'], output['m1']['value']
| [
"numpy.abs",
"casatools.measures",
"numpy.std",
"numpy.median",
"traceback.format_tb",
"numpy.mean",
"antpos.utils.get_itrf",
"numpy.array",
"numpy.sin",
"numpy.loadtxt",
"numpy.tan",
"numpy.cos",
"astropy.coordinates.Angle",
"scipy.ndimage.filters.median_filter"
] | [((7337, 7375), 'numpy.std', 'np.std', (['vis_avg'], {'axis': '(1)', 'keepdims': '(True)'}), '(vis_avg, axis=1, keepdims=True)\n', (7343, 7375), True, 'import numpy as np\n'), ((6918, 6960), 'numpy.mean', 'np.mean', (['vis'], {'axis': 'avg_axis', 'keepdims': '(True)'}), '(vis, axis=avg_axis, keepdims=True)\n', (6925, 6960), True, 'import numpy as np\n'), ((7100, 7146), 'scipy.ndimage.filters.median_filter', 'median_filter', (['vis_avg.real'], {'size': '(1, nmed, 1)'}), '(vis_avg.real, size=(1, nmed, 1))\n', (7113, 7146), False, 'from scipy.ndimage.filters import median_filter\n'), ((7206, 7247), 'numpy.median', 'np.median', (['vis_avg'], {'axis': '(1)', 'keepdims': '(True)'}), '(vis_avg, axis=1, keepdims=True)\n', (7215, 7247), True, 'import numpy as np\n'), ((7417, 7432), 'numpy.abs', 'np.abs', (['vis_avg'], {}), '(vis_avg)\n', (7423, 7432), True, 'import numpy as np\n'), ((8753, 8790), 'numpy.median', 'np.median', (['vis'], {'axis': '(1)', 'keepdims': '(True)'}), '(vis, axis=1, keepdims=True)\n', (8762, 8790), True, 'import numpy as np\n'), ((8880, 8891), 'numpy.abs', 'np.abs', (['vis'], {}), '(vis)\n', (8886, 8891), True, 'import numpy as np\n'), ((8934, 8945), 'numpy.abs', 'np.abs', (['vis'], {}), '(vis)\n', (8940, 8945), True, 'import numpy as np\n'), ((12377, 12390), 'casatools.measures', 'cc.measures', ([], {}), '()\n', (12388, 12390), True, 'import casatools as cc\n'), ((13693, 13706), 'casatools.measures', 'cc.measures', ([], {}), '()\n', (13704, 13706), True, 'import casatools as cc\n'), ((1248, 1292), 'traceback.format_tb', 'traceback.format_tb', (['exception.__traceback__'], {}), '(exception.__traceback__)\n', (1267, 1292), False, 'import traceback\n'), ((3429, 3442), 'astropy.coordinates.Angle', 'Angle', (['string'], {}), '(string)\n', (3434, 3442), False, 'from astropy.coordinates import Angle\n'), ((5115, 5131), 'antpos.utils.get_itrf', 'get_itrf', (['antpos'], {}), '(antpos)\n', (5123, 5131), False, 'from antpos.utils import get_itrf\n'), 
((5147, 5165), 'numpy.array', 'np.array', (['df.index'], {}), '(df.index)\n', (5155, 5165), True, 'import numpy as np\n'), ((5179, 5201), 'numpy.array', 'np.array', (["df[['dx_m']]"], {}), "(df[['dx_m']])\n", (5187, 5201), True, 'import numpy as np\n'), ((5215, 5237), 'numpy.array', 'np.array', (["df[['dy_m']]"], {}), "(df[['dy_m']])\n", (5223, 5237), True, 'import numpy as np\n'), ((5251, 5273), 'numpy.array', 'np.array', (["df[['dz_m']]"], {}), "(df[['dz_m']])\n", (5259, 5273), True, 'import numpy as np\n'), ((10481, 10492), 'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (10487, 10492), True, 'import numpy as np\n'), ((4942, 4960), 'numpy.loadtxt', 'np.loadtxt', (['antpos'], {}), '(antpos)\n', (4952, 4960), True, 'import numpy as np\n'), ((10493, 10504), 'numpy.tan', 'np.tan', (['dec'], {}), '(dec)\n', (10499, 10504), True, 'import numpy as np\n'), ((10505, 10516), 'numpy.cos', 'np.cos', (['lat'], {}), '(lat)\n', (10511, 10516), True, 'import numpy as np\n')] |
import random
from collections import namedtuple
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
# One replay-buffer transition: (state, action, reward, next_state, done).
Experience = namedtuple("Experience", field_names="state action reward next_state done")
def tensor(array):
    """Wrap an array-like value as a float32 torch tensor (copies the data)."""
    converted = torch.tensor(array, dtype=torch.float)
    return converted
class Critic(nn.Module):
    """Q-value network: embeds the state, concatenates the action, and scores
    the (state, action) pair with a single scalar."""
    def __init__(self, state_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(state_size, 256)
        self.fc2 = nn.Linear(256 + action_size, 128)
        self.qval = nn.Linear(128, 1)
        self.init_parameters()

    def init_parameters(self):
        """Kaiming init for the leaky-relu hidden layers, Xavier for the linear head."""
        for hidden in (self.fc1, self.fc2):
            nn.init.kaiming_normal_(hidden.weight)
        nn.init.xavier_normal_(self.qval.weight)

    def forward(self, state, action):
        hidden = F.leaky_relu(self.fc1(state))
        hidden = torch.cat((hidden, action), dim=1)
        hidden = F.leaky_relu(self.fc2(hidden))
        return self.qval(hidden).squeeze()
class Actor(nn.Module):
    """Deterministic policy network mapping states to tanh-squashed actions in [-1, 1]."""
    def __init__(self, state_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(state_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.act = nn.Linear(256, action_size)
        self.init_parameters()

    def init_parameters(self):
        """Kaiming init for leaky-relu hidden layers, Xavier (tanh gain) for the output."""
        for hidden in (self.fc1, self.fc2):
            nn.init.kaiming_normal_(hidden.weight)
        nn.init.xavier_normal_(self.act.weight, nn.init.calculate_gain("tanh"))

    def forward(self, state):
        hidden = F.leaky_relu(self.fc1(state))
        hidden = F.leaky_relu(self.fc2(hidden))
        return torch.tanh(self.act(hidden))
class ReplayMemory:
    """Fixed-capacity ring buffer of experiences with uniform random sampling."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.index = 0

    def push(self, experience):
        """Store one experience, overwriting the oldest entry once full."""
        if len(self.memory) == self.capacity:
            self.memory[self.index] = experience
        else:
            self.memory.append(experience)
        self.index = (self.index + 1) % self.capacity

    def sample(self, batch_size):
        """Return batch_size distinct experiences chosen uniformly at random."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class DDPGAgent:
    """Deep Deterministic Policy Gradient agent: actor/critic networks with
    Polyak-averaged target copies and a uniform replay buffer.
    """
    def __init__(self, state_size, action_size, load=False):
        """Build networks and optimizers; load=True restores pretrained weights
        from actor_pretrained.pt / critic_pretrained.pt in the working directory.
        """
        self.state_size = state_size
        self.action_size = action_size
        self.gamma = 0.99       # discount factor
        self.polyak = 0.99      # target-network averaging coefficient
        self.batch_size = 128
        self.train_start = 128  # minimum replay size before learning begins
        self.memory = ReplayMemory(int(1e6))
        self.actor = Actor(state_size, action_size)
        self.actor_target = Actor(state_size, action_size)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic = Critic(state_size, action_size)
        self.critic_target = Critic(state_size, action_size)
        self.critic_target.load_state_dict(self.critic.state_dict())
        if load:
            self.load_pretrained()
        self.actor_opt = optim.Adam(self.actor.parameters(), lr=1e-4)
        self.critic_opt = optim.Adam(self.critic.parameters(), lr=1e-4, weight_decay=0.0001)
    def push_experience(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.push(Experience(state, action, reward, next_state, done))
    def update_target_nets_soft(self):
        """Polyak-average the online weights into the target networks."""
        # Update critic target
        for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):
            target_param.data.copy_(self.polyak * target_param.data + (1 - self.polyak) * param.data)
        # Update actor target
        for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):
            target_param.data.copy_(self.polyak * target_param.data + (1 - self.polyak) * param.data)
    def take_action(self, state, act_noise=0.2):
        """Deterministic policy action plus Gaussian exploration noise.

        NOTE(review): the noisy action is not clipped to the environment's
        action range here -- confirm the env tolerates values outside [-1, 1].
        """
        with torch.no_grad():
            return self.actor(state) + torch.normal(torch.zeros(self.action_size), act_noise)
    def optimize_model(self):
        """One DDPG update: move the critic towards the TD target, move the
        actor up the critic's gradient, then soft-update the targets.
        No-op until the replay buffer holds train_start transitions.
        """
        if len(self.memory) < self.train_start:
            return
        experiences = self.memory.sample(self.batch_size)
        batch = Experience(*zip(*experiences))
        state_batch = torch.stack(batch.state)
        action_batch = torch.stack(batch.action)
        reward_batch = torch.stack(batch.reward)
        non_final_mask = ~torch.tensor(batch.done)
        non_final_next_states = torch.stack([s for done, s in zip(batch.done, batch.next_state) if not done])
        Q_values = self.critic(state_batch, action_batch)
        # DDPG target #
        # Terminal transitions keep a zero bootstrap value.
        next_state_values = torch.zeros(self.batch_size)
        actions = self.actor_target(non_final_next_states)
        next_state_values[non_final_mask] = self.critic_target(non_final_next_states, actions)
        Q_targets = reward_batch + self.gamma * next_state_values.detach()
        #####################
        # Optimize critic
        assert Q_values.shape == Q_targets.shape
        self.critic_opt.zero_grad()
        critic_loss = F.mse_loss(Q_values, Q_targets)
        critic_loss.backward()
        self.critic_opt.step()
        # Optimize actor
        self.actor_opt.zero_grad()
        actor_loss = -self.critic(state_batch, self.actor(state_batch)).mean() # Negative sign for gradient ASCENT
        actor_loss.backward()
        self.actor_opt.step()
        self.update_target_nets_soft()
    def load_pretrained(self):
        """Restore actor/critic weights saved by a previous training run."""
        self.actor.load_state_dict(torch.load("actor_pretrained.pt"))
        self.critic.load_state_dict(torch.load("critic_pretrained.pt"))
def test_run():
    """Roll out one rendered episode with a pretrained agent and print its return."""
    env = gym.make("BipedalWalker-v2")
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    agent = DDPGAgent(obs_dim, act_dim, load=True)
    state = tensor(env.reset())
    total_reward = 0
    while True:
        env.render()
        # basically no noise
        action = agent.take_action(state, act_noise=0.001)
        obs, reward, done, info = env.step(action.numpy())
        state = tensor(obs)
        total_reward += reward
        if done:
            break
    print("Return: {:.2f}".format(total_reward))
def train(episodes=3000):
    """Train a DDPG agent on BipedalWalker-v2.

    Prints rolling statistics every 50 episodes, checkpoints the networks every
    100 episodes to actor_checkpoint.pt / critic_checkpoint.pt, and finally
    plots the per-episode returns (also saved to train.png).
    """
    env = gym.make("BipedalWalker-v2")
    obs_size = env.observation_space.shape[0]
    act_size = env.action_space.shape[0]
    print_every = 50
    checkpoint_every = 100
    agent = DDPGAgent(obs_size, act_size)
    print("Starting training!\n")
    returns = []
    for episode in range(1, episodes + 1):
        if episode % print_every == 0:
            print("Episode : ", episode)
            print("Memory size : ", len(agent.memory))
            print("Best return : {:.2f}".format(np.max(returns[-print_every:])))
            print("Avarage return : {:.2f}\n".format(np.mean(returns[-print_every:])))
        state = tensor(env.reset())
        episode_return = 0
        # episodes are capped at 1000 environment steps
        for step in range(1000):
            action = agent.take_action(state)
            next_state, reward, done, info = env.step(action.numpy())
            next_state, reward = tensor(next_state), tensor(reward)
            agent.push_experience(state, action, reward, next_state, done)
            agent.optimize_model()
            episode_return += reward.item()
            if done:
                break
            state = next_state
        returns.append(episode_return)
        if episode % checkpoint_every == 0:
            print("Checkpointing the agent!\n")
            torch.save(agent.actor.state_dict(), "actor_checkpoint.pt")
            torch.save(agent.critic.state_dict(), "critic_checkpoint.pt")
    plt.plot(returns)
    plt.xlabel("Episode")
    plt.ylabel("Return")
    plt.savefig("train.png", dpi=1000)
    plt.show()
if __name__ == "__main__":
    # test_run()  # swap in to watch a pretrained agent instead of training
    train()
| [
"random.sample",
"torch.cat",
"numpy.mean",
"torch.nn.init.calculate_gain",
"torch.no_grad",
"torch.nn.init.kaiming_normal_",
"torch.load",
"numpy.max",
"torch.nn.Linear",
"torch.zeros",
"seaborn.set",
"matplotlib.pyplot.show",
"torch.nn.functional.mse_loss",
"matplotlib.pyplot.ylabel",
... | [((232, 241), 'seaborn.set', 'sns.set', ([], {}), '()\n', (239, 241), True, 'import seaborn as sns\n'), ((257, 332), 'collections.namedtuple', 'namedtuple', (['"""Experience"""'], {'field_names': '"""state action reward next_state done"""'}), "('Experience', field_names='state action reward next_state done')\n", (267, 332), False, 'from collections import namedtuple\n'), ((365, 403), 'torch.tensor', 'torch.tensor', (['array'], {'dtype': 'torch.float'}), '(array, dtype=torch.float)\n', (377, 403), False, 'import torch\n'), ((5638, 5666), 'gym.make', 'gym.make', (['"""BipedalWalker-v2"""'], {}), "('BipedalWalker-v2')\n", (5646, 5666), False, 'import gym\n'), ((6223, 6251), 'gym.make', 'gym.make', (['"""BipedalWalker-v2"""'], {}), "('BipedalWalker-v2')\n", (6231, 6251), False, 'import gym\n'), ((7642, 7659), 'matplotlib.pyplot.plot', 'plt.plot', (['returns'], {}), '(returns)\n', (7650, 7659), True, 'import matplotlib.pyplot as plt\n'), ((7664, 7685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (7674, 7685), True, 'import matplotlib.pyplot as plt\n'), ((7690, 7710), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Return"""'], {}), "('Return')\n", (7700, 7710), True, 'import matplotlib.pyplot as plt\n'), ((7715, 7749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""train.png"""'], {'dpi': '(1000)'}), "('train.png', dpi=1000)\n", (7726, 7749), True, 'import matplotlib.pyplot as plt\n'), ((7754, 7764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7762, 7764), True, 'import matplotlib.pyplot as plt\n'), ((527, 553), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (536, 553), True, 'import torch.nn as nn\n'), ((573, 606), 'torch.nn.Linear', 'nn.Linear', (['(256 + action_size)', '(128)'], {}), '(256 + action_size, 128)\n', (582, 606), True, 'import torch.nn as nn\n'), ((627, 644), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (636, 644), True, 'import 
torch.nn as nn\n'), ((716, 756), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (739, 756), True, 'import torch.nn as nn\n'), ((801, 841), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (824, 841), True, 'import torch.nn as nn\n'), ((850, 890), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.qval.weight'], {}), '(self.qval.weight)\n', (872, 890), True, 'import torch.nn as nn\n'), ((1016, 1045), 'torch.cat', 'torch.cat', (['(x, action)'], {'dim': '(1)'}), '((x, action), dim=1)\n', (1025, 1045), False, 'import torch\n'), ((1244, 1270), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (1253, 1270), True, 'import torch.nn as nn\n'), ((1290, 1309), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {}), '(256, 256)\n', (1299, 1309), True, 'import torch.nn as nn\n'), ((1329, 1356), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'action_size'], {}), '(256, action_size)\n', (1338, 1356), True, 'import torch.nn as nn\n'), ((1428, 1468), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.fc1.weight'], {}), '(self.fc1.weight)\n', (1451, 1468), True, 'import torch.nn as nn\n'), ((1516, 1556), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.fc2.weight'], {}), '(self.fc2.weight)\n', (1539, 1556), True, 'import torch.nn as nn\n'), ((2189, 2227), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2202, 2227), False, 'import random\n'), ((4239, 4263), 'torch.stack', 'torch.stack', (['batch.state'], {}), '(batch.state)\n', (4250, 4263), False, 'import torch\n'), ((4287, 4312), 'torch.stack', 'torch.stack', (['batch.action'], {}), '(batch.action)\n', (4298, 4312), False, 'import torch\n'), ((4336, 4361), 'torch.stack', 'torch.stack', (['batch.reward'], {}), '(batch.reward)\n', (4347, 4361), False, 'import torch\n'), 
((4641, 4669), 'torch.zeros', 'torch.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (4652, 4669), False, 'import torch\n'), ((5064, 5095), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q_values', 'Q_targets'], {}), '(Q_values, Q_targets)\n', (5074, 5095), True, 'import torch.nn.functional as F\n'), ((1605, 1635), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""tanh"""'], {}), "('tanh')\n", (1627, 1635), True, 'import torch.nn as nn\n'), ((3901, 3916), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3914, 3916), False, 'import torch\n'), ((4388, 4412), 'torch.tensor', 'torch.tensor', (['batch.done'], {}), '(batch.done)\n', (4400, 4412), False, 'import torch\n'), ((5502, 5535), 'torch.load', 'torch.load', (['"""actor_pretrained.pt"""'], {}), "('actor_pretrained.pt')\n", (5512, 5535), False, 'import torch\n'), ((5573, 5607), 'torch.load', 'torch.load', (['"""critic_pretrained.pt"""'], {}), "('critic_pretrained.pt')\n", (5583, 5607), False, 'import torch\n'), ((3970, 3999), 'torch.zeros', 'torch.zeros', (['self.action_size'], {}), '(self.action_size)\n', (3981, 3999), False, 'import torch\n'), ((6724, 6754), 'numpy.max', 'np.max', (['returns[-print_every:]'], {}), '(returns[-print_every:])\n', (6730, 6754), True, 'import numpy as np\n'), ((6810, 6841), 'numpy.mean', 'np.mean', (['returns[-print_every:]'], {}), '(returns[-print_every:])\n', (6817, 6841), True, 'import numpy as np\n')] |
import os
import numpy as np
import subprocess
def redrock_scripts(i, flag='v0p5'):
    ''' Write a slurm batch script that runs redrock on BGS spectra sample `i`.

    Parameters
    ----------
    i : int
        sample index; appears in the job name, log file, and spectra file name.
    flag : str
        version tag inserted into the spectra file name.

    Returns
    -------
    str
        path of the slurm script written to the current working directory.
    '''
    # single-argument os.path.join was a no-op; the script lands in the cwd
    fjob = 'cori_redrock%i.slurm' % i
    queue = 'regular'
    constraint = 'knl'
    jb = '\n'.join([
        '#!/bin/bash',
        '#SBATCH --qos=%s' % queue,
        '#SBATCH --time=01:30:00',
        '#SBATCH --constraint=%s' % constraint,
        '#SBATCH -N 1',
        '#SBATCH -J sv_redrock%i' % i,
        '#SBATCH -o _sv_redrock%i.o' % i,
        '#SBATCH -L SCRATCH,project',
        '',
        'now=$(date +"%T")',
        'echo "start time ... $now"',
        '',
        'source /project/projectdirs/desi/software/desi_environment.sh 19.2',
        '',
        'export OMP_NUM_THREADS=1',
        '',
        'dir_spec=$CSCRATCH"/feasibgs/survey_validation/"',
        '',
        'f_str="GALeg.g15.bgsSpec.3000.%s.sample%i.seed0"' % (flag, i),
        'f_spec=$dir_spec$f_str".fits"',
        'f_redr=$dir_spec$f_str".rr.fits"',
        'f_zout=$dir_spec$f_str".rr.h5"',
        'rrdesi --mp 68 --zbest $f_redr --output $f_zout $f_spec',
        '',
        'now=$(date +"%T")',
        'echo "end time ... $now"'])
    # context manager guarantees the handle is closed even if the write fails
    with open(fjob, 'w') as job:
        job.write(jb)
    return fjob
def redrock_scripts_TSreview(iexp, texp):
    ''' Write a slurm batch script that runs redrock on TS review spectra.

    Parameters
    ----------
    iexp : int
        exposure index, inserted into the job and file names.
    texp : float
        exposure time in seconds; formatted with '%.f' (no decimals).

    Returns
    -------
    str
        path of the slurm script written to the current working directory.
    '''
    # single-argument os.path.join was a no-op; the script lands in the cwd
    fjob = 'cori_redrock.TSreview.exp%i.texp_%.f.slurm' % (iexp, texp)
    queue = 'regular'
    constraint = 'knl'
    jb = '\n'.join([
        '#!/bin/bash',
        '#SBATCH --qos=%s' % queue,
        '#SBATCH --time=03:00:00',
        '#SBATCH --constraint=%s' % constraint,
        '#SBATCH -N 1',
        '#SBATCH -J RR_TSreview_%i_%.f' % (iexp, texp),
        '#SBATCH -o _RR_TSreview_%i_%.f.o' % (iexp, texp),
        '#SBATCH -L SCRATCH,project',
        '',
        'now=$(date +"%T")',
        'echo "start time ... $now"',
        '',
        'source /project/projectdirs/desi/software/desi_environment.sh 19.2',
        '',
        'export OMP_NUM_THREADS=1',
        '',
        'dir_spec=$CSCRATCH"/feasibgs/survey_validation/"',
        '',
        'f_str="GALeg.g15.bgsSpec.5000.TSreview.exp%i.texp_%.f"' % (iexp, texp),
        'f_spec=$dir_spec$f_str".fits"',
        'f_redr=$dir_spec$f_str".rr.fits"',
        'f_zout=$dir_spec$f_str".rr.h5"',
        'rrdesi --mp 68 --zbest $f_redr --output $f_zout $f_spec',
        '',
        'now=$(date +"%T")',
        'echo "end time ... $now"'])
    # context manager guarantees the handle is closed even if the write fails
    with open(fjob, 'w') as job:
        job.write(jb)
    return fjob
def submit_job(fjob):
    ''' Submit a slurm script with `sbatch`.

    Parameters
    ----------
    fjob : str
        path to an existing slurm script.

    Raises
    ------
    ValueError
        if `fjob` does not point at an existing file (now with a message
        naming the offending path, instead of a bare ValueError).
    subprocess.CalledProcessError
        if `sbatch` exits with a non-zero status.
    '''
    if not os.path.isfile(fjob):
        raise ValueError('job script not found: %s' % fjob)
    subprocess.check_output(['sbatch', fjob])
    return None
if __name__=="__main__":
    # earlier run: one redrock job per BGS sample
    #for i in range(8):
    #    fjob = redrock_scripts(i, flag='v0p5')
    #    submit_job(fjob)
    # grid of exposure times for the TS review runs
    texps = 60. * np.array([3, 5, 8, 12, 15]) # 3 to 15 min
    for iexp in range(3):
        for texp in texps:
            fjob = redrock_scripts_TSreview(iexp, texp)
            submit_job(fjob)
| [
"numpy.array",
"subprocess.check_output",
"os.path.isfile",
"os.path.join"
] | [((170, 210), 'os.path.join', 'os.path.join', (["('cori_redrock%i.slurm' % i)"], {}), "('cori_redrock%i.slurm' % i)\n", (182, 210), False, 'import os\n'), ((1461, 1534), 'os.path.join', 'os.path.join', (["('cori_redrock.TSreview.exp%i.texp_%.f.slurm' % (iexp, texp))"], {}), "('cori_redrock.TSreview.exp%i.texp_%.f.slurm' % (iexp, texp))\n", (1473, 1534), False, 'import os\n'), ((2802, 2843), 'subprocess.check_output', 'subprocess.check_output', (["['sbatch', fjob]"], {}), "(['sbatch', fjob])\n", (2825, 2843), False, 'import subprocess\n'), ((2759, 2779), 'os.path.isfile', 'os.path.isfile', (['fjob'], {}), '(fjob)\n', (2773, 2779), False, 'import os\n'), ((3007, 3034), 'numpy.array', 'np.array', (['[3, 5, 8, 12, 15]'], {}), '([3, 5, 8, 12, 15])\n', (3015, 3034), True, 'import numpy as np\n')] |
import numpy as np
import numpy.random as random
import sys
import h5py
import scipy.ndimage as img
import scipy.interpolate as interp
import scipy.io
import astropy.units as u
import astropy.constants as const
import astropy.table as at
import astropy.io
import data.tellurics.skycalc.skycalc as skycalc
import data.tellurics.skycalc.skycalc_cli as sky_cli
import json
import requests
import io
# WobbleSim/wobblesim.py
# WobbleSim/cli/main.py
def main(low_resolution,s2n,epoches,vp,epsilon,gamma,w,stellarname_wave,stellarname_flux,skycalcname,skycalcalma,gascellname,a):
    """Simulate a multi-epoch observed spectrum: stellar template x tellurics x
    gas cell, convolved with the telescope LSF, resampled onto a per-epoch
    jittered/stretched detector grid, and degraded with multiplicative noise.

    Parameters (as used below):
    low_resolution -- instrument resolving power (sets LSF width and pixel grid)
    s2n            -- constant signal-to-noise applied to every pixel
    epoches        -- number of observation epochs to simulate
    vp             -- half-width of the sampled radial-velocity shifts, in km/s
    epsilon        -- max fractional grid stretch per epoch (see stretch())
    gamma          -- scale factor for the reported error bars (see generate_errors())
    w              -- max grid jitter in pixel widths (see jitter())
    stellarname_wave, stellarname_flux -- stellar template input files
    skycalcname, skycalcalma           -- skycalc telluric input files
    gascellname                        -- gas-cell transmission input file
    a              -- Lanczos interpolation order

    Returns a dict with the noisy sampled spectra, the ground-truth component
    spectra, and the per-epoch nuisance parameters (shifts, stretches, airmasses).

    NOTE(review): sample_deltas, read_in_stellar, simulate_tellurics,
    read_in_gas_cell, interpolate and get_median_difference are not defined in
    this portion of the file -- confirm they exist elsewhere in the module.
    """
    generate_data = False
    # Read In Files And Simulate
    #################################################
    # lambda is in Angstroms here
    lamb_min = 5000
    lamb_max = 6300
    xmin = np.log(lamb_min)
    xmax = np.log(lamb_max)
    deltas = sample_deltas(epoches,vel_width=vp*u.km/u.s)
    flux_stellar, lamb_stellar = read_in_stellar(stellarname_wave,
            stellarname_flux)
    # lambda here is in nanometers
    trans_tellurics, lamb_tellurics, airmass = simulate_tellurics(inputFilename=skycalcname,
            almFilename=skycalcalma,
            epoches=epoches)
    # lambda is in nanometers here as well
    trans_gas, lamb_gas = read_in_gas_cell(filename=gascellname)
    lamb_tellurics *= u.nm
    lamb_gas *= u.Angstrom
    lamb_stellar *= u.Angstrom
    # work in log-wavelength (dimensionless, Angstrom-based) throughout
    x_s = np.log(lamb_stellar/u.Angstrom)
    x_t = np.log(lamb_tellurics/u.Angstrom)
    x_g = np.log(lamb_gas/u.Angstrom)
    # common grid spacing = finest median spacing among the three inputs
    temp = [get_median_difference(x) for x in [x_s,x_t[0],x_g]]
    median_diff = min(temp)
    xs = np.arange(np.log(lamb_min),np.log(lamb_max),step=median_diff)
    f_s = np.empty((epoches,xs.shape[0]))
    f_t = np.empty((epoches,xs.shape[0]))
    f_g = interpolate(xs,x_g,trans_gas)
    f_theory = np.empty((epoches,xs.shape[0]))
    for i in range(epoches):
        # Doppler shift enters as an offset in log-lambda
        f_s[i,:] = interpolate(xs + deltas[i],x_s, flux_stellar)
        f_t[i,:] = interpolate(xs,x_t[i,:],trans_tellurics[i])
        f_theory[i,:] = f_s[i,:] * f_t[i,:] * f_g
    # now take lambda grids from all of these and make a new one with
    # spacing equal to the minimum median spacing of the above grids
    # then using lanczos 5 interpolation interpolate all values onto new grids
    # then multiply element wise for combined theoretical spectrum
    if generate_data:
        # NOTE(review): dead branch (generate_data is hard-coded False above).
        # It references names not bound in this function (high_resolution, sn,
        # tn, gn, ymin, ymax) -- enabling it as-is would raise NameError unless
        # those exist as module globals.
        # Initialize constants
        #################################################
        high_spacing = spacing_from_res(high_resolution)
        xs = np.arange(xmin,xmax,step=high_spacing)
        line_width = spacing_from_res(low_resolution)
        # Generate Theoretical Y values
        #################################################
        y_star,deltas = generate_stellar( sn,epoches,xs,line_width,ymin,ymax,vp*u.km/u.s)
        y_tell,airmass = generate_tellurics(tn,epoches,xs,line_width,ymin,ymax)
        y_gas ,mu_g = generate_gas_cell( gn,epoches,xs,line_width,ymin,ymax)
        y_sum = y_star + y_tell + y_gas
        f_sum = np.exp(y_sum)
    # Convolve with Telescope PSF
    ################################################
    lsf = mean_lsf(low_resolution,median_diff,sigma_range=5.0)
    f_lsf = np.empty(f_theory.shape)
    for iii in range(f_theory.shape[0]):
        f_lsf[iii,:] = img.convolve(f_theory[iii,:],lsf)
    # np.apply_along_axis(dummy,1,f_theory,lsf) # convolve just tell star and gas
    # Generate dataset grid & jitter & stretch
    ##################################################
    x = np.arange(xmin,xmax,step=spacing_from_res(low_resolution))
    nlr = x.shape[0]
    x_hat, m = stretch(x,epoches,epsilon)
    # NOTE(review): the stretched grid above is immediately overwritten here --
    # jitter is applied to the unstretched x, so m never affects x_hat.
    # Confirm whether jitter(x_hat, ...) was intended.
    x_hat, delt = jitter(x,epoches,w)
    # Interpolate Spline and Add Noise
    ##################################################
    s2n_grid = get_s2n(x_hat.shape,s2n)
    f_exp = np.empty(x_hat.shape)
    f_readout = np.empty(x_hat.shape)
    # noise = np.empty(x_hat.shape)
    for i in range(f_exp.shape[0]):
        f_exp[i,:] = lanczos_interpolation(x_hat[i,:],xs,f_lsf[i,:],dx=median_diff,a=a)
        for j in range(f_exp.shape[1]):
            # multiplicative Gaussian noise at the requested S/N
            f_readout[i,j] = f_exp[i,j] * random.normal(1,1./s2n_grid[i,j])
    # Get Error Bars
    ###################################################
    ferr_out = generate_errors(f_readout,s2n_grid,gamma)
    lmb_out = np.exp(x)
    # Pack Output into Dictionary
    ###################################################
    out = {"wavelength_sample":lmb_out,
           "flux":f_readout,
           "flux_error":ferr_out,
           "wavelength_theory":np.exp(xs),
           "flux_tellurics":f_t,
           "flux_stellar":f_s,
           "flux_gas":f_g,
           "flux_lsf":f_lsf,
           "del":delt,
           "m":m,
           "airmass":airmass,
           "delta":deltas}
    return out
def mean_lsf(low_resolution,spacing,sigma_range=5.0):
    """Gaussian line-spread function sampled on a log-lambda grid and
    normalized to unit L1 norm.

    The grid spans +/- sigma_range standard deviations, where one standard
    deviation is 1/low_resolution in log-lambda.
    """
    kernel = np.arange(-sigma_range/low_resolution, sigma_range/low_resolution, step=spacing)
    kernel = gauss_func(kernel, mu=0.0, sigma=1.0/low_resolution)
    kernel /= np.linalg.norm(kernel, ord=1)
    return kernel
def lanczos_interpolation(x,xs,ys,dx,a=4):
    """Lanczos resampling of the series (xs, ys) onto the target grid x.

    Parameters: xs is assumed to be uniformly spaced with step dx; a is the
    kernel order (2a samples contribute to each output value).
    NOTE(review): the abs() in the index computation below only behaves
    correctly when every x value lies at or above xs[0] -- for x < xs[0] the
    window is mirrored onto the wrong side. Confirm callers never pass such x.
    """
    x0 = xs[0]
    y = np.zeros(x.shape)
    for i,x_value in enumerate(x):
        # contributing window: the 2a source samples nearest x_value,
        # clipped to the valid index range of xs
        sample_min,sample_max = max(0,abs(x_value-x0)//dx - a + 1),min(xs.shape[0],abs(x_value-x0)//dx + a)
        samples = np.arange(sample_min,sample_max,dtype=int)
        for sample in samples:
            y[i] += ys[sample] * lanczos_kernel((x_value - xs[sample])/dx,a)
    return y
def lanczos_kernel(x,a):
    """Lanczos window of order a: 1 at x == 0, sinc(x)*sinc(x/a) for |x| < a,
    and 0 outside the window (angles are tagged with astropy radians)."""
    if x == 0:
        return 1
    if -a < x < a:
        return a*np.sin(np.pi*u.radian*x) * np.sin(np.pi*u.radian*x/a)/(np.pi**2 * x**2)
    return 0
def same_dist_elems(arr):
    """Return True if consecutive elements of arr are all equally spaced.

    Sequences with fewer than two elements are trivially equally spaced and
    now return True (the original indexed arr[1] unconditionally and raised
    IndexError for empty or single-element input).
    """
    if len(arr) < 2:
        return True
    diff = arr[1] - arr[0]
    for x in range(1, len(arr) - 1):
        if arr[x + 1] - arr[x] != diff:
            return False
    return True
def read_in_tellurics(filename):
    """Read a telluric transmission spectrum from a FITS file.

    Intended to return (trans_grid, lamb_grid, airmass), each with a leading
    length-1 epoch axis.
    NOTE(review): the two sys.exit() calls below look like debugging leftovers;
    the first one makes everything after it unreachable, so this function
    currently terminates the process instead of returning. Confirm and remove.
    NOTE(review): the file only does `import astropy.io` -- whether
    astropy.io.fits is bound here depends on astropy's import side effects;
    verify before re-enabling this function.
    """
    hdu = astropy.io.fits.open(filename)
    print(hdu)
    prim = hdu['PRIMARY'].header.keys
    print(prim)
    sys.exit()
    prim = at.Table.read(hdu['PRIMARY'])
    print(prim.info())
    tbl = at.Table.read(hdu[1])
    sys.exit()
    trans_grid = np.array(tbl['trans'].data)
    lamb_grid = np.array(tbl['lam'].data)
    airmass = np.array(prim['airmass'].data)
    trans_grid = np.expand_dims(trans_grid,0)
    lamb_grid = np.expand_dims(lamb_grid,0)
    return trans_grid, lamb_grid, airmass
def get_s2n(shape,constant):
    """Signal-to-noise grid of the given shape, filled with `constant`."""
    grid = np.ones(shape)
    return grid * constant
def average_difference(x):
    """Mean spacing between consecutive elements of x."""
    return np.mean(np.diff(x))
def jitter(x,epoches,w=1.0):
    """Shift each epoch's grid by a uniform random offset of up to w pixels.

    Returns (out, jitter): the (epoches, n) jittered grids and the per-epoch
    offsets. A 1-D x is first tiled to epoches rows.
    NOTE(review): np.repeat is applied unconditionally, so a 2-D input is
    tiled again (epoches * rows rows) -- presumably only 1-D input is
    intended; confirm. The local name `jitter` also shadows this function.
    """
    out = x
    if len(out.shape) == 1:
        out = np.expand_dims(out,axis=0)
    out = np.repeat(out,repeats=epoches,axis=0)
    # one pixel width, estimated from the first row's mean spacing
    width = average_difference(out[0,:])
    jitter = (2*random.rand(epoches) - 1) * width * w
    for i,delt in enumerate(jitter):
        out[i,:] += delt
    return out,jitter
def stretch(x,epoches,w=1.0):
    """Scale each epoch's grid by a random factor in [1 - epsilon, 1 + epsilon].

    Returns (x, m): the (epoches, n) stretched grids and the per-epoch factors.
    A 1-D input is first tiled to epoches rows.
    NOTE(review): np.repeat is applied unconditionally, so a 2-D input is
    tiled again -- presumably only 1-D input is intended; confirm.
    """
    if len(x.shape) == 1:
        x = np.expand_dims(x,axis=0)
    x = np.repeat(x,repeats=epoches,axis=0)
    m = (epsilon * (2*random.rand(epoches) - 1)) + 1
    for i,ms in enumerate(m):
        x[i,:] *= ms
    return x,m
def gauss_func(x,mu,sigma):
    """Unit-amplitude Gaussian exp(-(x - mu)^2 / (2 sigma^2)); not area-normalized."""
    residual = x - mu
    return np.exp(-0.5*residual**2/(sigma**2))
def generate_noise(epoches,size,scale=0.01):
    """Zero-mean Gaussian noise, one row per epoch, shape (epoches, size)."""
    shape = (epoches, size)
    return random.normal(scale=scale, size=shape)
def spacing_from_res(R):
    """Log-wavelength grid spacing for resolving power R: ln(1 + 1/R)."""
    ratio = 1/R
    return np.log(1 + ratio)
def generate_stellar(n_lines,epoches,x,line_width,y_min=0.0,y_max=0.7,vel_width=30*u.km/u.s):
    """Synthetic stellar log-spectrum: n_lines Gaussian absorption lines with
    random positions/depths, Doppler-shifted by a random velocity per epoch.

    Returns (y, deltas): the (epoches, n) log-flux array (non-positive) and
    the per-epoch log-lambda shifts.
    NOTE(review): `shifts` is not defined in this portion of the file --
    presumably it converts velocities to log-lambda offsets; confirm it
    exists elsewhere in the module.
    """
    deltas = np.array(shifts((2*random.rand(epoches)-1)*vel_width))
    # random line centers within the grid span, random depths in [y_min, y_max)
    mus = (np.max(x) - np.min(x))*random.rand(n_lines) + np.min(x)
    heights = (y_max - y_min) * random.rand(n_lines) + y_min
    y = np.zeros((epoches,x.shape[0]))
    for i,delta in enumerate(deltas):
        for j in range(n_lines):
            y[i,:] -= heights[j] * gauss_func(x + delta,mus[j],line_width)
    return y, deltas
def generate_tellurics(n_lines,epoches,x,line_width,y_min=0.0,y_max=0.7):
    """Synthetic telluric log-spectrum: n_lines random Gaussian absorption
    lines, scaled per epoch by a random airmass in [0, 1).

    Returns (y, airmass): the (epoches, n) log-flux array and the airmasses.
    """
    airmass = random.rand(epoches)
    # random line centers within the grid span, random depths in [y_min, y_max)
    mus = (np.max(x) - np.min(x))*random.rand(n_lines) + np.min(x)
    heights = (y_max - y_min) * random.rand(n_lines) + y_min
    y = np.zeros((epoches,x.shape[0]))
    for ep in range(epoches):
        for line in range(n_lines):
            y[ep,:] -= airmass[ep] * heights[line] * gauss_func(x,mus[line],line_width)
    return y, airmass
def generate_gas_cell(n_lines,epoches,x,line_width,y_min=0.0,y_max=0.7):
    """Synthetic gas-cell log-spectrum: n_lines random Gaussian absorption
    lines, identical in every epoch.

    Returns (y, mus): the (epoches, n) log-flux array and the line centers.
    """
    # random line centers within the grid span, random depths in [y_min, y_max)
    mus = (np.max(x) - np.min(x))*random.rand(n_lines) + np.min(x)
    heights = (y_max - y_min) * random.rand(n_lines) + y_min
    y = np.zeros((epoches,x.shape[0]))
    for ep in range(epoches):
        for line in range(n_lines):
            y[ep,:] -= heights[line] * gauss_func(x,mus[line],line_width)
    return y, mus
def generate_errors(f,s2n,gamma=1.0):
    """Draw one Gaussian 'error bar' per pixel with scale gamma * f / s2n.

    Negative fluxes in f are zeroed first (note: this mutates the caller's
    array in place). Returns an array of the same shape as f.
    """
    neg_rows, neg_cols = np.where(f < 0)
    for r, c in zip(neg_rows, neg_cols):
        f[r, c] = 0
    f_err = np.empty(f.shape)
    for r in range(f_err.shape[0]):
        for c in range(f_err.shape[1]):
            f_err[r, c] = random.normal(scale=f[r, c]/s2n[r, c] * gamma)
    return f_err
| [
"numpy.empty",
"numpy.ones",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.max",
"numpy.repeat",
"numpy.min",
"astropy.table.Table.read",
"sys.exit",
"numpy.log",
"numpy.zeros",
"numpy.expand_dims",
"scipy.ndimage.convolve",
"numpy.where... | [((791, 807), 'numpy.log', 'np.log', (['lamb_min'], {}), '(lamb_min)\n', (797, 807), True, 'import numpy as np\n'), ((821, 837), 'numpy.log', 'np.log', (['lamb_max'], {}), '(lamb_max)\n', (827, 837), True, 'import numpy as np\n'), ((1551, 1584), 'numpy.log', 'np.log', (['(lamb_stellar / u.Angstrom)'], {}), '(lamb_stellar / u.Angstrom)\n', (1557, 1584), True, 'import numpy as np\n'), ((1593, 1628), 'numpy.log', 'np.log', (['(lamb_tellurics / u.Angstrom)'], {}), '(lamb_tellurics / u.Angstrom)\n', (1599, 1628), True, 'import numpy as np\n'), ((1637, 1666), 'numpy.log', 'np.log', (['(lamb_gas / u.Angstrom)'], {}), '(lamb_gas / u.Angstrom)\n', (1643, 1666), True, 'import numpy as np\n'), ((1841, 1873), 'numpy.empty', 'np.empty', (['(epoches, xs.shape[0])'], {}), '((epoches, xs.shape[0]))\n', (1849, 1873), True, 'import numpy as np\n'), ((1883, 1915), 'numpy.empty', 'np.empty', (['(epoches, xs.shape[0])'], {}), '((epoches, xs.shape[0]))\n', (1891, 1915), True, 'import numpy as np\n'), ((1971, 2003), 'numpy.empty', 'np.empty', (['(epoches, xs.shape[0])'], {}), '((epoches, xs.shape[0]))\n', (1979, 2003), True, 'import numpy as np\n'), ((3376, 3400), 'numpy.empty', 'np.empty', (['f_theory.shape'], {}), '(f_theory.shape)\n', (3384, 3400), True, 'import numpy as np\n'), ((4013, 4034), 'numpy.empty', 'np.empty', (['x_hat.shape'], {}), '(x_hat.shape)\n', (4021, 4034), True, 'import numpy as np\n'), ((4051, 4072), 'numpy.empty', 'np.empty', (['x_hat.shape'], {}), '(x_hat.shape)\n', (4059, 4072), True, 'import numpy as np\n'), ((4499, 4508), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4505, 4508), True, 'import numpy as np\n'), ((5056, 5145), 'numpy.arange', 'np.arange', (['(-sigma_range / low_resolution)', '(sigma_range / low_resolution)'], {'step': 'spacing'}), '(-sigma_range / low_resolution, sigma_range / low_resolution, step\n =spacing)\n', (5065, 5145), True, 'import numpy as np\n'), ((5204, 5230), 'numpy.linalg.norm', 'np.linalg.norm', (['lsf'], {'ord': 
'(1)'}), '(lsf, ord=1)\n', (5218, 5230), True, 'import numpy as np\n'), ((5312, 5329), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (5320, 5329), True, 'import numpy as np\n'), ((6451, 6461), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6459, 6461), False, 'import sys\n'), ((6473, 6502), 'astropy.table.Table.read', 'at.Table.read', (["hdu['PRIMARY']"], {}), "(hdu['PRIMARY'])\n", (6486, 6502), True, 'import astropy.table as at\n'), ((6537, 6558), 'astropy.table.Table.read', 'at.Table.read', (['hdu[1]'], {}), '(hdu[1])\n', (6550, 6558), True, 'import astropy.table as at\n'), ((6564, 6574), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6572, 6574), False, 'import sys\n'), ((6592, 6619), 'numpy.array', 'np.array', (["tbl['trans'].data"], {}), "(tbl['trans'].data)\n", (6600, 6619), True, 'import numpy as np\n'), ((6637, 6662), 'numpy.array', 'np.array', (["tbl['lam'].data"], {}), "(tbl['lam'].data)\n", (6645, 6662), True, 'import numpy as np\n'), ((6677, 6707), 'numpy.array', 'np.array', (["prim['airmass'].data"], {}), "(prim['airmass'].data)\n", (6685, 6707), True, 'import numpy as np\n'), ((6726, 6755), 'numpy.expand_dims', 'np.expand_dims', (['trans_grid', '(0)'], {}), '(trans_grid, 0)\n', (6740, 6755), True, 'import numpy as np\n'), ((6772, 6800), 'numpy.expand_dims', 'np.expand_dims', (['lamb_grid', '(0)'], {}), '(lamb_grid, 0)\n', (6786, 6800), True, 'import numpy as np\n'), ((7642, 7685), 'numpy.exp', 'np.exp', (['(-1 / 2 * (x - mu) ** 2 / sigma ** 2)'], {}), '(-1 / 2 * (x - mu) ** 2 / sigma ** 2)\n', (7648, 7685), True, 'import numpy as np\n'), ((7737, 7785), 'numpy.random.normal', 'random.normal', ([], {'scale': 'scale', 'size': '(epoches, size)'}), '(scale=scale, size=(epoches, size))\n', (7750, 7785), True, 'import numpy.random as random\n'), ((7821, 7838), 'numpy.log', 'np.log', (['(1 + 1 / R)'], {}), '(1 + 1 / R)\n', (7827, 7838), True, 'import numpy as np\n'), ((8140, 8171), 'numpy.zeros', 'np.zeros', (['(epoches, x.shape[0])'], {}), '((epoches, 
x.shape[0]))\n', (8148, 8171), True, 'import numpy as np\n'), ((8427, 8447), 'numpy.random.rand', 'random.rand', (['epoches'], {}), '(epoches)\n', (8438, 8447), True, 'import numpy.random as random\n'), ((8589, 8620), 'numpy.zeros', 'np.zeros', (['(epoches, x.shape[0])'], {}), '((epoches, x.shape[0]))\n', (8597, 8620), True, 'import numpy as np\n'), ((8999, 9030), 'numpy.zeros', 'np.zeros', (['(epoches, x.shape[0])'], {}), '((epoches, x.shape[0]))\n', (9007, 9030), True, 'import numpy as np\n'), ((9228, 9243), 'numpy.where', 'np.where', (['(f < 0)'], {}), '(f < 0)\n', (9236, 9243), True, 'import numpy as np\n'), ((9302, 9319), 'numpy.empty', 'np.empty', (['f.shape'], {}), '(f.shape)\n', (9310, 9319), True, 'import numpy as np\n'), ((1779, 1795), 'numpy.log', 'np.log', (['lamb_min'], {}), '(lamb_min)\n', (1785, 1795), True, 'import numpy as np\n'), ((1796, 1812), 'numpy.log', 'np.log', (['lamb_max'], {}), '(lamb_max)\n', (1802, 1812), True, 'import numpy as np\n'), ((2696, 2736), 'numpy.arange', 'np.arange', (['xmin', 'xmax'], {'step': 'high_spacing'}), '(xmin, xmax, step=high_spacing)\n', (2705, 2736), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.exp', 'np.exp', (['y_sum'], {}), '(y_sum)\n', (3205, 3212), True, 'import numpy as np\n'), ((3465, 3500), 'scipy.ndimage.convolve', 'img.convolve', (['f_theory[iii, :]', 'lsf'], {}), '(f_theory[iii, :], lsf)\n', (3477, 3500), True, 'import scipy.ndimage as img\n'), ((4737, 4747), 'numpy.exp', 'np.exp', (['xs'], {}), '(xs)\n', (4743, 4747), True, 'import numpy as np\n'), ((5743, 5787), 'numpy.arange', 'np.arange', (['sample_min', 'sample_max'], {'dtype': 'int'}), '(sample_min, sample_max, dtype=int)\n', (5752, 5787), True, 'import numpy as np\n'), ((6883, 6897), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (6890, 6897), True, 'import numpy as np\n'), ((7075, 7102), 'numpy.expand_dims', 'np.expand_dims', (['out'], {'axis': '(0)'}), '(out, axis=0)\n', (7089, 7102), True, 'import numpy as np\n'), ((7116, 
7155), 'numpy.repeat', 'np.repeat', (['out'], {'repeats': 'epoches', 'axis': '(0)'}), '(out, repeats=epoches, axis=0)\n', (7125, 7155), True, 'import numpy as np\n'), ((7410, 7435), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (7424, 7435), True, 'import numpy as np\n'), ((7447, 7484), 'numpy.repeat', 'np.repeat', (['x'], {'repeats': 'epoches', 'axis': '(0)'}), '(x, repeats=epoches, axis=0)\n', (7456, 7484), True, 'import numpy as np\n'), ((8060, 8069), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8066, 8069), True, 'import numpy as np\n'), ((8509, 8518), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8515, 8518), True, 'import numpy as np\n'), ((8919, 8928), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8925, 8928), True, 'import numpy as np\n'), ((8037, 8057), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8048, 8057), True, 'import numpy.random as random\n'), ((8102, 8122), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8113, 8122), True, 'import numpy.random as random\n'), ((8486, 8506), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8497, 8506), True, 'import numpy.random as random\n'), ((8551, 8571), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8562, 8571), True, 'import numpy.random as random\n'), ((8896, 8916), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8907, 8916), True, 'import numpy.random as random\n'), ((8961, 8981), 'numpy.random.rand', 'random.rand', (['n_lines'], {}), '(n_lines)\n', (8972, 8981), True, 'import numpy.random as random\n'), ((9416, 9464), 'numpy.random.normal', 'random.normal', ([], {'scale': '(f[i, j] / s2n[i, j] * gamma)'}), '(scale=f[i, j] / s2n[i, j] * gamma)\n', (9429, 9464), True, 'import numpy.random as random\n'), ((4315, 4353), 'numpy.random.normal', 'random.normal', (['(1)', '(1.0 / s2n_grid[i, j])'], {}), '(1, 1.0 / s2n_grid[i, j])\n', (4328, 4353), True, 'import 
numpy.random as random\n'), ((6073, 6105), 'numpy.sin', 'np.sin', (['(np.pi * u.radian * x / a)'], {}), '(np.pi * u.radian * x / a)\n', (6079, 6105), True, 'import numpy as np\n'), ((8014, 8023), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8020, 8023), True, 'import numpy as np\n'), ((8026, 8035), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8032, 8035), True, 'import numpy as np\n'), ((8463, 8472), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8469, 8472), True, 'import numpy as np\n'), ((8475, 8484), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8481, 8484), True, 'import numpy as np\n'), ((8873, 8882), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (8879, 8882), True, 'import numpy as np\n'), ((8885, 8894), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (8891, 8894), True, 'import numpy as np\n'), ((6046, 6074), 'numpy.sin', 'np.sin', (['(np.pi * u.radian * x)'], {}), '(np.pi * u.radian * x)\n', (6052, 6074), True, 'import numpy as np\n'), ((7212, 7232), 'numpy.random.rand', 'random.rand', (['epoches'], {}), '(epoches)\n', (7223, 7232), True, 'import numpy.random as random\n'), ((7505, 7525), 'numpy.random.rand', 'random.rand', (['epoches'], {}), '(epoches)\n', (7516, 7525), True, 'import numpy.random as random\n'), ((7963, 7983), 'numpy.random.rand', 'random.rand', (['epoches'], {}), '(epoches)\n', (7974, 7983), True, 'import numpy.random as random\n')] |
from bs4 import BeautifulSoup
import numpy as np
PI = np.pi
import csv
import os
from collections import OrderedDict
import re
OUTPUT_FILE = 'gpx_processed_info.csv'
MAX_SPEED = 50#mph
#radius of earth in miles
C_R = 6371/1.60934
def distcalc(c1, c2):
lat1 = float(c1['lat'])*PI/180.
lon1 = float(c1['lon'])*PI/180.
lat2 = float(c2['lat'])*PI/180.
lon2 = float(c2['lon'])*PI/180.
dlat = lat2-lat1
dlon = lon2-lon1
a = np.sin(dlat/2.)**2 + np.cos(lat1)*np.cos(lat2)*np.sin(dlon/2)**2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
d = C_R * c
return d
def calculate_distances(points):
dists = np.asarray([distcalc(c2.attrs,c1.attrs) for c1, c2 in zip(points[1:],points[:-1])])
return dists
def calculate_velocities(distances):
#convert mi/s to mph
velocities = distances * 3600
return velocities
def calculate_accelerations(velocities):
return np.diff(velocities)
MIPS_TO_MPH = 3600.
FPS_TO_MPH = 3600./5280
G_FPS = 32.
G_MPHPS = 32 * FPS_TO_MPH
def process_file(filename, target_dir):
new_filename = re.sub(r'([^.]+)\.gpx', r'raw_csv/\1.csv', filename)
if os.path.exists(new_filename):
#print '%s already exists. skipping.' % new_filename
return None
print('processing %s' % filename )
with open(os.path.join(target_dir, filename),'r') as f:
soup = BeautifulSoup(f.read(), 'lxml')
track = soup.find('trk')
segments = track.find('trkseg')
points = segments.find_all('trkpt')
times = [p.find('time').text for p in points]
elevations = np.asarray([float(p.find('ele').text) for p in points])
#lon-lat based
distances = calculate_distances(points)
velocities = calculate_velocities(distances)
#if velocity > MAX_SPEED, then it indicates discontinuity
velocities = velocities * (velocities < MAX_SPEED)
accelerations = calculate_accelerations(velocities)
#elevation
elevation_changes = np.diff(elevations)
sum_v = np.sum(velocities)
sum_v2 = np.sum(velocities**2)
sum_v3 = np.sum(velocities**3)
abs_elevation = np.sum(np.abs(elevation_changes))/2
sum_a = np.sum(accelerations * (accelerations > 0))
#alternative type of accelerations measurement
velocities_mph = 3600 * velocities
energy_increases = velocities_mph[1:]**2 - velocities_mph[:-1]**2
energy_increases = energy_increases - FPS_TO_MPH**2 * G_FPS * elevation_changes[1:] * (elevation_changes[1:] < 0)
energy_increases = np.sum(energy_increases * (energy_increases > 0))
with open(new_filename, 'w') as f:
f.write('time,distance,elevation_change')
for t, d, e in zip(times[1:], distances, elevation_changes):
f.write('\n')
f.write(','.join([str(t), str(d), str(e)]))
return {
'sum_v':sum_v,
'sum_v2':sum_v2,
'abs_elevation':abs_elevation,
'sum_a':sum_a,
'sum_v3':sum_v3,
'sum_e':energy_increases
}
def main(gpx_source_dir, gpx_target_dir, gpx_summary_filename):
original_dir = os.getcwd()
os.makedirs(gpx_target_dir, exist_ok=True)
os.chdir(gpx_source_dir)
file_list = [x for x in os.listdir('.') if x[-4:].lower()=='.gpx']
file_list.sort()
fileinfo = OrderedDict()
for file in file_list:
td = process_file(file, gpx_target_dir)
if td is not None:
fileinfo[file] = td
#no longer interested in actually summing up variables here...
if True:
return 0
with open(os.path.join(gpx_target_dir, gpx_summary_filename), 'w') as f:
f.write(','.join(['filename','sum_v','sum_v2','sum_v3','abs_elevation','sum_a', 'sum_e']))
for fn, data in fileinfo.iteritems():
f.write('\n')
f.write(','.join([str(x) for x in [
fn,
data['sum_v'],
data['sum_v2'],
data['sum_v3'],
data['abs_elevation'],
data['sum_a'],
data['sum_e']
]
]
))
print('processed gpx files')
os.setwd(original_dir)
if __name__=='__main__':
raise NotImplementedError('this program is now to be called from other files') #main()
"""
**Context:**
I am trying to reverse-engineer Strava's algorithm for measuring Calories burned on a bike ride. I have 76 .gpx files downloaded, along with Strava's estimation for Calories burned. The equations for measuring this involve being able to estimate the speed at various time points, the elevation at various time points, and calculating power as a polynomial function of these two values. Integrating unscaled polynomials over time and plugging them into a linear regression with respect to the Calorie estimate should determine coefficients for my particular case.
**Note that I do not believe Strava's measurement is accurate. I simply want to determine the algorithms and formula they use for my particular case**
_________________
**Problems:**
1. My current speed variable is simply calculated by scaling the distance (each over 1-second intervals), which was initially calculated using the [haversine formula](https://en.wikipedia.org/wiki/Haversine_formula). I have a smoothed version that uses a normal kernel with varying bandwidths, and in each case the total distance matches the one Strava displays on its website.
2. The elevation calculations I make to determine overall change in elevation are very far off from the website's. I am simply looking at all positive increases in elevation (there is an 0.1-foot resolution), and adding those together. The estimates the website gives are usually 3/4-3 times the value I estimate.
3. The power formula the website gives is [this](https://support.strava.com/hc/en-us/articles/216917107-Power-Calculations), but it seems to calculate wind resistance as a function of v^2 (which is usually the force) as opposed to v^3 (which should be the power). Either way, I use the first, second, and third powers of velocity as variables. Additionally, I look at kinetic energy increases (a function of v^2/2) and add those together, decreasing them by decreases in gravitational potential energy that simultaneously occur.
_____________
**Additional notes about data:**
1. The rides take place on mostly flat ground, with very few hills. Most elevation changes take place over longer distances.
2.
"""
| [
"os.listdir",
"os.setwd",
"numpy.sum",
"os.makedirs",
"os.path.join",
"numpy.abs",
"os.getcwd",
"os.path.exists",
"numpy.diff",
"numpy.sin",
"numpy.cos",
"collections.OrderedDict",
"re.sub",
"os.chdir",
"numpy.sqrt"
] | [((920, 939), 'numpy.diff', 'np.diff', (['velocities'], {}), '(velocities)\n', (927, 939), True, 'import numpy as np\n'), ((1086, 1138), 're.sub', 're.sub', (['"""([^.]+)\\\\.gpx"""', '"""raw_csv/\\\\1.csv"""', 'filename'], {}), "('([^.]+)\\\\.gpx', 'raw_csv/\\\\1.csv', filename)\n", (1092, 1138), False, 'import re\n'), ((1146, 1174), 'os.path.exists', 'os.path.exists', (['new_filename'], {}), '(new_filename)\n', (1160, 1174), False, 'import os\n'), ((1975, 1994), 'numpy.diff', 'np.diff', (['elevations'], {}), '(elevations)\n', (1982, 1994), True, 'import numpy as np\n'), ((2007, 2025), 'numpy.sum', 'np.sum', (['velocities'], {}), '(velocities)\n', (2013, 2025), True, 'import numpy as np\n'), ((2039, 2062), 'numpy.sum', 'np.sum', (['(velocities ** 2)'], {}), '(velocities ** 2)\n', (2045, 2062), True, 'import numpy as np\n'), ((2074, 2097), 'numpy.sum', 'np.sum', (['(velocities ** 3)'], {}), '(velocities ** 3)\n', (2080, 2097), True, 'import numpy as np\n'), ((2164, 2207), 'numpy.sum', 'np.sum', (['(accelerations * (accelerations > 0))'], {}), '(accelerations * (accelerations > 0))\n', (2170, 2207), True, 'import numpy as np\n'), ((2510, 2559), 'numpy.sum', 'np.sum', (['(energy_increases * (energy_increases > 0))'], {}), '(energy_increases * (energy_increases > 0))\n', (2516, 2559), True, 'import numpy as np\n'), ((3135, 3146), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3144, 3146), False, 'import os\n'), ((3151, 3193), 'os.makedirs', 'os.makedirs', (['gpx_target_dir'], {'exist_ok': '(True)'}), '(gpx_target_dir, exist_ok=True)\n', (3162, 3193), False, 'import os\n'), ((3203, 3227), 'os.chdir', 'os.chdir', (['gpx_source_dir'], {}), '(gpx_source_dir)\n', (3211, 3227), False, 'import os\n'), ((3335, 3348), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3346, 3348), False, 'from collections import OrderedDict\n'), ((4177, 4199), 'os.setwd', 'os.setwd', (['original_dir'], {}), '(original_dir)\n', (4185, 4199), False, 'import os\n'), ((454, 472), 
'numpy.sin', 'np.sin', (['(dlat / 2.0)'], {}), '(dlat / 2.0)\n', (460, 472), True, 'import numpy as np\n'), ((542, 552), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (549, 552), True, 'import numpy as np\n'), ((554, 568), 'numpy.sqrt', 'np.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (561, 568), True, 'import numpy as np\n'), ((1310, 1344), 'os.path.join', 'os.path.join', (['target_dir', 'filename'], {}), '(target_dir, filename)\n', (1322, 1344), False, 'import os\n'), ((2123, 2148), 'numpy.abs', 'np.abs', (['elevation_changes'], {}), '(elevation_changes)\n', (2129, 2148), True, 'import numpy as np\n'), ((3256, 3271), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (3266, 3271), False, 'import os\n'), ((3595, 3645), 'os.path.join', 'os.path.join', (['gpx_target_dir', 'gpx_summary_filename'], {}), '(gpx_target_dir, gpx_summary_filename)\n', (3607, 3645), False, 'import os\n'), ((475, 487), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (481, 487), True, 'import numpy as np\n'), ((488, 500), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (494, 500), True, 'import numpy as np\n'), ((501, 517), 'numpy.sin', 'np.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (507, 517), True, 'import numpy as np\n')] |
#!/sscc/opt/anaconda3/bin/python
import numpy as nump
ndsa = 58 #number of DSAs
###################################################################Simulation Engine Classes#####################################################
class Event:
"""
This is an Event class.
Attributes:
@event_type: type of event
@event_time: time of event
@event_information: information of event
"""
def __init__(self,event_type,event_time,event_information):
self.type = event_type
self.time = event_time
self.info = event_information
class G:
"""
This is a class that contains all the global variables relevant to the Simulator
"""
seed = 7777 #Random Number Seed
maxtime=5 #Maximum Clock Time
nreps =5 #Number of Replications
clock =0 #Clock
#pid = 100000 #Patient id variable, should be larger than size of initial waitlist
oid = 0 #Donor id variable
maxrejects =999 #Maximum number of rejections from COMPATIBLE recipients before discarding organ
#progtime = 30/365 #Time until next patient disease progression (non HCC)
regional_sharing =0 #1=Full Regional Sharing /0=Standard (Share 15 + Share 35)
sodium =1 #1=Use MELD Sodium
capanddelay =0 #1=Use HCC cap and delay
spartners =0 #1=Use Sharing Partners
ShareU = 35 #threshold for "Share35 Policy"
ShareL = 15 #threshold for "Share15" Policy
localboost = 0 #MELD score boost for local patients
regionalboost = 0 #MELD score boost for regional patients
# Initialize Output
record_deaths = nump.zeros(shape=(1,3)) #record number of deaths per year
record_mr_disparity_mean = nump.zeros(shape=(1,3)) # record DSA average mortality rate per year
record_mr_disparity_std = nump.zeros(shape=(1,3)) # record standard deviation of DSA mortality rate
record_meld_disparity_mean = nump.zeros(shape=(1,3)) #record average MELD score at transplant for non-status 1 candidates
record_meld_disparity_std = nump.zeros(shape=(1,3)) #record standard deviation of MELD score at transplant for non-status 1 candidates
record_medMELDmean =nump.zeros(shape=(1,3)) #record median MELD at transplant over the year for non-Status 1 candidates
record_medMELDstd=nump.zeros(shape=(1,3)) #record standard deviation of median MELD at transplant for non-status 1 candidates
record_ydeaths = nump.zeros(shape=(1,ndsa+3)) #record number of deaths across DSAs
record_ytransplants=nump.zeros(shape=(1,ndsa+3)) #record number of transplants across DSAs
record_yarrivals=nump.zeros(shape=(1,ndsa+3)) #record number of patient arrivals across DSAs
record_ycandidates=nump.zeros(shape=(1,ndsa+3)) #record number of candidates at the beginning of the year across DSAs
record_yremoved=nump.zeros(shape=(1,ndsa+3)) #record number of waitlist candidates removed from waitlist during the year besides death or transplant
record_ywait=nump.zeros(shape=(1,ndsa+3),dtype=float) #record accumulated total transplant waiting time across DSAs
record_yMELD =nump.zeros(shape=(1,ndsa+3)) #record accumulated total transplant MELD scores of patients across DSA
record_txDSA = nump.zeros(shape=(ndsa,ndsa)) #record the total number of organs procured from DSA i to DSA j for all years and replications
record_txDSAoutput = nump.zeros(shape=(ndsa,ndsa)) #record the number of livers from DSA i to DSA j at replication-year(t)
record_txID = nump.zeros(shape=(1,6)) #record patients who were transplanted; does not include those who were ever or would have been relisted
record_doID = nump.zeros(shape=(1,5)) #record patients who were transplanted along with their corresponding donors; does not include transplant patients who were ever or would have been relisted
record_removals = nump.zeros(shape=(1,6)) #record patients removed for any reason besides transplant/death
record_yrelists = nump.zeros(shape=(1,ndsa+3)) #record number of candidates relisted for transplant during the byear by DSA
record_yregrafts = nump.zeros(shape=(1,ndsa+3)) #record number of relisted candidates who received re-transplant durint the year by DSA
record_txIDregraft = nump.zeros(shape=(1,6)) #record patients who were re-transplanted
record_doIDregraft = nump.zeros(shape=(1,5)) #record patients who were re-transplanted along with corresponding donors
record_relists = nump.zeros(shape=(1,6)) #record information of patient relisted
class SimStat:
"""
Simulation Statistics
"""
numcandidates =nump.zeros(shape=(ndsa,1),dtype=int) #number of candidates across DSA
ycandidates = nump.zeros(shape=(ndsa,1),dtype=int) #number of candidates at beginning of year across DSA
yarrivals = nump.zeros(shape=(ndsa,1),dtype=int) #number of patient arrivals across DSAs
ydeaths = nump.zeros(shape=(ndsa,1),dtype=int) #number of deaths across DSAs
yremoved = nump.zeros(shape=(ndsa,1),dtype=int) #number of removals across DSAs
ytransplants = nump.zeros(shape=(ndsa,1),dtype=int) #number of transplant across DSAs
ywait = nump.zeros(shape=(ndsa,1),dtype=float) #accumulated total waiting time across DSAs
yMELD = nump.zeros(shape=(ndsa,1),dtype=int) #accumulated total MELD scores of patients across DSAs
ymedMELD = [[] for i in range(0,ndsa)] #a list of MELD scores for each DSA
yrelists = nump.zeros(shape=(ndsa,1),dtype=int) #number of relists across DSAs
yregrafts = nump.zeros(shape=(ndsa,1),dtype=int) #number of re-transplants across DSAs
##################################################################################################################################################################################
| [
"numpy.zeros"
] | [((1659, 1683), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1669, 1683), True, 'import numpy as nump\n'), ((1748, 1772), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1758, 1772), True, 'import numpy as nump\n'), ((1847, 1871), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1857, 1871), True, 'import numpy as nump\n'), ((1954, 1978), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (1964, 1978), True, 'import numpy as nump\n'), ((2079, 2103), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2089, 2103), True, 'import numpy as nump\n'), ((2210, 2234), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2220, 2234), True, 'import numpy as nump\n'), ((2332, 2356), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 3)'}), '(shape=(1, 3))\n', (2342, 2356), True, 'import numpy as nump\n'), ((2462, 2493), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (2472, 2493), True, 'import numpy as nump\n'), ((2552, 2583), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (2562, 2583), True, 'import numpy as nump\n'), ((2644, 2675), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (2654, 2675), True, 'import numpy as nump\n'), ((2743, 2774), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (2753, 2774), True, 'import numpy as nump\n'), ((2862, 2893), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (2872, 2893), True, 'import numpy as nump\n'), ((3013, 3057), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)', 'dtype': 'float'}), '(shape=(1, ndsa + 3), dtype=float)\n', (3023, 3057), True, 'import numpy as nump\n'), ((3134, 3165), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), 
'(shape=(1, ndsa + 3))\n', (3144, 3165), True, 'import numpy as nump\n'), ((3254, 3284), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, ndsa)'}), '(shape=(ndsa, ndsa))\n', (3264, 3284), True, 'import numpy as nump\n'), ((3404, 3434), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, ndsa)'}), '(shape=(ndsa, ndsa))\n', (3414, 3434), True, 'import numpy as nump\n'), ((3524, 3548), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 6)'}), '(shape=(1, 6))\n', (3534, 3548), True, 'import numpy as nump\n'), ((3671, 3695), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 5)'}), '(shape=(1, 5))\n', (3681, 3695), True, 'import numpy as nump\n'), ((3874, 3898), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 6)'}), '(shape=(1, 6))\n', (3884, 3898), True, 'import numpy as nump\n'), ((3985, 4016), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (3995, 4016), True, 'import numpy as nump\n'), ((4114, 4145), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, ndsa + 3)'}), '(shape=(1, ndsa + 3))\n', (4124, 4145), True, 'import numpy as nump\n'), ((4256, 4280), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 6)'}), '(shape=(1, 6))\n', (4266, 4280), True, 'import numpy as nump\n'), ((4347, 4371), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 5)'}), '(shape=(1, 5))\n', (4357, 4371), True, 'import numpy as nump\n'), ((4466, 4490), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(1, 6)'}), '(shape=(1, 6))\n', (4476, 4490), True, 'import numpy as nump\n'), ((4609, 4647), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (4619, 4647), True, 'import numpy as nump\n'), ((4697, 4735), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (4707, 4735), True, 'import numpy as nump\n'), ((4806, 4844), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (4816, 4844), True, 'import 
numpy as nump\n'), ((4901, 4939), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (4911, 4939), True, 'import numpy as nump\n'), ((4987, 5025), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (4997, 5025), True, 'import numpy as nump\n'), ((5075, 5113), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (5085, 5113), True, 'import numpy as nump\n'), ((5158, 5198), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'float'}), '(shape=(ndsa, 1), dtype=float)\n', (5168, 5198), True, 'import numpy as nump\n'), ((5253, 5291), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (5263, 5291), True, 'import numpy as nump\n'), ((5443, 5481), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (5453, 5481), True, 'import numpy as nump\n'), ((5530, 5568), 'numpy.zeros', 'nump.zeros', ([], {'shape': '(ndsa, 1)', 'dtype': 'int'}), '(shape=(ndsa, 1), dtype=int)\n', (5540, 5568), True, 'import numpy as nump\n')] |
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow as image_summary
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.client import device_lib
import matplotlib.pyplot as plt
from collections import namedtuple
import pickle
# from tqdm import tqdm_notebook
# number 1 to 10 data
(train_img, train_lab), (test_img, test_lab) = tf.keras.datasets.cifar10.load_data()
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
print(np.size(train_img[0]))
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_img[i], cmap=plt.cm.binary)
# The CIFAR labels happen to be arrays,
# which is why you need the extra index
plt.xlabel(class_names[train_lab[i][0]])
# plt.show()
EPOCH = 10
DATA_PATH = "./cifar-10-batches-py"
OUTPUT_PATH_LOG = './l2_logs_latest_BS300_EP10'
L2_REGULARIZATION = True
BATCH_SIZE = 300
STEP = 1000
LEARNING_RATE = 1e-4
def unpickle(file):
with open(os.path.join(DATA_PATH, file), 'rb') as fo:
dict = pickle.load(fo, encoding='latin1')
return dict
def one_hot(vec, vals=10):
n = len(vec)
out = np.zeros((n, vals))
out[range(n), vec] = 1
return out
class CifarLoader(object):
def __init__(self, source_files):
self._source = source_files
self._i = 0
self.images = None
self.labels = None
def load(self):
data = [unpickle(f) for f in self._source]
images = np.vstack([d["data"] for d in data])
n = len(images)
self.images = images.reshape(n, 3, 32, 32).transpose(0, 2, 3, 1).astype(float) / 255
self.labels = one_hot(np.hstack([d["labels"] for d in data]), 10)
return self
def next_batch(self, batch_size):
x, y = self.images[self._i:self._i+batch_size], self.labels[self._i:self._i+batch_size]
self._i = (self._i + batch_size) % len(self.images)
return x, y
class CifarDataManager(object):
def __init__(self):
self.train = CifarLoader(["data_batch_{}".format(i)
for i in range(1, 6)]).load()
self.test = CifarLoader(["test_batch"]).load()
def display_cifar(images, size):
n = len(images)
plt.figure()
plt.gca().set_axis_off()
im = np.vstack([np.hstack([images[np.random.choice(n)] for i in range(size)])
for i in range(size)])
plt.imshow(im)
plt.show()
d = CifarDataManager()
print("Number of train images: {}".format(len(d.train.images)))
print("Number of train labels: {}".format(len(d.train.labels)))
print("Number of test images: {}".format(len(d.test.images)))
print("Number of test images: {}".format(len(d.test.labels)))
images = d.train.images
#display_cifar(images, 10)
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre = sess.run(prediction, feed_dict={x_in: v_xs, keep_prob: 1})
correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={
x_in: v_xs, y_in: v_ys, keep_prob: 1})
return result
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
# stride [1, x_movement, y_movement, 1]
# Must have strides[0] = strides[3] = 1
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
# stride [1, x_movement, y_movement, 1]
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def get_activations(layer, stimuli):
units = sess.run(layer, feed_dict={x_in: np.reshape(
stimuli, [1, 784], order='F'), keep_prob: 1.0})
plot_nn_filter(units)
def plot_nn_filter(units):
import math
filters = units.shape[3]
plt.figure(1, figsize=(20, 20))
n_columns = 6
n_rows = math.ceil(filters / n_columns) + 1
for i in range(filters):
plt.subplot(n_rows, n_columns, i+1)
plt.title('Filter ' + str(i))
plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
plt.tight_layout()
plt.show()
tf.reset_default_graph()
cifar = CifarDataManager()
x_in = tf.placeholder(dtype=tf.float32, shape=(None, 32, 32, 3))/255.
# x_in = tf.placeholder(dtype=tf.float32, shape=(None, 32*32*3))/255.
y_in = tf.placeholder(dtype=tf.float32, shape=(None, 10))
keep_prob = tf.placeholder(tf.float32)
# 紀錄 3 張影像在 tensorboard 上
tf.summary.image('Input_images', x_in, max_outputs=3)
print(x_in.shape) # [n_samples, 32,32,3]
## conv1 layer ##
with tf.name_scope('conv1'):
with tf.name_scope('weights'):
# patch 5x5, in size 1, out size 32
W_conv1 = weight_variable([5, 5, 3, 32], name='W_conv1')
tf.summary.histogram('conv1/weights', W_conv1)
with tf.name_scope('bias'):
b_conv1 = bias_variable([32], name='b_conv1')
tf.summary.histogram('conv1/biases', b_conv1)
h_conv1 = tf.nn.relu(conv2d(x_in, W_conv1) +
b_conv1) # output size 28x28x32
tf.summary.histogram('conv1/outputs', h_conv1)
# output size 14x14x32
h_pool1 = max_pool_2x2(h_conv1)
print('W_conv1', W_conv1)
print('b_conv1', b_conv1)
print('h_pool1', h_pool1)
## conv2 layer ##
with tf.name_scope('conv2'):
with tf.name_scope('weights'):
# patch 5x5, in size 32, out size 64
W_conv2 = weight_variable([5, 5, 32, 64], name='W_conv2')
tf.summary.histogram('conv2/weights', W_conv2)
with tf.name_scope('bias'):
b_conv2 = bias_variable([64], name='b_conv2')
tf.summary.histogram('conv2/biases', b_conv2)
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) +
b_conv2) # output size 14x14x64
tf.summary.histogram('conv2/outputs', h_conv2)
# output size 7x7x64
h_pool2 = max_pool_2x2(h_conv2)
print('W_conv2', W_conv2)
print('b_conv2', b_conv2)
print('h_pool2', h_pool2)
## fc1 layer ##
with tf.name_scope('fc1'):
with tf.name_scope('weights'):
W_fc1 = weight_variable([8*8*64, 1024], name='W_fc1')
tf.summary.histogram('fc1/weights', W_fc1)
with tf.name_scope('bias'):
b_fc1 = bias_variable([1024], name='b_fc1')
tf.summary.histogram('fc1/biases', b_fc1)
# [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# tf.summary.histogram('fc1/outputs', h_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
print('W_fc1', W_fc1)
print('b_fc1', b_fc1)
## fc2 layer ##
with tf.name_scope('fc2'):
with tf.name_scope('weights'):
W_fc2 = weight_variable([1024, 10], name='W_fc2')
tf.summary.histogram('fc2/weights', W_fc2)
with tf.name_scope('bias'):
b_fc2 = bias_variable([10], name='b_fc2')
tf.summary.histogram('fc2/biases', b_fc2)
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# tf.summary.histogram('fc2/outputs', prediction)
## Loss ##
with tf.name_scope('loss'):
l2_loss = 0.0001*tf.nn.l2_loss(W_conv1) + 0.0001*tf.nn.l2_loss(W_conv2) + 0.0001*tf.nn.l2_loss(W_fc1) + 0.0001*tf.nn.l2_loss(W_fc2)
if L2_REGULARIZATION:
loss = tf.reduce_mean(-tf.reduce_sum(y_in * tf.log(prediction) + l2_loss,
reduction_indices=[1]))
else:
loss = tf.reduce_mean(-tf.reduce_sum(y_in * tf.log(prediction),
reduction_indices=[1]))
tf.summary.scalar('loss', loss)
## optimizer ##
with tf.name_scope('Optimizer'):
optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
result = tf.equal(tf.argmax(prediction, 1), tf.argmax(y_in, 1))
# accuracy ##
with tf.name_scope('Accuracy'):
accuracy = tf.reduce_mean(tf.cast(result, tf.float32))
# 紀錄 accuracy
tf.summary.scalar('Accuracy', accuracy)
# 把上面所有的 tf.summary 整合成一個 operation call
opsSummary = tf.summary.merge_all()
# images=x_in[:25]
# tf.summary.image('25 training data examples', images, max_outputs = 25)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Creat writer to write data needed for tensorboard
writer = tf.summary.FileWriter(OUTPUT_PATH_LOG, sess.graph)
# Show graph of neural network in tensorboard
writer.add_graph(sess.graph)
writerTrain = tf.summary.FileWriter(os.path.join(OUTPUT_PATH_LOG, 'train'))
writerTest = tf.summary.FileWriter(os.path.join(OUTPUT_PATH_LOG, 'test'))
sess.run(tf.global_variables_initializer())
for i in range(EPOCH):
# mini batch training
for j in range(STEP):
train_batch = cifar.train.next_batch(BATCH_SIZE)
test_batch = cifar.train.next_batch(BATCH_SIZE)
# print(np.shape(train_batch[0]))
# print(np.shape(train_batch[1]))
# print('!!!', train_batch[1])
sess.run(optimizer, feed_dict={
x_in: train_batch[0], y_in: train_batch[1], keep_prob: 0.5})
if j % 50 == 0:
print((i+1), ' epoch, ', (j+50), ' iteration')
train_accuracy = sess.run(accuracy, feed_dict={
x_in: train_batch[0], y_in: train_batch[1], keep_prob: 0})
test_accuracy = sess.run(accuracy, feed_dict={
x_in: test_batch[0], y_in: test_batch[1], keep_prob: 0})
# print('Epoch %2d: acc = %.3f, test_acc = %.3f' %
# (i+1, train_accuracy*100., test_accuracy*100.))
# Calculate desired information to be shown in tensorboard
summary = sess.run(opsSummary, feed_dict={
x_in: train_batch[0], y_in: train_batch[1], keep_prob: 1})
writerTrain.add_summary(summary, global_step=500*i+j)
summary = sess.run(opsSummary, feed_dict={
x_in: test_batch[0], y_in: test_batch[1], keep_prob: 1})
writerTest.add_summary(summary, global_step=500*i+j)
accuracy_num = compute_accuracy(
test_batch[0], test_batch[1])
print('accuracy: ', accuracy_num)
# saver = tf.train.Saver()
# saver.save(sess, os.path.join(OUTPUT_PATH_LOG, 'model.ckpt'))
| [
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.train.AdamOptimizer",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"tensorflow.Variable",
"pickle.load",
"tensorflow.nn.conv2d",
"tensorflow.matmul",
"matplotlib.pyplot.gca",
"tensorflow.GPUOptions",
"matplotlib.pyplo... | [((417, 454), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), '()\n', (452, 454), True, 'import tensorflow as tf\n'), ((606, 634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (616, 634), True, 'import matplotlib.pyplot as plt\n'), ((4450, 4474), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4472, 4474), True, 'import tensorflow as tf\n'), ((4649, 4699), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 10)'}), '(dtype=tf.float32, shape=(None, 10))\n', (4663, 4699), True, 'import tensorflow as tf\n'), ((4713, 4739), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4727, 4739), True, 'import tensorflow as tf\n'), ((4767, 4820), 'tensorflow.summary.image', 'tf.summary.image', (['"""Input_images"""', 'x_in'], {'max_outputs': '(3)'}), "('Input_images', x_in, max_outputs=3)\n", (4783, 4820), True, 'import tensorflow as tf\n'), ((8309, 8331), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (8329, 8331), True, 'import tensorflow as tf\n'), ((8441, 8491), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.5)'}), '(per_process_gpu_memory_fraction=0.5)\n', (8454, 8491), True, 'import tensorflow as tf\n'), ((8620, 8670), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['OUTPUT_PATH_LOG', 'sess.graph'], {}), '(OUTPUT_PATH_LOG, sess.graph)\n', (8641, 8670), True, 'import tensorflow as tf\n'), ((583, 604), 'numpy.size', 'np.size', (['train_img[0]'], {}), '(train_img[0])\n', (590, 604), True, 'import numpy as np\n'), ((658, 682), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (669, 682), True, 'import matplotlib.pyplot as plt\n'), ((683, 697), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (693, 697), True, 
'import matplotlib.pyplot as plt\n'), ((702, 716), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (712, 716), True, 'import matplotlib.pyplot as plt\n'), ((721, 736), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (729, 736), True, 'import matplotlib.pyplot as plt\n'), ((741, 785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_img[i]'], {'cmap': 'plt.cm.binary'}), '(train_img[i], cmap=plt.cm.binary)\n', (751, 785), True, 'import matplotlib.pyplot as plt\n'), ((879, 919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['class_names[train_lab[i][0]]'], {}), '(class_names[train_lab[i][0]])\n', (889, 919), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1327), 'numpy.zeros', 'np.zeros', (['(n, vals)'], {}), '((n, vals))\n', (1316, 1327), True, 'import numpy as np\n'), ((2372, 2384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2382, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2543, 2557), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (2553, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2572), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2570, 2572), True, 'import matplotlib.pyplot as plt\n'), ((3347, 3385), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (3366, 3385), True, 'import tensorflow as tf\n'), ((3397, 3428), 'tensorflow.Variable', 'tf.Variable', (['initial'], {'name': 'name'}), '(initial, name=name)\n', (3408, 3428), True, 'import tensorflow as tf\n'), ((3477, 3506), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (3488, 3506), True, 'import tensorflow as tf\n'), ((3518, 3538), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (3529, 3538), True, 'import tensorflow as tf\n'), ((3658, 3714), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, 
strides=[1, 1, 1, 1], padding='SAME')\n", (3670, 3714), True, 'import tensorflow as tf\n'), ((3793, 3868), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (3807, 3868), True, 'import tensorflow as tf\n'), ((4125, 4156), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 20)'}), '(1, figsize=(20, 20))\n', (4135, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4432), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4430, 4432), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4445, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4509, 4566), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '(None, 32, 32, 3)'}), '(dtype=tf.float32, shape=(None, 32, 32, 3))\n', (4523, 4566), True, 'import tensorflow as tf\n'), ((4887, 4909), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv1"""'], {}), "('conv1')\n", (4900, 4909), True, 'import tensorflow as tf\n'), ((5361, 5407), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv1/outputs"""', 'h_conv1'], {}), "('conv1/outputs', h_conv1)\n", (5381, 5407), True, 'import tensorflow as tf\n'), ((5590, 5612), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv2"""'], {}), "('conv2')\n", (5603, 5612), True, 'import tensorflow as tf\n'), ((6069, 6115), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv2/outputs"""', 'h_conv2'], {}), "('conv2/outputs', h_conv2)\n", (6089, 6115), True, 'import tensorflow as tf\n'), ((6290, 6310), 'tensorflow.name_scope', 'tf.name_scope', (['"""fc1"""'], {}), "('fc1')\n", (6303, 6310), True, 'import tensorflow as tf\n'), ((6665, 6702), 'tensorflow.reshape', 'tf.reshape', (['h_pool2', '[-1, 8 * 8 * 64]'], {}), '(h_pool2, [-1, 8 * 8 * 64])\n', (6675, 6702), True, 'import 
tensorflow as tf\n'), ((6828, 6859), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, keep_prob)\n', (6841, 6859), True, 'import tensorflow as tf\n'), ((6935, 6955), 'tensorflow.name_scope', 'tf.name_scope', (['"""fc2"""'], {}), "('fc2')\n", (6948, 6955), True, 'import tensorflow as tf\n'), ((7373, 7394), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (7386, 7394), True, 'import tensorflow as tf\n'), ((7870, 7901), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (7887, 7901), True, 'import tensorflow as tf\n'), ((7924, 7950), 'tensorflow.name_scope', 'tf.name_scope', (['"""Optimizer"""'], {}), "('Optimizer')\n", (7937, 7950), True, 'import tensorflow as tf\n'), ((8040, 8064), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (8049, 8064), True, 'import tensorflow as tf\n'), ((8066, 8084), 'tensorflow.argmax', 'tf.argmax', (['y_in', '(1)'], {}), '(y_in, 1)\n', (8075, 8084), True, 'import tensorflow as tf\n'), ((8106, 8131), 'tensorflow.name_scope', 'tf.name_scope', (['"""Accuracy"""'], {}), "('Accuracy')\n", (8119, 8131), True, 'import tensorflow as tf\n'), ((8214, 8253), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy"""', 'accuracy'], {}), "('Accuracy', accuracy)\n", (8231, 8253), True, 'import tensorflow as tf\n'), ((8784, 8822), 'os.path.join', 'os.path.join', (['OUTPUT_PATH_LOG', '"""train"""'], {}), "(OUTPUT_PATH_LOG, 'train')\n", (8796, 8822), False, 'import os\n'), ((8859, 8896), 'os.path.join', 'os.path.join', (['OUTPUT_PATH_LOG', '"""test"""'], {}), "(OUTPUT_PATH_LOG, 'test')\n", (8871, 8896), False, 'import os\n'), ((8908, 8941), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8939, 8941), True, 'import tensorflow as tf\n'), ((1198, 1232), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""latin1"""'}), "(fo, encoding='latin1')\n", (1209, 
1232), False, 'import pickle\n'), ((1635, 1671), 'numpy.vstack', 'np.vstack', (["[d['data'] for d in data]"], {}), "([d['data'] for d in data])\n", (1644, 1671), True, 'import numpy as np\n'), ((3062, 3081), 'tensorflow.argmax', 'tf.argmax', (['y_pre', '(1)'], {}), '(y_pre, 1)\n', (3071, 3081), True, 'import tensorflow as tf\n'), ((3083, 3101), 'tensorflow.argmax', 'tf.argmax', (['v_ys', '(1)'], {}), '(v_ys, 1)\n', (3092, 3101), True, 'import tensorflow as tf\n'), ((3133, 3172), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3140, 3172), True, 'import tensorflow as tf\n'), ((4188, 4218), 'math.ceil', 'math.ceil', (['(filters / n_columns)'], {}), '(filters / n_columns)\n', (4197, 4218), False, 'import math\n'), ((4260, 4297), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_columns', '(i + 1)'], {}), '(n_rows, n_columns, i + 1)\n', (4271, 4297), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4409), 'matplotlib.pyplot.imshow', 'plt.imshow', (['units[0, :, :, i]'], {'interpolation': '"""nearest"""', 'cmap': '"""gray"""'}), "(units[0, :, :, i], interpolation='nearest', cmap='gray')\n", (4352, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4920, 4944), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (4933, 4944), True, 'import tensorflow as tf\n'), ((5063, 5109), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv1/weights"""', 'W_conv1'], {}), "('conv1/weights', W_conv1)\n", (5083, 5109), True, 'import tensorflow as tf\n'), ((5119, 5140), 'tensorflow.name_scope', 'tf.name_scope', (['"""bias"""'], {}), "('bias')\n", (5132, 5140), True, 'import tensorflow as tf\n'), ((5204, 5249), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv1/biases"""', 'b_conv1'], {}), "('conv1/biases', b_conv1)\n", (5224, 5249), True, 'import tensorflow as tf\n'), ((5623, 5647), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), 
"('weights')\n", (5636, 5647), True, 'import tensorflow as tf\n'), ((5768, 5814), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv2/weights"""', 'W_conv2'], {}), "('conv2/weights', W_conv2)\n", (5788, 5814), True, 'import tensorflow as tf\n'), ((5824, 5845), 'tensorflow.name_scope', 'tf.name_scope', (['"""bias"""'], {}), "('bias')\n", (5837, 5845), True, 'import tensorflow as tf\n'), ((5909, 5954), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""conv2/biases"""', 'b_conv2'], {}), "('conv2/biases', b_conv2)\n", (5929, 5954), True, 'import tensorflow as tf\n'), ((6321, 6345), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (6334, 6345), True, 'import tensorflow as tf\n'), ((6417, 6459), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fc1/weights"""', 'W_fc1'], {}), "('fc1/weights', W_fc1)\n", (6437, 6459), True, 'import tensorflow as tf\n'), ((6469, 6490), 'tensorflow.name_scope', 'tf.name_scope', (['"""bias"""'], {}), "('bias')\n", (6482, 6490), True, 'import tensorflow as tf\n'), ((6552, 6593), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fc1/biases"""', 'b_fc1'], {}), "('fc1/biases', b_fc1)\n", (6572, 6593), True, 'import tensorflow as tf\n'), ((6966, 6990), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (6979, 6990), True, 'import tensorflow as tf\n'), ((7058, 7100), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fc2/weights"""', 'W_fc2'], {}), "('fc2/weights', W_fc2)\n", (7078, 7100), True, 'import tensorflow as tf\n'), ((7110, 7131), 'tensorflow.name_scope', 'tf.name_scope', (['"""bias"""'], {}), "('bias')\n", (7123, 7131), True, 'import tensorflow as tf\n'), ((7191, 7232), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""fc2/biases"""', 'b_fc2'], {}), "('fc2/biases', b_fc2)\n", (7211, 7232), True, 'import tensorflow as tf\n'), ((8163, 8190), 'tensorflow.cast', 'tf.cast', (['result', 'tf.float32'], 
{}), '(result, tf.float32)\n', (8170, 8190), True, 'import tensorflow as tf\n'), ((8517, 8556), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (8531, 8556), True, 'import tensorflow as tf\n'), ((1139, 1168), 'os.path.join', 'os.path.join', (['DATA_PATH', 'file'], {}), '(DATA_PATH, file)\n', (1151, 1168), False, 'import os\n'), ((1819, 1857), 'numpy.hstack', 'np.hstack', (["[d['labels'] for d in data]"], {}), "([d['labels'] for d in data])\n", (1828, 1857), True, 'import numpy as np\n'), ((2389, 2398), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2396, 2398), True, 'import matplotlib.pyplot as plt\n'), ((6722, 6752), 'tensorflow.matmul', 'tf.matmul', (['h_pool2_flat', 'W_fc1'], {}), '(h_pool2_flat, W_fc1)\n', (6731, 6752), True, 'import tensorflow as tf\n'), ((7264, 7292), 'tensorflow.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (7273, 7292), True, 'import tensorflow as tf\n'), ((7511, 7531), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_fc2'], {}), '(W_fc2)\n', (7524, 7531), True, 'import tensorflow as tf\n'), ((7968, 8005), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['LEARNING_RATE'], {}), '(LEARNING_RATE)\n', (7990, 8005), True, 'import tensorflow as tf\n'), ((3953, 3993), 'numpy.reshape', 'np.reshape', (['stimuli', '[1, 784]'], {'order': '"""F"""'}), "(stimuli, [1, 784], order='F')\n", (3963, 3993), True, 'import numpy as np\n'), ((7481, 7501), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_fc1'], {}), '(W_fc1)\n', (7494, 7501), True, 'import tensorflow as tf\n'), ((7417, 7439), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_conv1'], {}), '(W_conv1)\n', (7430, 7439), True, 'import tensorflow as tf\n'), ((7449, 7471), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_conv2'], {}), '(W_conv2)\n', (7462, 7471), True, 'import tensorflow as tf\n'), ((2452, 2471), 'numpy.random.choice', 'np.random.choice', (['n'], {}), '(n)\n', (2468, 2471), 
True, 'import numpy as np\n'), ((7774, 7792), 'tensorflow.log', 'tf.log', (['prediction'], {}), '(prediction)\n', (7780, 7792), True, 'import tensorflow as tf\n'), ((7610, 7628), 'tensorflow.log', 'tf.log', (['prediction'], {}), '(prediction)\n', (7616, 7628), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
####
# <NAME>
# Email: <EMAIL>
# Midterm
# PHYS 220
# 10/26/2018
####
import matplotlib.pyplot as plt
import numpy as np
class MysterySequence(object):
    """
    Iterates the logistic map x_{n+1} = r * x_n * (1 - x_n).

    Attributes:
        r: growth-rate parameter of the map.
        x0: initial population value (default 0.5).
        N: number of iterations to perform (default 100).
        xs: list of N+1 computed values (``None`` until evaluated).
    """
    def __init__(self, r, x0 = 0.5, N = 100):
        """Store the map parameters; computation is deferred until needed."""
        self.r = r
        self.x0 = x0
        self.N = N
        self.xs = None

    def evaluate(self):
        """
        Compute the sequence and store it in ``self.xs``.

        ``xs`` holds N+1 entries: the initial value followed by N iterates.
        """
        x = self.x0
        self.xs = []
        for _ in range(self.N + 1):
            self.xs.append(x)
            x = self.r * x * (1 - x)

    def __call__(self):
        """
        Return the list of sequence values, evaluating lazily on first call.
        """
        # was `self.xs == None`; identity comparison is the correct idiom
        if self.xs is None:
            self.evaluate()
        return self.xs
class SequencePlotter(MysterySequence):
    """
    MysterySequence that can also render itself as a matplotlib line plot.
    """

    def plot(self):
        """
        Recompute the sequence and draw it: red dots joined by a dashed
        black line, with axis labels and a parameter summary as the title.
        """
        self.evaluate()
        iterations = list(range(self.N + 1))
        values = self.xs
        plt.figure(figsize=(12, 8))
        ax = plt.axes()
        ax.plot(iterations, values, marker='.', color='r')
        ax.plot(iterations, values, linestyle="--", color='k')
        title = "Sequence Parameters: x0={}, r={}, N={}".format(self.x0, self.r, self.N)
        ax.set(ylim=(0, 1), xlabel='Iteration K', ylabel='Population x(k)', title=title)
def scatterplot():
    """
    Draw the bifurcation diagram of the logistic map.

    For every r in [2.9, 4) with step 0.001 the map is iterated 300 times
    from x0 = 0.5; the first 151 values (the transient) are discarded and
    the remaining 150 asymptotic values are scatter-plotted against r.
    """
    xs = []
    ys = []
    for r in np.arange(2.9, 4, 0.001):
        seq = MysterySequence(r, 0.5, 300)
        # keep only the asymptotic tail (entries 151..300 -> 150 values)
        tail = seq()[151:]
        xs.extend([r] * len(tail))
        ys.extend(tail)
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.scatter(xs, ys, marker='.', color='r')
    # the original title was a literal with unformatted braces; render the
    # parameters as plain text instead
    ax.set(ylim=(0, 1), xlim=(2.9, 4), xlabel='Iteration R',
           ylabel='Asymptotic Value',
           title="Sequence Parameters: x0=0.5, r=[2.9,4], N=300")
| [
"matplotlib.pyplot.axes",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.subplots"
] | [((2171, 2195), 'numpy.arange', 'np.arange', (['(2.9)', '(4)', '(0.001)'], {}), '(2.9, 4, 0.001)\n', (2180, 2195), True, 'import numpy as np\n'), ((2396, 2425), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (2408, 2425), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1672), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (1655, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1684, 1694), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1692, 1694), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import torch
from padertorch.contrib.je.modules.gmm import GMM
from padertorch.ops.losses import gaussian_kl_divergence
from torch import nn
from padertorch.contrib.je.modules.hmm_utils import batch_forward_backward, batch_viterbi, squeeze_sequence
from padertorch.utils import to_list
class HMM(GMM):
    """
    Left-to-right HMM whose emission distributions are the Gaussians of the
    GMM base class.

    Each of the ``num_units`` units is modeled by ``states_per_unit``
    consecutive states. A state may loop on itself or advance to the next
    state of its unit; a unit's last state may additionally jump to the
    first state of any unit.

    >>> hmm = HMM(2, 3, 2)
    >>> torch.exp(hmm.log_transition_mat)
    """
    def __init__(
            self, feature_size, num_units, states_per_unit=3,
            covariance_type='full', locs_init_std=1.0, scale_init=1.0,
            initial_state=False, final_state=False, viterbi_training=False
    ):
        """
        Args:
            feature_size: dimensionality of the observed features.
            num_units: number of HMM units (e.g. phones).
            states_per_unit: number of consecutive states per unit.
            covariance_type: passed through to the GMM base class.
            locs_init_std: passed through to the GMM base class.
            scale_init: passed through to the GMM base class.
            initial_state: if True, force decoding to start in state 0.
            final_state: if True, force decoding to end in the last state.
            viterbi_training: if True, use hard Viterbi alignments also
                during training (instead of forward-backward posteriors).
        """
        self.num_units = num_units
        super().__init__(
            feature_size, num_units * states_per_unit,
            covariance_type=covariance_type,
            locs_init_std=locs_init_std, scale_init=scale_init
        )
        self.states_per_unit = states_per_unit
        # only each unit's first state carries (unit-prior) weight;
        # log(0) = -inf masks all other states out of the softmax
        weights_mask = np.zeros(num_units * states_per_unit)
        weights_mask[::states_per_unit] = 1
        self.log_weights_mask = nn.Parameter(
            torch.Tensor(np.log(weights_mask)), requires_grad=False
        )
        # within a unit: self-loop (diagonal) and advance (superdiagonal),
        # but not the superdiagonal entry that would cross a unit boundary
        within_hmm_transition_mask = (np.eye(self.num_classes) + np.eye(self.num_classes, k=1))
        last_states = np.arange(self.states_per_unit - 1, self.num_classes - 1, self.states_per_unit)
        within_hmm_transition_mask[last_states, last_states + 1] = 0.
        self.within_hmm_transition_mask = nn.Parameter(
            torch.Tensor(within_hmm_transition_mask), requires_grad=False
        )
        # between units: any unit-final state may jump to any unit-initial state
        between_hmm_transition_mask = np.zeros_like(within_hmm_transition_mask)
        between_hmm_transition_mask[self.states_per_unit - 1::self.states_per_unit] = 1
        between_hmm_transition_mask *= weights_mask
        self.between_hmm_transition_mask = nn.Parameter(
            torch.Tensor(between_hmm_transition_mask), requires_grad=False
        )
        self.initial_state = 0 if initial_state else None
        self.final_state = self.num_classes - 1 if final_state else None
        self.viterbi_training = viterbi_training

    @property
    def log_class_probs(self):
        """Log prior over states; only unit-initial states get mass."""
        log_weights = self.log_weights + self.log_weights_mask
        log_probs = torch.log_softmax(log_weights, dim=-1)
        return log_probs

    @property
    def log_transition_mat(self):
        """
        Log transition matrix: within-unit transitions get probability 0.5,
        the remaining 0.5 is distributed over between-unit jumps according
        to the class prior. Disallowed transitions are -inf (log(0)).
        """
        log_transition_mat = (
            self.within_hmm_transition_mask
            * torch.ones_like(self.within_hmm_transition_mask) * np.log(0.5)
            # floor the prior at -100 to keep the product finite on masked entries
            + self.between_hmm_transition_mask
            * (torch.max(self.log_class_probs, -100 * torch.ones_like(self.log_class_probs)) + np.log(0.5))
            + torch.log(self.within_hmm_transition_mask + self.between_hmm_transition_mask)
        )
        return log_transition_mat

    def forward(
            self, qz, seq_len=None, unit_sequence=None,
            no_onset=False, no_offset=False
    ):
        """
        Align frames to states and return soft (or hard) statistics.

        Args:
            qz: posterior distribution over latents; scored against the
                GMM's Gaussians via the KL divergence.
            seq_len: optional per-sequence lengths.
            unit_sequence: optional known unit transcriptions; if given,
                decoding is constrained to the corresponding state sequences.
            no_onset: per-sequence flag; if True, the sequence may start in
                any state (uniform start distribution).
            no_offset: per-sequence flag; if False, the sequence must end
                in a (unit-)final state.

        Returns:
            Tuple of (class_posteriors, state_transitions, log_rho), the
            first two moved to ``log_rho``'s device.
        """
        # frame/state log-likelihood proxy; fixed: was the undefined name
        # `kl_divergence` (the module imports `gaussian_kl_divergence`)
        log_rho = -gaussian_kl_divergence(qz, self.gaussians)
        no_onset = to_list(no_onset, log_rho.shape[0])
        no_offset = to_list(no_offset, log_rho.shape[0])
        # `np.float`/`np.int` aliases were removed in NumPy 1.24; use builtins
        log_startprob = np.array([
            -np.ones(self.num_classes)*np.log(self.num_classes) if non
            else self.log_class_probs.detach().cpu().numpy().astype(float)
            for non in no_onset
        ])
        log_transmat = self.log_transition_mat.detach().cpu().numpy().astype(float)
        framelogprob = log_rho.detach().cpu().numpy().astype(float)
        initial_state = [None if non else self.initial_state for non in no_onset]
        final_state = [None if noff else self.final_state for noff in no_offset]
        for i, noff in enumerate(no_offset):
            if not noff and self.final_state is None:
                # force the last frame onto a unit-final state
                # (log(0) = -inf masks the others)
                mask = np.zeros(self.num_classes)
                mask[self.states_per_unit - 1::self.states_per_unit] = 1.
                framelogprob[i, -1] += np.log(mask)
        if unit_sequence is not None:
            # fixed: was `self.hmm.states_per_unit`, but HMM has no `hmm`
            # attribute (AttributeError)
            states_per_unit = self.states_per_unit
            state_sequence = [
                state_sequence_from_unit_sequence(seq, states_per_unit)
                for seq in unit_sequence
            ]
        else:
            state_sequence = None
        if not self.training or self.viterbi_training:
            # hard alignment: one-hot posteriors and transition counts
            b, t, k = framelogprob.shape
            state_alignment = batch_viterbi(
                log_startprob, log_transmat, framelogprob, seq_len=seq_len,
                state_sequence=state_sequence,
                initial_state=initial_state, final_state=final_state
            )
            state_alignment = state_alignment.astype(int)
            class_posteriors = np.zeros_like(framelogprob)
            state_transitions = np.zeros((b, k, k))
            class_posteriors[np.arange(b)[:, None], np.arange(t), state_alignment] = 1.
            src_idx = state_alignment[:, :-1]
            dst_idx = state_alignment[:, 1:]
            np.add.at(state_transitions, (np.arange(b)[:, None], src_idx, dst_idx), 1)
        else:
            # soft alignment via forward-backward
            class_posteriors, state_transitions = batch_forward_backward(
                log_startprob, log_transmat, framelogprob, seq_len=seq_len,
                state_sequence=state_sequence,
                initial_state=initial_state, final_state=final_state
            )
        return (
            torch.Tensor(class_posteriors).to(log_rho.device),
            torch.Tensor(state_transitions).to(log_rho.device),
            log_rho
        )
def state_sequence_from_unit_sequence(unit_sequence, states_per_unit):
    """
    Expand a (squeezed) unit sequence into the corresponding state sequence:
    each unit u contributes states u*states_per_unit .. u*states_per_unit+states_per_unit-1.
    """
    states = []
    for unit in squeeze_sequence(unit_sequence):
        base = unit * states_per_unit
        for offset in range(states_per_unit):
            states.append(base + offset)
    return states
| [
"torch.log_softmax",
"torch.ones_like",
"numpy.zeros_like",
"padertorch.utils.to_list",
"numpy.log",
"numpy.zeros",
"numpy.ones",
"torch.Tensor",
"numpy.arange",
"padertorch.contrib.je.modules.hmm_utils.batch_viterbi",
"padertorch.contrib.je.modules.hmm_utils.squeeze_sequence",
"padertorch.con... | [((946, 983), 'numpy.zeros', 'np.zeros', (['(num_units * states_per_unit)'], {}), '(num_units * states_per_unit)\n', (954, 983), True, 'import numpy as np\n'), ((1270, 1349), 'numpy.arange', 'np.arange', (['(self.states_per_unit - 1)', '(self.num_classes - 1)', 'self.states_per_unit'], {}), '(self.states_per_unit - 1, self.num_classes - 1, self.states_per_unit)\n', (1279, 1349), True, 'import numpy as np\n'), ((1596, 1637), 'numpy.zeros_like', 'np.zeros_like', (['within_hmm_transition_mask'], {}), '(within_hmm_transition_mask)\n', (1609, 1637), True, 'import numpy as np\n'), ((2229, 2267), 'torch.log_softmax', 'torch.log_softmax', (['log_weights'], {'dim': '(-1)'}), '(log_weights, dim=-1)\n', (2246, 2267), False, 'import torch\n'), ((2991, 3026), 'padertorch.utils.to_list', 'to_list', (['no_onset', 'log_rho.shape[0]'], {}), '(no_onset, log_rho.shape[0])\n', (2998, 3026), False, 'from padertorch.utils import to_list\n'), ((3047, 3083), 'padertorch.utils.to_list', 'to_list', (['no_offset', 'log_rho.shape[0]'], {}), '(no_offset, log_rho.shape[0])\n', (3054, 3083), False, 'from padertorch.utils import to_list\n'), ((1190, 1214), 'numpy.eye', 'np.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (1196, 1214), True, 'import numpy as np\n'), ((1217, 1246), 'numpy.eye', 'np.eye', (['self.num_classes'], {'k': '(1)'}), '(self.num_classes, k=1)\n', (1223, 1246), True, 'import numpy as np\n'), ((1486, 1526), 'torch.Tensor', 'torch.Tensor', (['within_hmm_transition_mask'], {}), '(within_hmm_transition_mask)\n', (1498, 1526), False, 'import torch\n'), ((1847, 1888), 'torch.Tensor', 'torch.Tensor', (['between_hmm_transition_mask'], {}), '(between_hmm_transition_mask)\n', (1859, 1888), False, 'import torch\n'), ((2671, 2748), 'torch.log', 'torch.log', (['(self.within_hmm_transition_mask + self.between_hmm_transition_mask)'], {}), '(self.within_hmm_transition_mask + self.between_hmm_transition_mask)\n', (2680, 2748), False, 'import torch\n'), ((4335, 
4502), 'padertorch.contrib.je.modules.hmm_utils.batch_viterbi', 'batch_viterbi', (['log_startprob', 'log_transmat', 'framelogprob'], {'seq_len': 'seq_len', 'state_sequence': 'state_sequence', 'initial_state': 'initial_state', 'final_state': 'final_state'}), '(log_startprob, log_transmat, framelogprob, seq_len=seq_len,\n state_sequence=state_sequence, initial_state=initial_state, final_state\n =final_state)\n', (4348, 4502), False, 'from padertorch.contrib.je.modules.hmm_utils import batch_forward_backward, batch_viterbi, squeeze_sequence\n'), ((4648, 4675), 'numpy.zeros_like', 'np.zeros_like', (['framelogprob'], {}), '(framelogprob)\n', (4661, 4675), True, 'import numpy as np\n'), ((4708, 4727), 'numpy.zeros', 'np.zeros', (['(b, k, k)'], {}), '((b, k, k))\n', (4716, 4727), True, 'import numpy as np\n'), ((5058, 5234), 'padertorch.contrib.je.modules.hmm_utils.batch_forward_backward', 'batch_forward_backward', (['log_startprob', 'log_transmat', 'framelogprob'], {'seq_len': 'seq_len', 'state_sequence': 'state_sequence', 'initial_state': 'initial_state', 'final_state': 'final_state'}), '(log_startprob, log_transmat, framelogprob, seq_len=\n seq_len, state_sequence=state_sequence, initial_state=initial_state,\n final_state=final_state)\n', (5080, 5234), False, 'from padertorch.contrib.je.modules.hmm_utils import batch_forward_backward, batch_viterbi, squeeze_sequence\n'), ((5604, 5635), 'padertorch.contrib.je.modules.hmm_utils.squeeze_sequence', 'squeeze_sequence', (['unit_sequence'], {}), '(unit_sequence)\n', (5620, 5635), False, 'from padertorch.contrib.je.modules.hmm_utils import batch_forward_backward, batch_viterbi, squeeze_sequence\n'), ((1099, 1119), 'numpy.log', 'np.log', (['weights_mask'], {}), '(weights_mask)\n', (1105, 1119), True, 'import numpy as np\n'), ((3755, 3781), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {}), '(self.num_classes)\n', (3763, 3781), True, 'import numpy as np\n'), ((3895, 3907), 'numpy.log', 'np.log', (['mask'], {}), '(mask)\n', 
(3901, 3907), True, 'import numpy as np\n'), ((2486, 2497), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (2492, 2497), True, 'import numpy as np\n'), ((4780, 4792), 'numpy.arange', 'np.arange', (['t'], {}), '(t)\n', (4789, 4792), True, 'import numpy as np\n'), ((5318, 5348), 'torch.Tensor', 'torch.Tensor', (['class_posteriors'], {}), '(class_posteriors)\n', (5330, 5348), False, 'import torch\n'), ((5381, 5412), 'torch.Tensor', 'torch.Tensor', (['state_transitions'], {}), '(state_transitions)\n', (5393, 5412), False, 'import torch\n'), ((2435, 2483), 'torch.ones_like', 'torch.ones_like', (['self.within_hmm_transition_mask'], {}), '(self.within_hmm_transition_mask)\n', (2450, 2483), False, 'import torch\n'), ((2644, 2655), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (2650, 2655), True, 'import numpy as np\n'), ((3158, 3182), 'numpy.log', 'np.log', (['self.num_classes'], {}), '(self.num_classes)\n', (3164, 3182), True, 'import numpy as np\n'), ((4757, 4769), 'numpy.arange', 'np.arange', (['b'], {}), '(b)\n', (4766, 4769), True, 'import numpy as np\n'), ((4949, 4961), 'numpy.arange', 'np.arange', (['b'], {}), '(b)\n', (4958, 4961), True, 'import numpy as np\n'), ((3132, 3157), 'numpy.ones', 'np.ones', (['self.num_classes'], {}), '(self.num_classes)\n', (3139, 3157), True, 'import numpy as np\n'), ((2603, 2640), 'torch.ones_like', 'torch.ones_like', (['self.log_class_probs'], {}), '(self.log_class_probs)\n', (2618, 2640), False, 'import torch\n')] |
#!/usr/bin/env python3
import numpy as np
from gaussianKernel import gaussianKernel
from svmTrain import svmTrain
from svmPredict import svmPredict
def dataset3Params(X, y, Xval, yval):
    """
    Grid-search the optimal (C, sigma) pair for an RBF-kernel SVM.

    Trains an SVM for every combination of C and sigma drawn from
    [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30] and returns the pair with the
    lowest misclassification rate on the cross-validation set.

    Args:
        X: training features.
        y: training labels.
        Xval: cross-validation features.
        yval: cross-validation labels.

    Returns:
        Tuple (C, sigma) minimizing the cross-validation error.
    """
    values = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]
    best_error = float('inf')
    C, sigma = values[0], values[0]
    for C_val in values:
        for sigma_val in values:
            model = svmTrain(X, y, C_val, gaussianKernel, args=(sigma_val,))
            predictions = svmPredict(model, Xval)
            # fraction of mispredicted cross-validation examples
            error_val = np.mean(predictions != yval)
            if error_val < best_error:
                # (the original line ended in a stray backslash continuation
                # that spliced the following comment into the statement)
                best_error, C, sigma = error_val, C_val, sigma_val
    return (C, sigma)
#end
| [
"svmPredict.svmPredict",
"numpy.mean",
"svmTrain.svmTrain"
] | [((1494, 1550), 'svmTrain.svmTrain', 'svmTrain', (['X', 'y', 'C_val', 'gaussianKernel'], {'args': '(sigma_val,)'}), '(X, y, C_val, gaussianKernel, args=(sigma_val,))\n', (1502, 1550), False, 'from svmTrain import svmTrain\n'), ((1577, 1600), 'svmPredict.svmPredict', 'svmPredict', (['model', 'Xval'], {}), '(model, Xval)\n', (1587, 1600), False, 'from svmPredict import svmPredict\n'), ((1625, 1653), 'numpy.mean', 'np.mean', (['(predictions != yval)'], {}), '(predictions != yval)\n', (1632, 1653), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import argparse
import sys
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
from models import Generator, Discriminator, FeatureExtractor
from PIL import Image
import numpy as np
from train import setup
def down_and_up_sampling(image, save_name, upsampling):
    """
    Down-sample a PIL image and super-resolve it with a pretrained SRGAN
    generator, saving three PNGs under ./output: the normalized original
    (high_res_real), the generator's reconstruction (high_res_fake) and the
    down-sampled input (low_res), each named `save_name`.

    NOTE(review): the `upsampling` parameter is never used inside this
    function; the scale factor comes from `opt.upSampling` (set by `setup()`).
    """
    # command-line / default options from the training setup
    opt = setup()
    # create output folder
    try:
        os.makedirs('output/high_res_fake')
        os.makedirs('output/high_res_real')
        os.makedirs('output/low_res')
    except OSError:
        # folders already exist
        pass
    if torch.cuda.is_available() and not opt.cuda:
        print('[WARNING]: You have a CUDA device, so you should probably run with --cuda')
    # crop to the image's own size (i.e. a no-op crop) and convert to tensor
    transform = transforms.Compose([transforms.RandomCrop((
                            image.size[0],
                            image.size[1])),
                            transforms.Pad(padding = 0),
                            transforms.ToTensor()])
    # ImageNet channel statistics
    normalize = transforms.Normalize(mean = [0.485, 0.456, 0.406],
                                    std = [0.229, 0.224, 0.225])
    # [down sampling] down-sampling part
    # note: PIL size is (width, height), Resize takes (height, width)
    scale = transforms.Compose([transforms.ToPILImage(),
                            transforms.Resize((int(image.size[1] / opt.upSampling), int(image.size[0] / opt.upSampling))),
                            transforms.Pad(padding=0),
                            transforms.ToTensor(),
                            transforms.Normalize(
                                mean = [0.485, 0.456, 0.406],
                                std = [0.229, 0.224, 0.225])])
    # Equivalent to un-normalizing ImageNet (for correct visualization)
    unnormalize = transforms.Normalize(
        mean = [-2.118, -2.036, -1.804],
        std = [4.367, 4.464, 4.444])
    if opt.dataset == 'folder':
        # folder dataset
        dataset = datasets.ImageFolder(root = opt.dataroot, transform = transform)
    elif opt.dataset == 'cifar10':
        dataset = datasets.CIFAR10(root = opt.dataroot, download = True, train = False, transform = transform)
    elif opt.dataset == 'cifar100':
        dataset = datasets.CIFAR100(root = opt.dataroot, download = True, train = False, transform = transform)
    assert dataset
    # NOTE(review): `dataloader` is built but never consumed below; only the
    # single `image` argument is processed
    dataloader = torch.utils.data.DataLoader(dataset,
                            batch_size = opt.batchSize,
                            shuffle = False,
                            num_workers = int(opt.workers))
    my_loader = transforms.Compose([transforms.ToTensor()])
    image = my_loader(image)
    # [paras] loading paras from .pth files
    generator = Generator(16, opt.upSampling)
    if opt.generatorWeights != '':
        generator.load_state_dict(torch.load(opt.generatorWeights))
    discriminator = Discriminator()
    if opt.discriminatorWeights != '':
        discriminator.load_state_dict(torch.load(opt.discriminatorWeights))
    # For the content loss
    feature_extractor = FeatureExtractor(torchvision.models.vgg19(pretrained = True))
    # NOTE(review): the criteria and target tensors below are set up but no
    # loss is actually computed in this inference-only function
    content_criterion = nn.MSELoss()
    adversarial_criterion = nn.BCELoss()
    target_real = Variable(torch.ones(opt.batchSize, 1))
    target_fake = Variable(torch.zeros(opt.batchSize, 1))
    # if gpu is to be used
    if opt.cuda:
        generator.cuda()
        discriminator.cuda()
        feature_extractor.cuda()
        content_criterion.cuda()
        adversarial_criterion.cuda()
        target_real = target_real.cuda()
        target_fake = target_fake.cuda()
    # placeholder; overwritten by the scaled image inside the loop below
    low_res = torch.FloatTensor(opt.batchSize, 3, opt.imageSize[0], opt.imageSize[1])
    # print('Test started...')
    mean_generator_content_loss = 0.0
    mean_generator_adversarial_loss = 0.0
    mean_generator_total_loss = 0.0
    mean_discriminator_loss = 0.0
    # Set evaluation mode (not training)
    generator.eval()
    discriminator.eval()
    data = image
    # single-image "batch" of size 1
    for i in range(1):
        # Generate data
        high_res_real = data
        low_res = scale(high_res_real)
        low_res = torch.tensor([np.array(low_res)])
        high_res_real = normalize(high_res_real)
        high_res_real = torch.tensor([np.array(high_res_real)])
        # Generate real and fake inputs
        if opt.cuda:
            high_res_real = Variable(high_res_real.cuda())
            high_res_fake = generator(Variable(low_res).cuda())
        else:
            high_res_real = Variable(high_res_real)
            high_res_fake = generator(Variable(low_res)) # >>> create hr images
        save_image(unnormalize(high_res_real[0]), 'output/high_res_real/' + save_name)
        save_image(unnormalize(high_res_fake[0]), 'output/high_res_fake/' + save_name)
        save_image(unnormalize(low_res[0]), 'output/low_res/' + save_name)
def padding(imageA, imageB, factor, savename):
    """
    Center-crop imageB to `factor` times the size of imageA and write the
    result (as an image tensor) to `savename`.

    imageA only supplies the reference dimensions; it is not modified.
    """
    size_a = Image.open(imageA).size  # PIL size is (width, height)
    target = Image.open(imageB)
    crop_size = (size_a[1] * factor, size_a[0] * factor)
    pipeline = transforms.Compose([
        transforms.Pad(padding=0),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor()])
    save_image(pipeline(target), savename)
def create_test_data(path, count, bar):
    """
    Run down_and_up_sampling over every image in `path`, then pad the fake
    high-res outputs to match the real ones, updating a progress bar along
    the way.

    NOTE(review): the `count` parameter is immediately overwritten with 0,
    so its passed-in value is never used. The output paths below are built
    with backslashes and are therefore Windows-specific.

    Args:
        path: directory containing the input images (must end with a
            path separator, since filenames are appended directly).
        count: unused; reset to 0 and reused as the progress percentage.
        bar: progress-bar widget with a ``setValue`` method
            (presumably a Qt QProgressBar — confirm with caller).

    Returns:
        Tuple of (low_res, high_res_real, high_res_fake) output directories.
    """
    count = 0
    size = len(os.listdir(path))
    for img in os.listdir(path):
        '''print('>>> process image : {}'.format(img))'''
        image = Image.open(path + img)
        down_and_up_sampling(image, save_name = img, upsampling = 4)
        # advance the progress bar by an equal share per image
        count += 100 / size
        bar.setValue(count)
    try:
        os.makedirs(os.getcwd() + r'\output\padding_fake')
    except OSError:
        # directory already exists
        pass
    # pad each generated image to the size of its real counterpart
    # (note: r'...\\' raw strings end with two literal backslashes)
    for f in os.listdir(os.getcwd() + r'\output\high_res_fake'):
        padding(os.getcwd() + r'\output\high_res_real\\' + f,
                os.getcwd() + r'\output\high_res_fake\\' + f,
                1, os.getcwd() + r'\output\padding_fake\\' + f)
    lr_path = os.getcwd() + r'\output\low_res'
    hr_real_path = os.getcwd() + r'\output\high_res_real'
    hr_fake_path = os.getcwd() + r'\output\high_res_fake'
    return lr_path, hr_real_path, hr_fake_path
| [
"torchvision.models.vgg19",
"models.Discriminator",
"torchvision.datasets.CIFAR10",
"torchvision.transforms.Pad",
"train.setup",
"torchvision.transforms.Normalize",
"torch.ones",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.load",
"torch.FloatTensor",
"torchvision.transforms.ToPILImage",
"... | [((475, 482), 'train.setup', 'setup', ([], {}), '()\n', (480, 482), False, 'from train import setup\n'), ((1183, 1258), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1203, 1258), True, 'import torchvision.transforms as transforms\n'), ((1962, 2040), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[-2.118, -2.036, -1.804]', 'std': '[4.367, 4.464, 4.444]'}), '(mean=[-2.118, -2.036, -1.804], std=[4.367, 4.464, 4.444])\n', (1982, 2040), True, 'import torchvision.transforms as transforms\n'), ((3006, 3035), 'models.Generator', 'Generator', (['(16)', 'opt.upSampling'], {}), '(16, opt.upSampling)\n', (3015, 3035), False, 'from models import Generator, Discriminator, FeatureExtractor\n'), ((3160, 3175), 'models.Discriminator', 'Discriminator', ([], {}), '()\n', (3173, 3175), False, 'from models import Generator, Discriminator, FeatureExtractor\n'), ((3430, 3442), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3440, 3442), True, 'import torch.nn as nn\n'), ((3471, 3483), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (3481, 3483), True, 'import torch.nn as nn\n'), ((3899, 3970), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(3)', 'opt.imageSize[0]', 'opt.imageSize[1]'], {}), '(opt.batchSize, 3, opt.imageSize[0], opt.imageSize[1])\n', (3916, 3970), False, 'import torch\n'), ((5192, 5210), 'PIL.Image.open', 'Image.open', (['imageA'], {}), '(imageA)\n', (5202, 5210), False, 'from PIL import Image\n'), ((5261, 5279), 'PIL.Image.open', 'Image.open', (['imageB'], {}), '(imageB)\n', (5271, 5279), False, 'from PIL import Image\n'), ((5667, 5694), 'torchvision.utils.save_image', 'save_image', (['modeB', 'savename'], {}), '(modeB, savename)\n', (5677, 5694), False, 'from torchvision.utils import save_image\n'), ((5798, 5814), 'os.listdir', 'os.listdir', (['path'], {}), 
'(path)\n', (5808, 5814), False, 'import os\n'), ((527, 562), 'os.makedirs', 'os.makedirs', (['"""output/high_res_fake"""'], {}), "('output/high_res_fake')\n", (538, 562), False, 'import os\n'), ((571, 606), 'os.makedirs', 'os.makedirs', (['"""output/high_res_real"""'], {}), "('output/high_res_real')\n", (582, 606), False, 'import os\n'), ((615, 644), 'os.makedirs', 'os.makedirs', (['"""output/low_res"""'], {}), "('output/low_res')\n", (626, 644), False, 'import os\n'), ((686, 711), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (709, 711), False, 'import torch\n'), ((2210, 2270), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', ([], {'root': 'opt.dataroot', 'transform': 'transform'}), '(root=opt.dataroot, transform=transform)\n', (2230, 2270), True, 'import torchvision.datasets as datasets\n'), ((3360, 3401), 'torchvision.models.vgg19', 'torchvision.models.vgg19', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3384, 3401), False, 'import torchvision\n'), ((3512, 3540), 'torch.ones', 'torch.ones', (['opt.batchSize', '(1)'], {}), '(opt.batchSize, 1)\n', (3522, 3540), False, 'import torch\n'), ((3569, 3598), 'torch.zeros', 'torch.zeros', (['opt.batchSize', '(1)'], {}), '(opt.batchSize, 1)\n', (3580, 3598), False, 'import torch\n'), ((5765, 5781), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5775, 5781), False, 'import os\n'), ((5890, 5912), 'PIL.Image.open', 'Image.open', (['(path + img)'], {}), '(path + img)\n', (5900, 5912), False, 'from PIL import Image\n'), ((6419, 6430), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6428, 6430), False, 'import os\n'), ((6471, 6482), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6480, 6482), False, 'import os\n'), ((6529, 6540), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6538, 6540), False, 'import os\n'), ((858, 911), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(image.size[0], image.size[1])'], {}), '((image.size[0], image.size[1]))\n', (879, 911), 
True, 'import torchvision.transforms as transforms\n'), ((1078, 1103), 'torchvision.transforms.Pad', 'transforms.Pad', ([], {'padding': '(0)'}), '(padding=0)\n', (1092, 1103), True, 'import torchvision.transforms as transforms\n'), ((1143, 1164), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1162, 1164), True, 'import torchvision.transforms as transforms\n'), ((1374, 1397), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1395, 1397), True, 'import torchvision.transforms as transforms\n'), ((1558, 1583), 'torchvision.transforms.Pad', 'transforms.Pad', ([], {'padding': '(0)'}), '(padding=0)\n', (1572, 1583), True, 'import torchvision.transforms as transforms\n'), ((1617, 1638), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1636, 1638), True, 'import torchvision.transforms as transforms\n'), ((1672, 1747), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1692, 1747), True, 'import torchvision.transforms as transforms\n'), ((2328, 2417), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 'opt.dataroot', 'download': '(True)', 'train': '(False)', 'transform': 'transform'}), '(root=opt.dataroot, download=True, train=False, transform=\n transform)\n', (2344, 2417), True, 'import torchvision.datasets as datasets\n'), ((2892, 2913), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2911, 2913), True, 'import torchvision.transforms as transforms\n'), ((3105, 3137), 'torch.load', 'torch.load', (['opt.generatorWeights'], {}), '(opt.generatorWeights)\n', (3115, 3137), False, 'import torch\n'), ((3253, 3289), 'torch.load', 'torch.load', (['opt.discriminatorWeights'], {}), '(opt.discriminatorWeights)\n', (3263, 3289), False, 'import torch\n'), ((4778, 4801), 'torch.autograd.Variable', 'Variable', 
(['high_res_real'], {}), '(high_res_real)\n', (4786, 4801), False, 'from torch.autograd import Variable\n'), ((5443, 5468), 'torchvision.transforms.Pad', 'transforms.Pad', ([], {'padding': '(0)'}), '(padding=0)\n', (5457, 5468), True, 'import torchvision.transforms as transforms\n'), ((5510, 5568), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(widthA * factor, lengthA * factor)'], {}), '((widthA * factor, lengthA * factor))\n', (5531, 5568), True, 'import torchvision.transforms as transforms\n'), ((5610, 5631), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5629, 5631), True, 'import torchvision.transforms as transforms\n'), ((6173, 6184), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6182, 6184), False, 'import os\n'), ((2475, 2565), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', ([], {'root': 'opt.dataroot', 'download': '(True)', 'train': '(False)', 'transform': 'transform'}), '(root=opt.dataroot, download=True, train=False, transform=\n transform)\n', (2492, 2565), True, 'import torchvision.datasets as datasets\n'), ((4406, 4423), 'numpy.array', 'np.array', (['low_res'], {}), '(low_res)\n', (4414, 4423), True, 'import numpy as np\n'), ((4513, 4536), 'numpy.array', 'np.array', (['high_res_real'], {}), '(high_res_real)\n', (4521, 4536), True, 'import numpy as np\n'), ((4840, 4857), 'torch.autograd.Variable', 'Variable', (['low_res'], {}), '(low_res)\n', (4848, 4857), False, 'from torch.autograd import Variable\n'), ((6072, 6083), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6081, 6083), False, 'import os\n'), ((6230, 6241), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6239, 6241), False, 'import os\n'), ((6292, 6303), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6301, 6303), False, 'import os\n'), ((6358, 6369), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6367, 6369), False, 'import os\n'), ((4710, 4727), 'torch.autograd.Variable', 'Variable', (['low_res'], {}), '(low_res)\n', (4718, 4727), False, 'from 
torch.autograd import Variable\n')] |
from __future__ import print_function
import argparse
import json
from collections import defaultdict
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--questions_file', required=True)
parser.add_argument('--answers_file', required=True)
def main(args):
# Load true answers from questions file
true_answers = []
with open(args.questions_file, 'r') as f:
questions = json.load(f)['questions']
for q in questions:
true_answers.append(q['answer'])
correct_by_q_type = defaultdict(list)
# Load predicted answers
predicted_answers = []
with open(args.answers_file, 'r') as f:
for line in f:
predicted_answers.append(line.strip())
num_true, num_pred = len(true_answers), len(predicted_answers)
assert num_true == num_pred, 'Expected %d answers but got %d' % (
num_true, num_pred)
for i, (true_answer, predicted_answer) in enumerate(zip(true_answers, predicted_answers)):
correct = 1 if true_answer == predicted_answer else 0
correct_by_q_type['Overall'].append(correct)
q_type = questions[i]['program'][-1]['function']
correct_by_q_type[q_type].append(correct)
for q_type, vals in sorted(correct_by_q_type.items()):
vals = np.asarray(vals)
print(q_type, '%d / %d = %.2f' % (vals.sum(), vals.shape[0], 100.0 * vals.mean()))
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| [
"collections.defaultdict",
"numpy.asarray",
"json.load",
"argparse.ArgumentParser"
] | [((132, 157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (155, 157), False, 'import argparse\n'), ((518, 535), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (529, 535), False, 'from collections import defaultdict\n'), ((1226, 1242), 'numpy.asarray', 'np.asarray', (['vals'], {}), '(vals)\n', (1236, 1242), True, 'import numpy as np\n'), ((406, 418), 'json.load', 'json.load', (['f'], {}), '(f)\n', (415, 418), False, 'import json\n')] |
#!/usr/bin/env python3
import numpy as np
import wave
import struct
import sys
from sbc import *
from sbc_synthesis_v1 import *
V = np.zeros(shape = (2, 10*2*8))
N = np.zeros(shape = (16,8))
total_time_ms = 0
mSBC_enabled = 1
H2_first_byte = 0
H2_second_byte = 0
def find_syncword(h2_first_byte, h2_second_byte):
if h2_first_byte != 1:
return -1
hn = h2_second_byte >> 4
ln = h2_second_byte & 0x0F
if ln == 8:
sn0 = hn & 3
sn1 = hn >> 2
if sn0 != sn1:
return -1
if sn0 not in [0,3]:
return -1
return sn0
def sbc_unpack_frame(fin, available_bytes, frame):
global H2_first_byte, H2_second_byte
if available_bytes == 0:
print ("no available_bytes")
raise TypeError
frame.syncword = get_bits(fin,8)
if mSBC_enabled:
if frame.syncword != 173:
#print ("out of sync %02x" % frame.syncword)
H2_first_byte = H2_second_byte
H2_second_byte = frame.syncword
return -1
else:
if frame.syncword != 156:
#print ("out of sync %02x" % frame.syncword)
return -1
if mSBC_enabled:
frame.sampling_frequency = 0 # == 16 kHz
frame.nr_blocks = 15
frame.channel_mode = MONO
frame.allocation_method = LOUDNESS
frame.nr_subbands = 8
frame.bitpool = 26
frame.reserved_for_future_use = get_bits(fin,16)
else:
frame.sampling_frequency = get_bits(fin,2)
frame.nr_blocks = nr_blocks[get_bits(fin,2)]
frame.channel_mode = get_bits(fin,2)
frame.allocation_method = get_bits(fin,1)
frame.nr_subbands = nr_subbands[get_bits(fin,1)]
frame.bitpool = get_bits(fin,8)
if frame.channel_mode == MONO:
frame.nr_channels = 1
else:
frame.nr_channels = 2
frame.crc_check = get_bits(fin,8)
frame.init(frame.nr_blocks, frame.nr_subbands, frame.nr_channels)
# read joint stereo flags
if frame.channel_mode == JOINT_STEREO:
for sb in range(frame.nr_subbands-1):
frame.join[sb] = get_bits(fin,1)
get_bits(fin,1) # RFA
frame.scale_factor = np.zeros(shape=(frame.nr_channels, frame.nr_subbands), dtype = np.int32)
# read scale factors
for ch in range(frame.nr_channels):
for sb in range(frame.nr_subbands):
frame.scale_factor[ch][sb] = get_bits(fin, 4)
if mSBC_enabled:
#print ("syncword: ", find_syncword(H2_first_byte, H2_second_byte))
crc = calculate_crc_mSBC(frame)
else:
crc = calculate_crc(frame)
if crc != frame.crc_check:
print ("CRC mismatch: calculated %d, expected %d" % (crc, frame.crc_check))
return -1
frame.scalefactor = np.zeros(shape=(frame.nr_channels, frame.nr_subbands), dtype = np.int32)
for ch in range(frame.nr_channels):
for sb in range(frame.nr_subbands):
frame.scalefactor[ch][sb] = 1 << (frame.scale_factor[ch][sb] + 1)
frame.bits = sbc_bit_allocation(frame)
frame.audio_sample = np.ndarray(shape=(frame.nr_blocks, frame.nr_channels, frame.nr_subbands), dtype = np.uint16)
for blk in range(frame.nr_blocks):
for ch in range(frame.nr_channels):
for sb in range(frame.nr_subbands):
frame.audio_sample[blk][ch][sb] = get_bits(fin, frame.bits[ch][sb])
#print ("block %2d - audio sample: %s" % (blk, frame.audio_sample[blk][0]))
drop_remaining_bits()
return 0
def sbc_reconstruct_subband_samples(frame):
frame.levels = np.zeros(shape=(frame.nr_channels, frame.nr_subbands), dtype = np.int32)
frame.sb_sample = np.zeros(shape=(frame.nr_blocks, frame.nr_channels, frame.nr_subbands))
for ch in range(frame.nr_channels):
for sb in range(frame.nr_subbands):
frame.levels[ch][sb] = pow(2.0, frame.bits[ch][sb]) - 1
for blk in range(frame.nr_blocks):
for ch in range(frame.nr_channels):
for sb in range(frame.nr_subbands):
if frame.levels[ch][sb] > 0:
AS = frame.audio_sample[blk][ch][sb]
L = frame.levels[ch][sb]
SF = frame.scalefactor[ch][sb]
frame.sb_sample[blk][ch][sb] = SF * ((AS*2.0+1.0) / L -1.0 )
else:
frame.sb_sample[blk][ch][sb] = 0
# sythesis filter
if frame.channel_mode == JOINT_STEREO:
for blk in range(frame.nr_blocks):
for sb in range(frame.nr_subbands):
if frame.join[sb]==1:
ch_a = frame.sb_sample[blk][0][sb] + frame.sb_sample[blk][1][sb]
ch_b = frame.sb_sample[blk][0][sb] - frame.sb_sample[blk][1][sb]
frame.sb_sample[blk][0][sb] = ch_a
frame.sb_sample[blk][1][sb] = ch_b
return 0
def sbc_frame_synthesis_sig(frame, ch, blk, proto_table):
global V, N
M = frame.nr_subbands
L = 10 * M
M2 = 2*M
L2 = 2*L
S = np.zeros(M)
U = np.zeros(L)
W = np.zeros(L)
frame.X = np.zeros(M)
for i in range(M):
S[i] = frame.sb_sample[blk][ch][i]
for i in range(L2-1, M2-1,-1):
V[ch][i] = V[ch][i-M2]
for k in range(M2):
V[ch][k] = 0
for i in range(M):
V[ch][k] += N[k][i] * S[i]
for i in range(5):
for j in range(M):
U[i*M2+j] = V[ch][i*2*M2+j]
U[(i*2+1)*M+j] = V[ch][(i*4+3)*M+j]
for i in range(L):
D = proto_table[i] * (-M)
W[i] = U[i]*D
offset = blk*M
for j in range(M):
for i in range(10):
frame.X[j] += W[j+M*i]
frame.pcm[ch][offset + j] = np.int16(frame.X[j])
def sbc_frame_synthesis_v1(frame, ch, blk, proto_table):
global V
N = matrix_N()
M = frame.nr_subbands
L = 10 * M
M2 = 2*M
L2 = 2*L
S = np.zeros(M)
W = np.zeros(L)
frame.X = np.zeros(M)
for i in range(M):
S[i] = frame.sb_sample[blk][ch][i]
for i in range(L2-1, M2-1,-1):
V[ch][i] = V[ch][i-M2]
for k in range(M2):
V[ch][k] = 0
for i in range(M):
V[ch][k] += N[k][i] * S[i]
for i in range(L):
D = proto_table[i] * (-M)
W[i] = D * VSGN(i,M2) * V[ch][remap_V(i)]
offset = blk*M
for j in range(M):
for i in range(10):
frame.X[j] += W[j+M*i]
frame.pcm[ch][offset + j] = np.int16(frame.X[j])
def sbc_frame_synthesis(frame, ch, blk, proto_table, implementation = "SIG"):
global total_time_ms
t1 = time_ms()
if implementation == "SIG":
sbc_frame_synthesis_sig(frame, ch, blk, proto_table)
elif implementation == "V1":
sbc_frame_synthesis_v1(frame, ch, blk, proto_table)
else:
print ("synthesis %s not implemented" % implementation)
exit(1)
t2 = time_ms()
total_time_ms += t2-t1
def sbc_init_synthesis_sig(M):
global N
M2 = M << 1
N = np.zeros(shape = (M2,M))
for k in range(M2):
for i in range(M):
N[k][i] = np.cos((i+0.5)*(k+M/2)*np.pi/M)
def sbc_init_sythesis(nr_subbands, implementation = "SIG"):
if implementation == "SIG":
sbc_init_synthesis_sig(nr_subbands)
elif implementation == "V1":
sbc_init_synthesis_v1(nr_subbands)
else:
print ("synthesis %s not implemented" % implementation)
exit(1)
def sbc_synthesis(frame, implementation = "SIG"):
if frame.nr_subbands == 4:
proto_table = Proto_4_40
elif frame.nr_subbands == 8:
proto_table = Proto_8_80
else:
return -1
for ch in range(frame.nr_channels):
for blk in range(frame.nr_blocks):
sbc_frame_synthesis(frame, ch, blk, proto_table, implementation)
return frame.nr_blocks * frame.nr_subbands
def sbc_decode(frame, implementation = "SIG"):
err = sbc_reconstruct_subband_samples(frame)
if err >= 0:
err = sbc_synthesis(frame, implementation)
return err
def write_wav_file(fout, frame):
values = []
for i in range(frame.nr_subbands * frame.nr_blocks):
for ch in range(frame.nr_channels):
try:
packed_value = struct.pack('h', frame.pcm[ch][i])
values.append(packed_value)
except struct.error:
print (frame)
print (i, frame.pcm[ch][i], frame.pcm[ch])
exit(1)
value_str = ''.join(values)
fout.writeframes(value_str)
if __name__ == "__main__":
usage = '''
Usage: ./sbc_decoder.py input.(msbc|sbc) implementation[default=SIG, V1]
'''
if (len(sys.argv) < 2):
print(usage)
sys.exit(1)
try:
mSBC_enabled = 0
infile = sys.argv[1]
if not infile.endswith('.sbc'):
if infile.endswith('.msbc'):
wavfile = infile.replace('.msbc', '-decoded.wav')
mSBC_enabled = 1
else:
print(usage)
sys.exit(1)
else:
wavfile = infile.replace('.sbc', '-decoded-py.wav')
print ("input file: ", infile)
print ("output file: ", wavfile)
print ("mSBC enabled: ", mSBC_enabled)
fout = False
implementation = "SIG"
if len(sys.argv) == 3:
implementation = sys.argv[2]
if implementation != "V1":
print ("synthesis %s not implemented" % implementation)
exit(1)
print ("\nSynthesis implementation: %s\n" % implementation)
with open (infile, 'rb') as fin:
try:
fin.seek(0, 2)
file_size = fin.tell()
fin.seek(0, 0)
frame_count = 0
while True:
frame = SBCFrame()
if frame_count % 200 == 0:
print ("== Frame %d == offset %d" % (frame_count, fin.tell()))
err = sbc_unpack_frame(fin, file_size - fin.tell(), frame)
if err:
#print ("error, frame_count: ", frame_count)
continue
if frame_count == 0:
sbc_init_sythesis(frame.nr_subbands, implementation)
print (frame )
sbc_decode(frame, implementation)
if frame_count == 0:
fout = wave.open(wavfile, 'w')
fout.setnchannels(frame.nr_channels)
fout.setsampwidth(2)
fout.setframerate(sampling_frequencies[frame.sampling_frequency])
fout.setnframes(0)
fout.setcomptype = 'NONE'
print (frame.pcm)
write_wav_file(fout, frame)
frame_count += 1
# if frame_count == 1:
# break
except TypeError as err:
if not fout:
print (err)
else:
fout.close()
if frame_count > 0:
print ("DONE, SBC file %s decoded into WAV file %s " % (infile, wavfile))
print ("Average sythesis time per frame: %d ms/frame" % (total_time_ms/frame_count))
else:
print ("No frame found")
exit(0)
fout.close()
if frame_count > 0:
print ("DONE: SBC file %s decoded into WAV file %s " % (infile, wavfile))
print ("Average sythesis time per frame: %d ms/frame" % (total_time_ms/frame_count))
else:
print ("No frame found")
except IOError as e:
print(usage)
sys.exit(1)
| [
"wave.open",
"numpy.zeros",
"struct.pack",
"numpy.cos",
"numpy.int16",
"numpy.ndarray",
"sys.exit"
] | [((133, 164), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 10 * 2 * 8)'}), '(shape=(2, 10 * 2 * 8))\n', (141, 164), True, 'import numpy as np\n'), ((167, 190), 'numpy.zeros', 'np.zeros', ([], {'shape': '(16, 8)'}), '(shape=(16, 8))\n', (175, 190), True, 'import numpy as np\n'), ((2202, 2272), 'numpy.zeros', 'np.zeros', ([], {'shape': '(frame.nr_channels, frame.nr_subbands)', 'dtype': 'np.int32'}), '(shape=(frame.nr_channels, frame.nr_subbands), dtype=np.int32)\n', (2210, 2272), True, 'import numpy as np\n'), ((2802, 2872), 'numpy.zeros', 'np.zeros', ([], {'shape': '(frame.nr_channels, frame.nr_subbands)', 'dtype': 'np.int32'}), '(shape=(frame.nr_channels, frame.nr_subbands), dtype=np.int32)\n', (2810, 2872), True, 'import numpy as np\n'), ((3116, 3210), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(frame.nr_blocks, frame.nr_channels, frame.nr_subbands)', 'dtype': 'np.uint16'}), '(shape=(frame.nr_blocks, frame.nr_channels, frame.nr_subbands),\n dtype=np.uint16)\n', (3126, 3210), True, 'import numpy as np\n'), ((3617, 3687), 'numpy.zeros', 'np.zeros', ([], {'shape': '(frame.nr_channels, frame.nr_subbands)', 'dtype': 'np.int32'}), '(shape=(frame.nr_channels, frame.nr_subbands), dtype=np.int32)\n', (3625, 3687), True, 'import numpy as np\n'), ((3712, 3783), 'numpy.zeros', 'np.zeros', ([], {'shape': '(frame.nr_blocks, frame.nr_channels, frame.nr_subbands)'}), '(shape=(frame.nr_blocks, frame.nr_channels, frame.nr_subbands))\n', (3720, 3783), True, 'import numpy as np\n'), ((5069, 5080), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (5077, 5080), True, 'import numpy as np\n'), ((5089, 5100), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (5097, 5100), True, 'import numpy as np\n'), ((5109, 5120), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (5117, 5120), True, 'import numpy as np\n'), ((5135, 5146), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (5143, 5146), True, 'import numpy as np\n'), ((5961, 5972), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', 
(5969, 5972), True, 'import numpy as np\n'), ((5981, 5992), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (5989, 5992), True, 'import numpy as np\n'), ((6007, 6018), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (6015, 6018), True, 'import numpy as np\n'), ((7082, 7105), 'numpy.zeros', 'np.zeros', ([], {'shape': '(M2, M)'}), '(shape=(M2, M))\n', (7090, 7105), True, 'import numpy as np\n'), ((5768, 5788), 'numpy.int16', 'np.int16', (['frame.X[j]'], {}), '(frame.X[j])\n', (5776, 5788), True, 'import numpy as np\n'), ((6529, 6549), 'numpy.int16', 'np.int16', (['frame.X[j]'], {}), '(frame.X[j])\n', (6537, 6549), True, 'import numpy as np\n'), ((8807, 8818), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8815, 8818), False, 'import sys\n'), ((7180, 7223), 'numpy.cos', 'np.cos', (['((i + 0.5) * (k + M / 2) * np.pi / M)'], {}), '((i + 0.5) * (k + M / 2) * np.pi / M)\n', (7186, 7223), True, 'import numpy as np\n'), ((12025, 12036), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12033, 12036), False, 'import sys\n'), ((8328, 8362), 'struct.pack', 'struct.pack', (['"""h"""', 'frame.pcm[ch][i]'], {}), "('h', frame.pcm[ch][i])\n", (8339, 8362), False, 'import struct\n'), ((9125, 9136), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9133, 9136), False, 'import sys\n'), ((10614, 10637), 'wave.open', 'wave.open', (['wavfile', '"""w"""'], {}), "(wavfile, 'w')\n", (10623, 10637), False, 'import wave\n')] |
import time
import os
import pathlib
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('image', './data/girl.png', 'path to image directory')
flags.DEFINE_string('output', './', 'path to output directory')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
def main(_argv):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
tf.config.experimental.set_memory_growth(physical_device, True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights).expect_partial()
logging.info('weights loaded')
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
logging.info('classes loaded')
print(FLAGS.output)
os.chdir(FLAGS.image)
for file in os.listdir(FLAGS.image):
if file.endswith(".jpg"):
logging.info("processing {}".format(file))
img_raw = tf.image.decode_image(open(file, 'rb').read(), channels=3)
img_predict(img_raw,yolo,class_names,file)
logging.info('output saved to: {}'.format(FLAGS.output))
def img_predict(img_raw,yolo,class_names,file):
img = tf.expand_dims(img_raw, 0)
img = transform_images(img, FLAGS.size)
t1 = time.time()
boxes, scores, classes, nums = yolo(img)
t2 = time.time()
logging.info('time: {}'.format(t2 - t1))
logging.info('detections:')
for i in range(nums[0]):
logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
np.array(scores[0][i]),
np.array(boxes[0][i])))
img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
img_name=pathlib.Path(file)
output_name=os.path.join(FLAGS.output,img_name.stem+"_det.jpg")
print(output_name)
cv2.imwrite(output_name, img)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
| [
"yolov3_tf2.dataset.transform_images",
"absl.logging.info",
"pathlib.Path",
"yolov3_tf2.utils.draw_outputs",
"absl.flags.DEFINE_boolean",
"os.path.join",
"os.chdir",
"cv2.imwrite",
"absl.flags.DEFINE_integer",
"yolov3_tf2.models.YoloV3Tiny",
"tensorflow.config.experimental.set_memory_growth",
... | [((330, 405), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""classes"""', '"""./data/coco.names"""', '"""path to classes file"""'], {}), "('classes', './data/coco.names', 'path to classes file')\n", (349, 405), False, 'from absl import app, flags, logging\n'), ((406, 491), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""weights"""', '"""./checkpoints/yolov3.tf"""', '"""path to weights file"""'], {}), "('weights', './checkpoints/yolov3.tf',\n 'path to weights file')\n", (425, 491), False, 'from absl import app, flags, logging\n'), ((508, 568), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""tiny"""', '(False)', '"""yolov3 or yolov3-tiny"""'], {}), "('tiny', False, 'yolov3 or yolov3-tiny')\n", (528, 568), False, 'from absl import app, flags, logging\n'), ((569, 622), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""size"""', '(416)', '"""resize images to"""'], {}), "('size', 416, 'resize images to')\n", (589, 622), False, 'from absl import app, flags, logging\n'), ((623, 697), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""image"""', '"""./data/girl.png"""', '"""path to image directory"""'], {}), "('image', './data/girl.png', 'path to image directory')\n", (642, 697), False, 'from absl import app, flags, logging\n'), ((698, 761), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output"""', '"""./"""', '"""path to output directory"""'], {}), "('output', './', 'path to output directory')\n", (717, 761), False, 'from absl import app, flags, logging\n'), ((762, 835), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_classes"""', '(80)', '"""number of classes in the model"""'], {}), "('num_classes', 80, 'number of classes in the model')\n", (782, 835), False, 'from absl import app, flags, logging\n'), ((878, 929), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (922, 929), True, 'import tensorflow as tf\n'), 
((1238, 1268), 'absl.logging.info', 'logging.info', (['"""weights loaded"""'], {}), "('weights loaded')\n", (1250, 1268), False, 'from absl import app, flags, logging\n'), ((1345, 1375), 'absl.logging.info', 'logging.info', (['"""classes loaded"""'], {}), "('classes loaded')\n", (1357, 1375), False, 'from absl import app, flags, logging\n'), ((1410, 1431), 'os.chdir', 'os.chdir', (['FLAGS.image'], {}), '(FLAGS.image)\n', (1418, 1431), False, 'import os\n'), ((1448, 1471), 'os.listdir', 'os.listdir', (['FLAGS.image'], {}), '(FLAGS.image)\n', (1458, 1471), False, 'import os\n'), ((1823, 1849), 'tensorflow.expand_dims', 'tf.expand_dims', (['img_raw', '(0)'], {}), '(img_raw, 0)\n', (1837, 1849), True, 'import tensorflow as tf\n'), ((1860, 1893), 'yolov3_tf2.dataset.transform_images', 'transform_images', (['img', 'FLAGS.size'], {}), '(img, FLAGS.size)\n', (1876, 1893), False, 'from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset\n'), ((1904, 1915), 'time.time', 'time.time', ([], {}), '()\n', (1913, 1915), False, 'import time\n'), ((1970, 1981), 'time.time', 'time.time', ([], {}), '()\n', (1979, 1981), False, 'import time\n'), ((2032, 2059), 'absl.logging.info', 'logging.info', (['"""detections:"""'], {}), "('detections:')\n", (2044, 2059), False, 'from absl import app, flags, logging\n'), ((2369, 2431), 'yolov3_tf2.utils.draw_outputs', 'draw_outputs', (['img', '(boxes, scores, classes, nums)', 'class_names'], {}), '(img, (boxes, scores, classes, nums), class_names)\n', (2381, 2431), False, 'from yolov3_tf2.utils import draw_outputs\n'), ((2445, 2463), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (2457, 2463), False, 'import pathlib\n'), ((2480, 2534), 'os.path.join', 'os.path.join', (['FLAGS.output', "(img_name.stem + '_det.jpg')"], {}), "(FLAGS.output, img_name.stem + '_det.jpg')\n", (2492, 2534), False, 'import os\n'), ((2559, 2588), 'cv2.imwrite', 'cv2.imwrite', (['output_name', 'img'], {}), '(output_name, img)\n', (2570, 2588), 
False, 'import cv2\n'), ((983, 1046), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_device', '(True)'], {}), '(physical_device, True)\n', (1023, 1046), True, 'import tensorflow as tf\n'), ((1082, 1119), 'yolov3_tf2.models.YoloV3Tiny', 'YoloV3Tiny', ([], {'classes': 'FLAGS.num_classes'}), '(classes=FLAGS.num_classes)\n', (1092, 1119), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny\n'), ((1145, 1178), 'yolov3_tf2.models.YoloV3', 'YoloV3', ([], {'classes': 'FLAGS.num_classes'}), '(classes=FLAGS.num_classes)\n', (1151, 1178), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny\n'), ((2640, 2653), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2647, 2653), False, 'from absl import app, flags, logging\n'), ((2208, 2230), 'numpy.array', 'np.array', (['scores[0][i]'], {}), '(scores[0][i])\n', (2216, 2230), True, 'import numpy as np\n'), ((2275, 2296), 'numpy.array', 'np.array', (['boxes[0][i]'], {}), '(boxes[0][i])\n', (2283, 2296), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 15:27:48 2020
@author: virati
Patient specific readouts for a *PREDICTIVE* model without need for parsimony
"""
from DBSpace.readout import BR_DataFrame as BRDF
from DBSpace.readout import ClinVect, decoder
from DBSpace.readout.BR_DataFrame import BR_Data_Tree
from DBSpace import nestdict
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['text.usetex'] = True
import numpy as np
# Misc libraries
import copy
import pickle
#Debugging
import ipdb
## MAJOR PARAMETERS for our partial biometric analysis
test_scale = 'pHDRS17' # Which scale are we using as the measurement of the depression state?
do_pts = ['901','903','905','906','907','908'] # Which patients do we want to include in this entire analysis?
''' DETRENDING
Which detrending scheme are we doing
This is important. Block goes into each patient and does zero-mean and linear detrend across time
None does not do this
All does a linear detrend across all concatenated observations. This is dumb and should not be done. Will eliminate this since it makes no sense
'''
# Initial
# Now we set up our DBSpace environment
#ClinFrame = ClinVect.CFrame(norm_scales=True)
ClinFrame = ClinVect.CStruct()
#BRFrame = BRDF.BR_Data_Tree(preFrame='Chronic_Frame.pickle')
BRFrame = pickle.load(open('/home/virati/Dropbox/projects/Research/MDD-DBS/Data/Chronic_FrameMay2020.pickle',"rb"))
do_shuffled_null = False
#%%
pt_coeff = nestdict()
for do_pt in do_pts:
main_readout = decoder.var_decoder(BRFrame = BRFrame,ClinFrame = ClinFrame,pts=do_pt,clin_measure=test_scale,shuffle_null=False,FeatureSet='main')
main_readout.filter_recs(rec_class='main_study')
main_readout.split_train_set(0.6)
#null_slopes,null_r2 = main_readout.model_analysis(do_null=True,n_iter=100)
main_slope,main_r2 = main_readout.model_analysis()
print(do_pt + ' Slope: ' + str(main_slope))
print('p<' + str(np.sum(null_slopes > main_slope[0])/100))
print('R2: ' + str(main_r2))
print('p<' + str(np.sum(null_r2 > main_r2[0])/100))
pt_coeff[do_pt] = main_readout.decode_model.coef_
| [
"DBSpace.readout.decoder.var_decoder",
"DBSpace.nestdict",
"numpy.sum",
"DBSpace.readout.ClinVect.CStruct"
] | [((1240, 1258), 'DBSpace.readout.ClinVect.CStruct', 'ClinVect.CStruct', ([], {}), '()\n', (1256, 1258), False, 'from DBSpace.readout import ClinVect, decoder\n'), ((1477, 1487), 'DBSpace.nestdict', 'nestdict', ([], {}), '()\n', (1485, 1487), False, 'from DBSpace import nestdict\n'), ((1528, 1664), 'DBSpace.readout.decoder.var_decoder', 'decoder.var_decoder', ([], {'BRFrame': 'BRFrame', 'ClinFrame': 'ClinFrame', 'pts': 'do_pt', 'clin_measure': 'test_scale', 'shuffle_null': '(False)', 'FeatureSet': '"""main"""'}), "(BRFrame=BRFrame, ClinFrame=ClinFrame, pts=do_pt,\n clin_measure=test_scale, shuffle_null=False, FeatureSet='main')\n", (1547, 1664), False, 'from DBSpace.readout import ClinVect, decoder\n'), ((1965, 2000), 'numpy.sum', 'np.sum', (['(null_slopes > main_slope[0])'], {}), '(null_slopes > main_slope[0])\n', (1971, 2000), True, 'import numpy as np\n'), ((2061, 2089), 'numpy.sum', 'np.sum', (['(null_r2 > main_r2[0])'], {}), '(null_r2 > main_r2[0])\n', (2067, 2089), True, 'import numpy as np\n')] |
import numpy as np
import scipy
class nerual_newtork(object):
def __init__(self, input_layers, hidden_layers, output_layers, learning_rate = 0.1, n_iterations =100 ):
self.input_nodes = input_layers
self.hidden_nodes = hidden_layers
self.output_nodes = output_layers
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.weights_with_input_hidden = (np.random.rand(self.hidden_nodes, self.input_nodes) - 0.5 )
self.weights_with_output_hidden = (np.random.rand(self.output_nodes, self.hidden_nodes) - 0.5)
self.activiation_function = lambda x : scipy.special.expit(x)
def train(self, inputs_list, targets_list):
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
hidden_inputs = np.dot(self.weights_with_input_hidden, inputs)
hidden_outputs = self.activiation_function(hidden_inputs)
final_inputs = np.dot(self.weights_with_output_hidden, hidden_outputs)
final_outputs = self.activiation_function(final_inputs)
def query(self, input_list):
inputs = np.array(input_list, ndmin=2).T
hidden_outputs = np.dot(self.weights_with_input_hidden, inputs)
final_outputs = np.dot(self.weights_with_output_hidden, hidden_outputs)
return final_outputs
input_layers = 3
hidden_layers = 3
output_layers = 3
nn = nerual_newtork(input_layers, hidden_layers, output_layers) | [
"numpy.random.rand",
"numpy.dot",
"numpy.array",
"scipy.special.expit"
] | [((834, 880), 'numpy.dot', 'np.dot', (['self.weights_with_input_hidden', 'inputs'], {}), '(self.weights_with_input_hidden, inputs)\n', (840, 880), True, 'import numpy as np\n'), ((971, 1026), 'numpy.dot', 'np.dot', (['self.weights_with_output_hidden', 'hidden_outputs'], {}), '(self.weights_with_output_hidden, hidden_outputs)\n', (977, 1026), True, 'import numpy as np\n'), ((1201, 1247), 'numpy.dot', 'np.dot', (['self.weights_with_input_hidden', 'inputs'], {}), '(self.weights_with_input_hidden, inputs)\n', (1207, 1247), True, 'import numpy as np\n'), ((1273, 1328), 'numpy.dot', 'np.dot', (['self.weights_with_output_hidden', 'hidden_outputs'], {}), '(self.weights_with_output_hidden, hidden_outputs)\n', (1279, 1328), True, 'import numpy as np\n'), ((423, 474), 'numpy.random.rand', 'np.random.rand', (['self.hidden_nodes', 'self.input_nodes'], {}), '(self.hidden_nodes, self.input_nodes)\n', (437, 474), True, 'import numpy as np\n'), ((526, 578), 'numpy.random.rand', 'np.random.rand', (['self.output_nodes', 'self.hidden_nodes'], {}), '(self.output_nodes, self.hidden_nodes)\n', (540, 578), True, 'import numpy as np\n'), ((634, 656), 'scipy.special.expit', 'scipy.special.expit', (['x'], {}), '(x)\n', (653, 656), False, 'import scipy\n'), ((724, 754), 'numpy.array', 'np.array', (['inputs_list'], {'ndmin': '(2)'}), '(inputs_list, ndmin=2)\n', (732, 754), True, 'import numpy as np\n'), ((775, 806), 'numpy.array', 'np.array', (['targets_list'], {'ndmin': '(2)'}), '(targets_list, ndmin=2)\n', (783, 806), True, 'import numpy as np\n'), ((1143, 1172), 'numpy.array', 'np.array', (['input_list'], {'ndmin': '(2)'}), '(input_list, ndmin=2)\n', (1151, 1172), True, 'import numpy as np\n')] |
import numpy as np
from pyldpc import (make_ldpc, binaryproduct, ldpc_images)
from pyldpc.utils_img import gray2bin, rgb2bin
import pytest
from itertools import product
@pytest.mark.filterwarnings("ignore: In LDPC applications, using systematic")
@pytest.mark.parametrize("systematic, sparse",
product([False, True], [False, True]))
def test_image_gray(systematic, sparse):
n = 100
d_v = 3
d_c = 4
seed = 0
rnd = np.random.RandomState(seed)
H, G = make_ldpc(n, d_v, d_c, seed=seed, systematic=systematic,
sparse=sparse)
assert not binaryproduct(H, G).any()
n, k = G.shape
snr = 10
img = rnd.randint(0, 255, size=(3, 3))
img_bin = gray2bin(img)
img_shape = img_bin.shape
coded, noisy = ldpc_images.encode_img(G, img_bin, snr, seed)
x = ldpc_images.decode_img(G, H, coded, snr, img_shape=img_shape)
assert ldpc_images.ber_img(img_bin, gray2bin(x)) == 0
@pytest.mark.filterwarnings("ignore: In LDPC applications, using systematic")
@pytest.mark.parametrize("systematic, sparse",
product([False, True], [False, True]))
def test_image_rgb(systematic, sparse):
n = 69
d_v = 2
d_c = 3
seed = 0
rnd = np.random.RandomState(seed)
H, G = make_ldpc(n, d_v, d_c, seed=seed, systematic=systematic,
sparse=sparse)
assert not binaryproduct(H, G).any()
n, k = G.shape
snr = 10
img = rnd.randint(0, 255, size=(3, 3, 3))
img_bin = rgb2bin(img)
coded, noisy = ldpc_images.encode_img(G, img_bin, snr, seed)
x = ldpc_images.decode_img(G, H, coded, snr, img_bin.shape)
assert ldpc_images.ber_img(img_bin, rgb2bin(x)) == 0
| [
"pyldpc.make_ldpc",
"numpy.random.RandomState",
"pyldpc.binaryproduct",
"pyldpc.ldpc_images.encode_img",
"pyldpc.utils_img.gray2bin",
"pyldpc.ldpc_images.decode_img",
"itertools.product",
"pytest.mark.filterwarnings",
"pyldpc.utils_img.rgb2bin"
] | [((173, 249), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: In LDPC applications, using systematic"""'], {}), "('ignore: In LDPC applications, using systematic')\n", (199, 249), False, 'import pytest\n'), ((968, 1044), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: In LDPC applications, using systematic"""'], {}), "('ignore: In LDPC applications, using systematic')\n", (994, 1044), False, 'import pytest\n'), ((461, 488), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (482, 488), True, 'import numpy as np\n'), ((500, 571), 'pyldpc.make_ldpc', 'make_ldpc', (['n', 'd_v', 'd_c'], {'seed': 'seed', 'systematic': 'systematic', 'sparse': 'sparse'}), '(n, d_v, d_c, seed=seed, systematic=systematic, sparse=sparse)\n', (509, 571), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((725, 738), 'pyldpc.utils_img.gray2bin', 'gray2bin', (['img'], {}), '(img)\n', (733, 738), False, 'from pyldpc.utils_img import gray2bin, rgb2bin\n'), ((789, 834), 'pyldpc.ldpc_images.encode_img', 'ldpc_images.encode_img', (['G', 'img_bin', 'snr', 'seed'], {}), '(G, img_bin, snr, seed)\n', (811, 834), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((844, 905), 'pyldpc.ldpc_images.decode_img', 'ldpc_images.decode_img', (['G', 'H', 'coded', 'snr'], {'img_shape': 'img_shape'}), '(G, H, coded, snr, img_shape=img_shape)\n', (866, 905), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((322, 359), 'itertools.product', 'product', (['[False, True]', '[False, True]'], {}), '([False, True], [False, True])\n', (329, 359), False, 'from itertools import product\n'), ((1255, 1282), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (1276, 1282), True, 'import numpy as np\n'), ((1294, 1365), 'pyldpc.make_ldpc', 'make_ldpc', (['n', 'd_v', 'd_c'], {'seed': 'seed', 'systematic': 'systematic', 'sparse': 'sparse'}), '(n, d_v, d_c, 
seed=seed, systematic=systematic, sparse=sparse)\n', (1303, 1365), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((1522, 1534), 'pyldpc.utils_img.rgb2bin', 'rgb2bin', (['img'], {}), '(img)\n', (1529, 1534), False, 'from pyldpc.utils_img import gray2bin, rgb2bin\n'), ((1554, 1599), 'pyldpc.ldpc_images.encode_img', 'ldpc_images.encode_img', (['G', 'img_bin', 'snr', 'seed'], {}), '(G, img_bin, snr, seed)\n', (1576, 1599), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((1609, 1664), 'pyldpc.ldpc_images.decode_img', 'ldpc_images.decode_img', (['G', 'H', 'coded', 'snr', 'img_bin.shape'], {}), '(G, H, coded, snr, img_bin.shape)\n', (1631, 1664), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((1117, 1154), 'itertools.product', 'product', (['[False, True]', '[False, True]'], {}), '([False, True], [False, True])\n', (1124, 1154), False, 'from itertools import product\n'), ((947, 958), 'pyldpc.utils_img.gray2bin', 'gray2bin', (['x'], {}), '(x)\n', (955, 958), False, 'from pyldpc.utils_img import gray2bin, rgb2bin\n'), ((1706, 1716), 'pyldpc.utils_img.rgb2bin', 'rgb2bin', (['x'], {}), '(x)\n', (1713, 1716), False, 'from pyldpc.utils_img import gray2bin, rgb2bin\n'), ((608, 627), 'pyldpc.binaryproduct', 'binaryproduct', (['H', 'G'], {}), '(H, G)\n', (621, 627), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n'), ((1402, 1421), 'pyldpc.binaryproduct', 'binaryproduct', (['H', 'G'], {}), '(H, G)\n', (1415, 1421), False, 'from pyldpc import make_ldpc, binaryproduct, ldpc_images\n')] |
"""
Compare the various algorithms on a synthetic dataset.
"""
import pickle
import os
import copy
import gzip
import numpy as np
# Use the Agg backend in running on a server without the DISPLAY variable
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# import brewer2mpl
# colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
# goodcolors = np.array([0,1,2,4,6,7,8])
# colors = np.array(colors)[goodcolors]
import harness
def load_data(data_path, test_path):
with gzip.open(data_path, 'r') as f:
S, true_model = pickle.load(f)
with gzip.open(test_path, 'r') as f:
S_test, test_model = pickle.load(f)
return S, S_test, true_model
def plot_pred_ll_vs_time(models, results, burnin=0,
std_ll=np.nan,
true_ll=np.nan):
from hips.plotting.layout import create_figure
from hips.plotting.colormaps import harvard_colors
# Make the ICML figure
fig = create_figure((4,3))
ax = fig.add_subplot(111)
col = harvard_colors()
plt.grid()
t_start = 0
t_stop = 0
for i, (model, result) in enumerate(zip(models, results)):
plt.plot(result.timestamps[burnin:], result.test_lls[burnin:], lw=2, color=col[i], label=model)
# Update time limits
t_start = min(t_start, result.timestamps[burnin:].min())
t_stop = max(t_stop, result.timestamps[burnin:].max())
# plt.legend(loc="outside right")
# Plot the standard Hawkes test ll
plt.plot([t_start, t_stop], std_ll*np.ones(2), lw=2, color=col[len(models)], label="Std.")
# Plot the true ll
plt.plot([t_start, t_stop], true_ll*np.ones(2), '--k', lw=2, label="True")
ax.set_xlim(t_start, t_stop)
ax.set_xlabel("time [sec]")
ax.set_ylabel("Pred. Log Lkhd.")
plt.show()
def plot_impulse_responses(models, results):
from hips.plotting.layout import create_figure
from hips.plotting.colormaps import harvard_colors
# Make the ICML figure
fig = create_figure((6,6))
col = harvard_colors()
plt.grid()
y_max = 0
for i, (model, result) in enumerate(zip(models, results)):
smpl = result.samples[-1]
W = smpl.W_effective
if "continuous" in str(smpl.__class__).lower():
t, irs = smpl.impulses
for k1 in range(K):
for k2 in range(K):
plt.subplot(K,K,k1*K + k2 + 1)
plt.plot(t, W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)
else:
irs = smpl.impulses
for k1 in range(K):
for k2 in range(K):
plt.subplot(K,K,k1*K + k2 + 1)
plt.plot(W[k1,k2] * irs[:,k1,k2], color=col[i], lw=2)
y_max = max(y_max, (W*irs).max())
for k1 in range(K):
for k2 in range(K):
plt.subplot(K,K,k1*K+k2+1)
plt.ylim(0,y_max*1.05)
plt.show()
# def run_comparison(data_path, test_path, output_dir, T_train=None, seed=None):
# """
# Run the comparison on the given data file
# :param data_path:
# :return:
# """
if __name__ == "__main__":
seed = None
run = 1
K = 4
C = 1
T = 1000
T_train = 1000
T_test = 1000
data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
test_path = os.path.join("data", "synthetic", "synthetic_test_K%d_C%d_T%d.pkl.gz" % (K,C,T_test))
output_dir = os.path.join("results", "synthetic_K%d_C%d_T%d" % (K,C,T_train), "run%03d" % run)
# run_comparison(data_path, test_path, output_dir, T_train=T_train, seed=seed)
if seed is None:
seed = np.random.randint(2**32)
print("Setting seed to ", seed)
np.random.seed(seed)
assert os.path.exists(os.path.dirname(output_dir)), "Output directory does not exist!"
S, S_test, true_model = load_data(data_path, test_path)
# If T_train is given, only use a fraction of the dataset
if T_train is not None:
S = S[:T_train,:]
# Use the true basis
dt, dt_max = true_model.dt, true_model.dt_max
basis = true_model.basis
network = true_model.network
# First fit the standard model
results = []
output_path = os.path.join(output_dir, "std.pkl.gz")
std_results = \
harness.fit_standard_hawkes_model_bfgs(S, S_test, dt, dt_max, output_path,
model_args={"basis": basis, "alpha": 1.0, "beta": 1.0})
std_model = std_results.samples[0]
# results.append(std_results)
# Now fit the Bayesian models with MCMC or VB,
# initializing with the standard model
models = [
"SS-DTH (Gibbs)",
#"SS-CTH (Gibbs)",
"MoG-DTH (VB)",
"MoG-DTH (SVI)"
]
methods = [
harness.fit_spikeslab_network_hawkes_gibbs,
#harness.fit_ct_network_hawkes_gibbs,
harness.fit_network_hawkes_vb,
harness.fit_network_hawkes_svi
]
inf_args = [
{"N_samples": 3000, "standard_model": std_model},
#{"N_samples": 1000, "standard_model": std_model},
{"N_samples": 3000, "standard_model": std_model},
{"N_samples": 3000, "standard_model": std_model}
]
model_args = [
{"basis": basis, "network": copy.deepcopy(network), "weight_hypers": {"parallel_resampling": False}},
#{"network": copy.deepcopy(network), "impulse_hypers" : {"mu_0": 0., "lmbda_0": 2.0, "alpha_0": 2.0, "beta_0" : 1.0}},
{"basis": basis, "network": copy.deepcopy(network)},
{"basis": basis, "network": copy.deepcopy(network)},
]
assert len(models) == len(methods) == len(inf_args) == len(model_args)
for model, method, iargs, margs in zip(models, methods, inf_args, model_args):
output_path = os.path.join(output_dir, model.lower() + ".pkl.gz")
results.append(method(S, S_test, dt, dt_max, output_path,
model_args=margs,
**iargs))
# Plot the reuslts
plt.ion()
plot_pred_ll_vs_time(models, results, burnin=1,
std_ll=std_results.test_lls[-1],
true_ll=true_model.heldout_log_likelihood(S_test))
# Plot impulse responses
# plot_impulse_responses(models, results)
| [
"harness.fit_standard_hawkes_model_bfgs",
"matplotlib.pyplot.subplot",
"copy.deepcopy",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.random.seed",
"gzip.open",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"os.path.dirname",
"hips.plotting.colormaps.harvard_colors",
"numpy.ones",
"... | [((263, 284), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (277, 284), False, 'import matplotlib\n'), ((1021, 1042), 'hips.plotting.layout.create_figure', 'create_figure', (['(4, 3)'], {}), '((4, 3))\n', (1034, 1042), False, 'from hips.plotting.layout import create_figure\n'), ((1082, 1098), 'hips.plotting.colormaps.harvard_colors', 'harvard_colors', ([], {}), '()\n', (1096, 1098), False, 'from hips.plotting.colormaps import harvard_colors\n'), ((1103, 1113), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1111, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1856, 1866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1864, 1866), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2078), 'hips.plotting.layout.create_figure', 'create_figure', (['(6, 6)'], {}), '((6, 6))\n', (2070, 2078), False, 'from hips.plotting.layout import create_figure\n'), ((2088, 2104), 'hips.plotting.colormaps.harvard_colors', 'harvard_colors', ([], {}), '()\n', (2102, 2104), False, 'from hips.plotting.colormaps import harvard_colors\n'), ((2109, 2119), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2117, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2963, 2973), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2971, 2973), True, 'import matplotlib.pyplot as plt\n'), ((3305, 3382), 'os.path.join', 'os.path.join', (['"""data"""', '"""synthetic"""', "('synthetic_K%d_C%d_T%d.pkl.gz' % (K, C, T))"], {}), "('data', 'synthetic', 'synthetic_K%d_C%d_T%d.pkl.gz' % (K, C, T))\n", (3317, 3382), False, 'import os\n'), ((3397, 3488), 'os.path.join', 'os.path.join', (['"""data"""', '"""synthetic"""', "('synthetic_test_K%d_C%d_T%d.pkl.gz' % (K, C, T_test))"], {}), "('data', 'synthetic', 'synthetic_test_K%d_C%d_T%d.pkl.gz' % (K,\n C, T_test))\n", (3409, 3488), False, 'import os\n'), ((3500, 3588), 'os.path.join', 'os.path.join', (['"""results"""', "('synthetic_K%d_C%d_T%d' % (K, C, T_train))", "('run%03d' % run)"], {}), 
"('results', 'synthetic_K%d_C%d_T%d' % (K, C, T_train), \n 'run%03d' % run)\n", (3512, 3588), False, 'import os\n'), ((3767, 3787), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3781, 3787), True, 'import numpy as np\n'), ((4266, 4304), 'os.path.join', 'os.path.join', (['output_dir', '"""std.pkl.gz"""'], {}), "(output_dir, 'std.pkl.gz')\n", (4278, 4304), False, 'import os\n'), ((4333, 4467), 'harness.fit_standard_hawkes_model_bfgs', 'harness.fit_standard_hawkes_model_bfgs', (['S', 'S_test', 'dt', 'dt_max', 'output_path'], {'model_args': "{'basis': basis, 'alpha': 1.0, 'beta': 1.0}"}), "(S, S_test, dt, dt_max, output_path,\n model_args={'basis': basis, 'alpha': 1.0, 'beta': 1.0})\n", (4371, 4467), False, 'import harness\n'), ((6029, 6038), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (6036, 6038), True, 'import matplotlib.pyplot as plt\n'), ((550, 575), 'gzip.open', 'gzip.open', (['data_path', '"""r"""'], {}), "(data_path, 'r')\n", (559, 575), False, 'import gzip\n'), ((606, 620), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (617, 620), False, 'import pickle\n'), ((631, 656), 'gzip.open', 'gzip.open', (['test_path', '"""r"""'], {}), "(test_path, 'r')\n", (640, 656), False, 'import gzip\n'), ((692, 706), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (703, 706), False, 'import pickle\n'), ((1218, 1318), 'matplotlib.pyplot.plot', 'plt.plot', (['result.timestamps[burnin:]', 'result.test_lls[burnin:]'], {'lw': '(2)', 'color': 'col[i]', 'label': 'model'}), '(result.timestamps[burnin:], result.test_lls[burnin:], lw=2, color=\n col[i], label=model)\n', (1226, 1318), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3728), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32)'], {}), '(2 ** 32)\n', (3719, 3728), True, 'import numpy as np\n'), ((3815, 3842), 'os.path.dirname', 'os.path.dirname', (['output_dir'], {}), '(output_dir)\n', (3830, 3842), False, 'import os\n'), ((1590, 1600), 'numpy.ones', 'np.ones', (['(2)'], {}), 
'(2)\n', (1597, 1600), True, 'import numpy as np\n'), ((1710, 1720), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1717, 1720), True, 'import numpy as np\n'), ((2897, 2931), 'matplotlib.pyplot.subplot', 'plt.subplot', (['K', 'K', '(k1 * K + k2 + 1)'], {}), '(K, K, k1 * K + k2 + 1)\n', (2908, 2931), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2961), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(y_max * 1.05)'], {}), '(0, y_max * 1.05)\n', (2944, 2961), True, 'import matplotlib.pyplot as plt\n'), ((5284, 5306), 'copy.deepcopy', 'copy.deepcopy', (['network'], {}), '(network)\n', (5297, 5306), False, 'import copy\n'), ((5521, 5543), 'copy.deepcopy', 'copy.deepcopy', (['network'], {}), '(network)\n', (5534, 5543), False, 'import copy\n'), ((5582, 5604), 'copy.deepcopy', 'copy.deepcopy', (['network'], {}), '(network)\n', (5595, 5604), False, 'import copy\n'), ((2442, 2476), 'matplotlib.pyplot.subplot', 'plt.subplot', (['K', 'K', '(k1 * K + k2 + 1)'], {}), '(K, K, k1 * K + k2 + 1)\n', (2453, 2476), True, 'import matplotlib.pyplot as plt\n'), ((2493, 2552), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(W[k1, k2] * irs[:, k1, k2])'], {'color': 'col[i]', 'lw': '(2)'}), '(t, W[k1, k2] * irs[:, k1, k2], color=col[i], lw=2)\n', (2501, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2718), 'matplotlib.pyplot.subplot', 'plt.subplot', (['K', 'K', '(k1 * K + k2 + 1)'], {}), '(K, K, k1 * K + k2 + 1)\n', (2695, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2791), 'matplotlib.pyplot.plot', 'plt.plot', (['(W[k1, k2] * irs[:, k1, k2])'], {'color': 'col[i]', 'lw': '(2)'}), '(W[k1, k2] * irs[:, k1, k2], color=col[i], lw=2)\n', (2743, 2791), True, 'import matplotlib.pyplot as plt\n')] |
from numpy import zeros,mean,std, sum, array,inf,isinf
import copy
from multiprocessing import Process, Manager, Queue
from queue import Empty
import time
def summed_sq_error(features,segment_ends):
num_segs = len(segment_ends)
seg_begin = 0
sse = 0
for l in range(0,num_segs):
seg_end = segment_ends[l]
#if num_segs == 28:
# print(l)
# print(seg_begin)
# print(seg_end)
sse += seg_sse(features,seg_begin,seg_end)
seg_begin = seg_end
#print(sse)
return sse
def seg_sse(features,seg_begin,seg_end):
num_features = features.shape[1]
ml = zeros((1,num_features))
count = 0
for t in range(seg_begin,seg_end):
ml += features[t,:]
count += 1
ml /= count
sse = 0
for t in range(seg_begin,seg_end):
sse += sum((features[t,:] - ml) ** 2)
return sse
def sse_worker(job_q,return_dict,features, segment_set):
while True:
try:
l = job_q.get(timeout=1)
except Empty:
break
return_dict[segment_set[l]] = calc_boundary_removal_sse(features, segment_set, l)
def calc_boundary_removal_sse(features, segment_set, l):
segment_temp = copy.deepcopy(segment_set)
del segment_temp[l]
if l == 0:
begin = 0
else:
begin = segment_temp[l-1]
return seg_sse(features,begin,segment_temp[l])
def generate_initial_cache(features,segment_set, num_procs):
sse_cache = {}
num_segments = len(segment_set)
job_queue = Queue()
for l in range(num_segments-1):
job_queue.put(l)
manager = Manager()
return_dict = manager.dict()
procs = []
for i in range(num_procs):
p = Process(
target=sse_worker,
args=(job_queue,
return_dict,features, segment_set))
procs.append(p)
p.start()
time.sleep(2)
for p in procs:
p.join()
return return_dict
def find_next_best_cached(features,segment_temp,sse_cache):
segment_set = copy.deepcopy(segment_temp)
available = filter(lambda x: x[0] != features.shape[0],sse_cache.items())
seg_bound, min_delta_sse = min(available, key = lambda x: x[1])
inverted = dict([[v,k] for k,v in enumerate(segment_set)])
ind = inverted[seg_bound]
del segment_set[ind]
del sse_cache[seg_bound]
if ind != 0:
sse_cache[segment_set[ind-1]] = calc_boundary_removal_sse(features,segment_set,ind-1)
if segment_set[ind] != features.shape[0]:
sse_cache[segment_set[ind]] = calc_boundary_removal_sse(features,segment_set,ind)
return min_delta_sse, segment_set, sse_cache
def to_segments(features, threshold = 0.1, return_means=False,debug=False):
if debug:
start_time = time.time()
print(features.shape)
num_frames, num_coeffs = features.shape
L = {}
segment_iter = {}
seg_temp = list(range(1,num_frames+1))
if debug:
print('begin boundary removals')
prev_time = time.time()
sse_cache = generate_initial_cache(features,seg_temp, 6)
for num_segments in range(num_frames-1,1,-1):
if debug:
cur = num_frames-1-num_segments
if cur % 100 == 0:
print('loop %d of %d' % (cur,num_frames-1))
#L[num_segments],segment_iter[num_segments], sse_cache = find_next_best(features,seg_temp,sse_cache,1)
L[num_segments],segment_iter[num_segments], sse_cache = find_next_best_cached(features,seg_temp,sse_cache)
seg_temp = segment_iter[num_segments]
if debug:
print('greedy segmentation took %f seconds' % (time.time()-start_time))
print('Finding threshold')
Larray = array(list(L.values()))
thresh = mean(Larray) + (threshold *std(Larray))
if debug:
print(mean(Larray))
print(thresh)
ks = list(segment_iter.keys())
for i in range(max(ks),min(ks)-1,-1):
if L[i] > thresh:
if debug:
print(i)
optimal = segment_iter[i]
break
else:
optimal = segment_iter[-1]
if not return_means:
return optimal
seg_begin = 0
segments = zeros((len(optimal),num_coeffs))
for i in range(len(optimal)):
seg_end = optimal[i]
segments[i,:] = mean(features[seg_begin:seg_end,:],axis=0)
seg_begin = seg_end
return optimal,segments
| [
"copy.deepcopy",
"numpy.sum",
"numpy.std",
"multiprocessing.Manager",
"numpy.zeros",
"time.sleep",
"time.time",
"numpy.mean",
"multiprocessing.Queue",
"multiprocessing.Process"
] | [((637, 661), 'numpy.zeros', 'zeros', (['(1, num_features)'], {}), '((1, num_features))\n', (642, 661), False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((1220, 1246), 'copy.deepcopy', 'copy.deepcopy', (['segment_set'], {}), '(segment_set)\n', (1233, 1246), False, 'import copy\n'), ((1533, 1540), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (1538, 1540), False, 'from multiprocessing import Process, Manager, Queue\n'), ((1618, 1627), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1625, 1627), False, 'from multiprocessing import Process, Manager, Queue\n'), ((1900, 1913), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1910, 1913), False, 'import time\n'), ((2053, 2080), 'copy.deepcopy', 'copy.deepcopy', (['segment_temp'], {}), '(segment_temp)\n', (2066, 2080), False, 'import copy\n'), ((843, 874), 'numpy.sum', 'sum', (['((features[t, :] - ml) ** 2)'], {}), '((features[t, :] - ml) ** 2)\n', (846, 874), False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((1719, 1804), 'multiprocessing.Process', 'Process', ([], {'target': 'sse_worker', 'args': '(job_queue, return_dict, features, segment_set)'}), '(target=sse_worker, args=(job_queue, return_dict, features, segment_set)\n )\n', (1726, 1804), False, 'from multiprocessing import Process, Manager, Queue\n'), ((2787, 2798), 'time.time', 'time.time', ([], {}), '()\n', (2796, 2798), False, 'import time\n'), ((3024, 3035), 'time.time', 'time.time', ([], {}), '()\n', (3033, 3035), False, 'import time\n'), ((3752, 3764), 'numpy.mean', 'mean', (['Larray'], {}), '(Larray)\n', (3756, 3764), False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((4308, 4352), 'numpy.mean', 'mean', (['features[seg_begin:seg_end, :]'], {'axis': '(0)'}), '(features[seg_begin:seg_end, :], axis=0)\n', (4312, 4352), False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((3779, 3790), 'numpy.std', 'std', (['Larray'], {}), '(Larray)\n', (3782, 3790), 
False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((3820, 3832), 'numpy.mean', 'mean', (['Larray'], {}), '(Larray)\n', (3824, 3832), False, 'from numpy import zeros, mean, std, sum, array, inf, isinf\n'), ((3642, 3653), 'time.time', 'time.time', ([], {}), '()\n', (3651, 3653), False, 'import time\n')] |
"""
sigma.py
"""
import numpy as np
def sigma(self, n):
pol = n.shape[1]
if pol == 1:
sigma = (self.grada @ n) ** 2 + (self.gradr @ n) ** 2
elif pol == 2:
npoints = n.shape[0]
sigma = np.zeros((npoints, 3))
sigma[:,2] = (self.grada @ n[:, 1]) ** 2 + (self.gradr @ n[:, 1]) ** 2
sigma[:,0] = (self.grada @ n[:, 0]) ** 2 + (self.gradr @ n[:, 0]) ** 2
sigma[:,1] = self.grada @ n[:, 0] * self.grada @ n[:,1] + self.gradr @ n[:, 0] * self.gradr @ n[:,1]
else:
raise ValueError("Shape of density in second axis should not be greater than 2")
return sigma | [
"numpy.zeros"
] | [((227, 249), 'numpy.zeros', 'np.zeros', (['(npoints, 3)'], {}), '((npoints, 3))\n', (235, 249), True, 'import numpy as np\n')] |
""" Han """
import numpy as np
from shapely.geometry import Point, Polygon, LineString, MultiLineString
import matplotlib.pyplot as plt
""" Parameters to specify """
POLYGONS = {
"triangle.obj": [(0, 0), (0.068 + 0.002, 0), (0.068 + 0.002, 0.053 + 0.002), (0, 0.053 + 0.002)],
"square.obj": [(0, 0), (0.07 + 0.002, 0), (0.07 + 0.002, 0.07 + 0.002), (0, 0.07 + 0.002)],
"rectangle.obj": [(0, 0), (0.0675 + 0.002, 0), (0.0675 + 0.002, 0.045 + 0.002), (0, 0.045 + 0.002)],
"non-convex.obj": [(0, 0), (0.0754 + 0.002, 0), (0.0754 + 0.002, 0.0306 + 0.002), (0, 0.0306 + 0.002)],
"half-cylinder.obj": [(0, 0), (0.0835 + 0.002, 0), (0.0835 + 0.002, 0.0488 + 0.002), (0, 0.0488 + 0.002)],
"cylinder.obj": [(0, 0), (0.0665 + 0.002, 0), (0.0665 + 0.002, 0.056 + 0.002), (0, 0.056 + 0.002)],
}
CENTERS = {
"triangle.obj": (0.034 + 0.001, 0.0265 + 0.001),
"square.obj": (0.035 + 0.001, 0.035 + 0.001),
"rectangle.obj": (0.03375 + 0.001, 0.0225 + 0.001),
"non-convex.obj": (0.0377 + 0.001, 0.0153 + 0.001),
"half-cylinder.obj": (0.04175 + 0.001, 0.0244 + 0.001),
"cylinder.obj": (0.03325 + 0.001, 0.028 + 0.001),
}
heights = {
"triangle.obj": 0.02,
"square.obj": 0.02,
"rectangle.obj": 0.02,
"non-convex.obj": 0.02,
"half-cylinder.obj": 0.02,
"cylinder.obj": 0.02,
}
color_space = np.asarray([[78.0, 151.0, 167.0], # blue
[89.0, 161.0, 109.0], # green
[186, 117, 95], # brown
[242, 172, 43], # orange
[237.0, 201.0, 102.0], # yellow
[216, 176, 172], # gray
[255.0, 117.0, 89.0], # red
[176, 122, 191], # purple
[148, 183, 178], # cyan
[255, 187, 167]]) / 255.0 # pink
def generate_unit_scenario(shapes: list) -> np.ndarray:
""" Randomly generate challenging VPG test scenarios.
Params:
shapes (list): shapes of objects in the generated scene.
Return:
np.ndarray: each row represents the 2d pose for a shape.
"""
# Find polygons of all shapes
polys = [np.array(POLYGONS[s], dtype=np.float64) for s in shapes]
centers = [np.array(CENTERS[s], dtype=np.float64) for s in shapes]
configs = [[centers[0][0], centers[0][1], 0]]
# We start with an initial shape and build up
meta_poly = Polygon(polys[0])
# Iterate through all polygons and add them to meta_poly
for j, p in enumerate(polys):
if j == 0:
continue
# Randomly find an edge on meta_poly to attach polygon
coords = np.transpose(meta_poly.exterior.coords.xy)
matched = False
while not matched:
# Looking for an edge
index = np.random.randint(0, len(coords))
start_pt = coords[index]
# Looking for an arbitrary rotation
angle = np.random.randint(0, 4) * np.pi / 2
rotation_matrix = np.matrix([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
# Transitions to match the new polygon to the existing shape
center = np.copy(centers[j])
poly = np.copy(p)
center -= poly[0]
poly -= poly[0]
for i in range(len(poly)):
poly[i] = np.dot(rotation_matrix, np.transpose(poly[i]))
center = np.dot(rotation_matrix, np.transpose(center))
pt = np.random.randint(poly.shape[0])
center -= poly[pt]
poly -= poly[pt]
poly += start_pt
center += start_pt
# Check if the generated pose suggests a hard case
suggested_poly = Polygon(poly)
if meta_poly.intersects(suggested_poly):
if type(meta_poly.intersection(suggested_poly)) is Polygon and meta_poly.intersection(suggested_poly).area < 1e-15:
meta_poly = meta_poly.union(suggested_poly)
configs.append([center[0, 0], center[0, 1], angle])
break
if meta_poly.touches(suggested_poly):
if type(meta_poly.intersection(suggested_poly)) is not Point and meta_poly.intersection(suggested_poly).area < 1e-8:
meta_poly = meta_poly.union(suggested_poly)
configs.append([center[0, 0], center[0, 1], angle])
break
# Finally, a random rotation for all objects
def my_rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
return [qx, qy]
angle = np.random.uniform(-np.pi, np.pi)
configs = [my_rotate((0, 0), (config[0], config[1]), angle) + [config[2] + angle] for config in configs]
# fig, ax = plt.subplots()
# ax.plot(*meta_poly.exterior.xy)
# ax.plot(*(np.transpose(configs)[:2]), "o")
# ax.set_aspect(1)
# plt.show()
return configs
def generate(shape_list, num_scenarios, num_shapes_min, num_shapes_max, color_space):
""" Randomly generate challenging VPG test scenarios. Output to txt.
Params:
shape_list (list): all available shapes.
num_scenarios (int): number of scenarios to be generated.
num_shapes_min, num_shapes_max: the range of number of objects in a scenario
"""
np.random.seed(0)
num_generated = 0
while num_generated < num_scenarios:
color = color_space[num_generated % len(color_space)]
num_objects = np.random.randint(num_shapes_min, num_shapes_max + 1)
selected_objects = np.random.choice(shape_list, size=num_objects)
try:
configs = generate_unit_scenario(selected_objects)
configs = [[round(c, 6) for c in config] for config in configs]
with open("hard-cases/" + str(num_generated) + ".txt", "w") as out_file:
for i, obj in enumerate(selected_objects):
out_file.write('%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n' % (obj,
color[0], color[1], color[2],
configs[i][0] + -0.5, configs[i][1], heights[obj],
0, 0, configs[i][2]))
num_generated += 1
except:
continue
if __name__ == "__main__":
generate([x for x in POLYGONS.keys()] * 5, 200, 5, 8, color_space)
| [
"numpy.random.uniform",
"numpy.random.seed",
"shapely.geometry.Polygon",
"numpy.copy",
"numpy.asarray",
"numpy.transpose",
"numpy.random.randint",
"numpy.array",
"numpy.sin",
"numpy.cos",
"numpy.random.choice"
] | [((1342, 1553), 'numpy.asarray', 'np.asarray', (['[[78.0, 151.0, 167.0], [89.0, 161.0, 109.0], [186, 117, 95], [242, 172, 43],\n [237.0, 201.0, 102.0], [216, 176, 172], [255.0, 117.0, 89.0], [176, 122,\n 191], [148, 183, 178], [255, 187, 167]]'], {}), '([[78.0, 151.0, 167.0], [89.0, 161.0, 109.0], [186, 117, 95], [\n 242, 172, 43], [237.0, 201.0, 102.0], [216, 176, 172], [255.0, 117.0, \n 89.0], [176, 122, 191], [148, 183, 178], [255, 187, 167]])\n', (1352, 1553), True, 'import numpy as np\n'), ((2461, 2478), 'shapely.geometry.Polygon', 'Polygon', (['polys[0]'], {}), '(polys[0])\n', (2468, 2478), False, 'from shapely.geometry import Point, Polygon, LineString, MultiLineString\n'), ((4943, 4975), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {}), '(-np.pi, np.pi)\n', (4960, 4975), True, 'import numpy as np\n'), ((5649, 5666), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5663, 5666), True, 'import numpy as np\n'), ((2217, 2256), 'numpy.array', 'np.array', (['POLYGONS[s]'], {'dtype': 'np.float64'}), '(POLYGONS[s], dtype=np.float64)\n', (2225, 2256), True, 'import numpy as np\n'), ((2289, 2327), 'numpy.array', 'np.array', (['CENTERS[s]'], {'dtype': 'np.float64'}), '(CENTERS[s], dtype=np.float64)\n', (2297, 2327), True, 'import numpy as np\n'), ((2694, 2736), 'numpy.transpose', 'np.transpose', (['meta_poly.exterior.coords.xy'], {}), '(meta_poly.exterior.coords.xy)\n', (2706, 2736), True, 'import numpy as np\n'), ((5814, 5867), 'numpy.random.randint', 'np.random.randint', (['num_shapes_min', '(num_shapes_max + 1)'], {}), '(num_shapes_min, num_shapes_max + 1)\n', (5831, 5867), True, 'import numpy as np\n'), ((5895, 5941), 'numpy.random.choice', 'np.random.choice', (['shape_list'], {'size': 'num_objects'}), '(shape_list, size=num_objects)\n', (5911, 5941), True, 'import numpy as np\n'), ((3218, 3237), 'numpy.copy', 'np.copy', (['centers[j]'], {}), '(centers[j])\n', (3225, 3237), True, 'import numpy as np\n'), ((3257, 3267), 
'numpy.copy', 'np.copy', (['p'], {}), '(p)\n', (3264, 3267), True, 'import numpy as np\n'), ((3522, 3554), 'numpy.random.randint', 'np.random.randint', (['poly.shape[0]'], {}), '(poly.shape[0])\n', (3539, 3554), True, 'import numpy as np\n'), ((3767, 3780), 'shapely.geometry.Polygon', 'Polygon', (['poly'], {}), '(poly)\n', (3774, 3780), False, 'from shapely.geometry import Point, Polygon, LineString, MultiLineString\n'), ((3483, 3503), 'numpy.transpose', 'np.transpose', (['center'], {}), '(center)\n', (3495, 3503), True, 'import numpy as np\n'), ((4808, 4821), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4814, 4821), True, 'import numpy as np\n'), ((4880, 4893), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4886, 4893), True, 'import numpy as np\n'), ((2981, 3004), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (2998, 3004), True, 'import numpy as np\n'), ((3415, 3436), 'numpy.transpose', 'np.transpose', (['poly[i]'], {}), '(poly[i])\n', (3427, 3436), True, 'import numpy as np\n'), ((4780, 4793), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4786, 4793), True, 'import numpy as np\n'), ((4852, 4865), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4858, 4865), True, 'import numpy as np\n'), ((3059, 3072), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3065, 3072), True, 'import numpy as np\n'), ((3092, 3105), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3098, 3105), True, 'import numpy as np\n'), ((3107, 3120), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3113, 3120), True, 'import numpy as np\n'), ((3075, 3088), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3081, 3088), True, 'import numpy as np\n')] |
from pathlib import Path
from typing import Dict
from typing import List
from typing import Optional
import numpy
from astropy.io import fits
from tqdm import tqdm
from utils import paths
__all__ = ['extract_apogee', 'APOGEE_OUT_PATH', 'JSON_PATH']
APO_TELESCOPE = '25m'
APOGEE_PATH = Path(f'/data/abd/sdss/stars/apo{APO_TELESCOPE}').resolve()
APOGEE_OUT_PATH = paths.DATA_DIR.joinpath(f'apo{APO_TELESCOPE}.npy')
APOGEE_METADATA_PATH = paths.DATA_DIR.joinpath(f'apo{APO_TELESCOPE}_filenames.csv')
JSON_PATH = paths.RESULTS_DIR.joinpath(f'apo{APO_TELESCOPE}_bench.json')
NUM_DIMS = 8_575
def get_fields_map() -> Dict[str, List[str]]:
    """Scan APOGEE_PATH and map each field directory to its apStar/asStar files.

    Returns:
        Mapping of field-directory name -> list of matching FITS file names.
        Fields with no matching files are omitted.
    """
    print('finding all fields...')
    result: Dict[str, List[str]] = {}
    for field_dir in tqdm(list(APOGEE_PATH.iterdir())):
        matching = [
            entry.name for entry in field_dir.iterdir()
            if 'apStar-' in entry.name or 'asStar-' in entry.name
        ]
        if matching:
            result[field_dir.name] = matching
    num_spectra = sum(len(names) for names in result.values())
    print(f'collected {num_spectra} matching fits files for {APO_TELESCOPE}...')
    return result
def extract_combined_spectra(fields_map: Dict[str, List[str]], *, test_chunk: Optional[int] = None):
    """Average each FITS file's spectra into one row and save the stacked matrix.

    For every file listed in `fields_map`, HDU 1 is read, averaged over its
    second axis, and written into one row of a (num_spectra, NUM_DIMS) float32
    matrix saved to APOGEE_OUT_PATH. A CSV of (field, filename) provenance is
    written to APOGEE_METADATA_PATH for every successfully read file.

    Args:
        fields_map: mapping of field name -> FITS file names inside that
            field's directory (as produced by `get_fields_map`).
        test_chunk: if given, stop after this many spectra (quick-test mode);
            the unfilled tail rows are dropped from the saved output.
    """
    num_spectra = sum(map(len, fields_map.values()))
    # Pre-allocate the full output matrix; bad/unfilled rows are removed later.
    apogee_spectra = numpy.zeros(
        shape=(num_spectra, NUM_DIMS),
        dtype=numpy.float32,
    )
    indices_to_remove = set()
    with tqdm(total=num_spectra) as progress_bar:
        # Truncate the metadata CSV and write its header row.
        with open(APOGEE_METADATA_PATH, 'w') as metadata_csv:
            metadata_csv.write(f'field,fits_name\n')
        index = -1
        for field, files in fields_map.items():
            field_path = APOGEE_PATH.joinpath(field)
            for file in files:
                index += 1
                file_path = field_path.joinpath(file)
                # Unreadable/corrupt FITS files are skipped; their row index is
                # remembered so the pre-allocated row can be dropped at the end.
                # noinspection PyBroadException
                try:
                    with fits.open(str(file_path)) as hdul:
                        # Assumes HDU 1 holds the spectra — TODO confirm this
                        # matches the apStar/asStar data model.
                        spectra = numpy.asarray(hdul[1].data, dtype=numpy.float32)
                except Exception as _:
                    indices_to_remove.add(index)
                    continue
                # Normalize so axis 0 is wavelength and axis 1 indexes visits
                # (a 1-D file becomes a single-visit column).
                if spectra.ndim == 1:
                    spectra = numpy.expand_dims(spectra, axis=1)
                else:
                    spectra = spectra.T
                # Record provenance only for files that were actually read.
                with open(APOGEE_METADATA_PATH, 'a') as metadata_csv:
                    metadata_csv.write(f'{field},{file}\n')
                apogee_spectra[index, :] = numpy.mean(spectra, axis=1)
                progress_bar.update(1)
                if test_chunk is not None and index >= (test_chunk - 1):
                    break
            # When the test chunk is reached, mark every remaining row for
            # removal and stop scanning further fields.
            if test_chunk is not None and index >= (test_chunk - 1):
                indices_to_remove.update(range(test_chunk, num_spectra))
                break
    if len(indices_to_remove) > 0:
        print(f'removing {len(indices_to_remove)} bad spectra...')
        indices_to_keep = list(filter(lambda i: i not in indices_to_remove, range(num_spectra)))
        apogee_spectra = apogee_spectra[indices_to_keep, :]
    print(f'saving {apogee_spectra.shape[0]} apo{APO_TELESCOPE} spectra...')
    numpy.save(
        file=str(APOGEE_OUT_PATH),
        arr=apogee_spectra,
        allow_pickle=False,
        fix_imports=False,
    )
    return
def extract_apogee():
    """Build the field -> FITS-files map and run the spectra extraction."""
    fields_map = get_fields_map()
    extract_combined_spectra(fields_map)
| [
"tqdm.tqdm",
"numpy.asarray",
"numpy.zeros",
"numpy.expand_dims",
"utils.paths.RESULTS_DIR.joinpath",
"pathlib.Path",
"utils.paths.DATA_DIR.joinpath",
"numpy.mean"
] | [((367, 417), 'utils.paths.DATA_DIR.joinpath', 'paths.DATA_DIR.joinpath', (['f"""apo{APO_TELESCOPE}.npy"""'], {}), "(f'apo{APO_TELESCOPE}.npy')\n", (390, 417), False, 'from utils import paths\n'), ((441, 501), 'utils.paths.DATA_DIR.joinpath', 'paths.DATA_DIR.joinpath', (['f"""apo{APO_TELESCOPE}_filenames.csv"""'], {}), "(f'apo{APO_TELESCOPE}_filenames.csv')\n", (464, 501), False, 'from utils import paths\n'), ((514, 574), 'utils.paths.RESULTS_DIR.joinpath', 'paths.RESULTS_DIR.joinpath', (['f"""apo{APO_TELESCOPE}_bench.json"""'], {}), "(f'apo{APO_TELESCOPE}_bench.json')\n", (540, 574), False, 'from utils import paths\n'), ((1358, 1421), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(num_spectra, NUM_DIMS)', 'dtype': 'numpy.float32'}), '(shape=(num_spectra, NUM_DIMS), dtype=numpy.float32)\n', (1369, 1421), False, 'import numpy\n'), ((290, 338), 'pathlib.Path', 'Path', (['f"""/data/abd/sdss/stars/apo{APO_TELESCOPE}"""'], {}), "(f'/data/abd/sdss/stars/apo{APO_TELESCOPE}')\n", (294, 338), False, 'from pathlib import Path\n'), ((1485, 1508), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_spectra'}), '(total=num_spectra)\n', (1489, 1508), False, 'from tqdm import tqdm\n'), ((2546, 2573), 'numpy.mean', 'numpy.mean', (['spectra'], {'axis': '(1)'}), '(spectra, axis=1)\n', (2556, 2573), False, 'import numpy\n'), ((2274, 2308), 'numpy.expand_dims', 'numpy.expand_dims', (['spectra'], {'axis': '(1)'}), '(spectra, axis=1)\n', (2291, 2308), False, 'import numpy\n'), ((2039, 2087), 'numpy.asarray', 'numpy.asarray', (['hdul[1].data'], {'dtype': 'numpy.float32'}), '(hdul[1].data, dtype=numpy.float32)\n', (2052, 2087), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
import numpy
import os
import run_model
import datetime
import argparse
from logger import create_logger
def main():
    """Parse command-line options, normalize them, create a timestamped run
    directory, and launch model training via ``run_model.train_model``.

    Raises:
        ValueError: if LengthNormalizationFactor is outside [0.5, 0.7] or
            SizeBeam is not an int after parsing.
    """
    parser = argparse.ArgumentParser(
        description='Training model ... '
    )
    #
    # The directory from which to find the data. default in data_processors is './data/'
    parser.add_argument(
        '-fd', '--FileData', required=False,
        help='Path of the dataset'
    )
    #
    parser.add_argument(
        '-d', '--DimModel', required=False,
        help='Dimension of LSTM model '
    )
    parser.add_argument(
        '-s', '--Seed', required=False,
        help='Seed of random state'
    )
    parser.add_argument(
        '-fp', '--FilePretrain', required=False,
        help='File of pretrained model'
    )
    parser.add_argument(
        '-me', '--MaxEpoch', required=False,
        help='Max epoch number of training'
    )
    parser.add_argument(
        '-do', '--DropOut', required=False,
        help='Drop-out rate'
    )
    #
    parser.add_argument(
        '-m1', '--Map1', required=False,
        help='First Train Map'
    )
    parser.add_argument(
        '-m2', '--Map2', required=False,
        help='Second Train Map'
    )
    parser.add_argument(
        '-vl', '--Validation', required=False, action='store_true',
        help='validate trained model (True/False, default is false)'
    )
    # BUG FIX: without type=int argparse keeps the command-line value as a
    # string, which can never be a member of choices=range(1, 20), so every
    # explicit -sb value used to be rejected as an invalid choice.
    parser.add_argument(
        '-sb', '--SizeBeam', required=False, type=int,
        choices=range(1, 20), default=4,
        help='Validation mode: Size of Beam (Integer, default is 4)'
    )
    # BUG FIX: convert to float so a user-supplied value passes the range
    # check below (it used to stay a string and fail the isinstance check).
    parser.add_argument(
        '-lnf', '--LengthNormalizationFactor', required=False, type=float,
        default=0.5,
        help='Validation mode: Length Normalization Factor [0.5-0.7] (0.5 is the default)'
    )
    args = parser.parse_args()

    # Fill defaults and coerce the numeric options to numpy scalars.
    args.MaxEpoch = numpy.int32(20 if args.MaxEpoch is None else args.MaxEpoch)
    args.DimModel = numpy.int32(100 if args.DimModel is None else args.DimModel)
    args.Seed = numpy.int32(90001 if args.Seed is None else args.Seed)
    args.DropOut = numpy.float32(0.9 if args.DropOut is None else args.DropOut)
    args.Map1 = 'map_2' if args.Map1 is None else str(args.Map1)
    args.Map2 = 'map_3' if args.Map2 is None else str(args.Map2)

    # Explicit validation instead of assert (asserts are stripped under -O).
    if not isinstance(args.LengthNormalizationFactor, float):
        raise ValueError('LengthNormalizationFactor must be a float')
    if not (0.5 <= args.LengthNormalizationFactor <= 0.7):
        raise ValueError('LengthNormalizationFactor must be in [0.5, 0.7]')
    if not isinstance(args.SizeBeam, int):
        raise ValueError("Size of Beam is not an int")

    # Unique, timestamped output directory for this run.
    id_process = os.getpid()
    time_current = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    #
    tag_model = '_PID=' + str(id_process) + '_TIME=' + time_current
    #
    path_track = './tracks/track' + tag_model + '/'
    # BUG FIX: join the directory and file name as two arguments instead of
    # string concatenation passed to a one-argument join.
    file_log = os.path.join(path_track, 'log.txt')
    if not os.path.exists(path_track):
        os.makedirs(path_track)
    args.path_save = os.path.abspath(path_track)

    logger = create_logger(file_log, 'trainer log')
    logger.info(args)
    run_model.train_model(args)
# Script entry point: run training, then print a completion marker.
if __name__ == "__main__":
    main()
    print("END")
| [
"os.path.abspath",
"os.getpid",
"argparse.ArgumentParser",
"os.makedirs",
"logger.create_logger",
"run_model.train_model",
"numpy.float32",
"os.path.exists",
"datetime.datetime.now",
"numpy.int32",
"os.path.join"
] | [((170, 228), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training model ... """'}), "(description='Training model ... ')\n", (193, 228), False, 'import argparse\n'), ((2842, 2853), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2851, 2853), False, 'import os\n'), ((3084, 3120), 'os.path.join', 'os.path.join', (["(path_track + 'log.txt')"], {}), "(path_track + 'log.txt')\n", (3096, 3120), False, 'import os\n'), ((3220, 3247), 'os.path.abspath', 'os.path.abspath', (['path_track'], {}), '(path_track)\n', (3235, 3247), False, 'import os\n'), ((3264, 3302), 'logger.create_logger', 'create_logger', (['file_log', '"""trainer log"""'], {}), "(file_log, 'trainer log')\n", (3277, 3302), False, 'from logger import create_logger\n'), ((3333, 3360), 'run_model.train_model', 'run_model.train_model', (['args'], {}), '(args)\n', (3354, 3360), False, 'import run_model\n'), ((1934, 1949), 'numpy.int32', 'numpy.int32', (['(20)'], {}), '(20)\n', (1945, 1949), False, 'import numpy\n'), ((1988, 2014), 'numpy.int32', 'numpy.int32', (['args.MaxEpoch'], {}), '(args.MaxEpoch)\n', (1999, 2014), False, 'import numpy\n'), ((2073, 2089), 'numpy.int32', 'numpy.int32', (['(100)'], {}), '(100)\n', (2084, 2089), False, 'import numpy\n'), ((2126, 2152), 'numpy.int32', 'numpy.int32', (['args.DimModel'], {}), '(args.DimModel)\n', (2137, 2152), False, 'import numpy\n'), ((2201, 2219), 'numpy.int32', 'numpy.int32', (['(90001)'], {}), '(90001)\n', (2212, 2219), False, 'import numpy\n'), ((2252, 2274), 'numpy.int32', 'numpy.int32', (['args.Seed'], {}), '(args.Seed)\n', (2263, 2274), False, 'import numpy\n'), ((2329, 2347), 'numpy.float32', 'numpy.float32', (['(0.9)'], {}), '(0.9)\n', (2342, 2347), False, 'import numpy\n'), ((2383, 2410), 'numpy.float32', 'numpy.float32', (['args.DropOut'], {}), '(args.DropOut)\n', (2396, 2410), False, 'import numpy\n'), ((3135, 3161), 'os.path.exists', 'os.path.exists', (['path_track'], {}), '(path_track)\n', (3149, 3161), False, 
'import os\n'), ((3172, 3195), 'os.makedirs', 'os.makedirs', (['path_track'], {}), '(path_track)\n', (3183, 3195), False, 'import os\n'), ((2874, 2897), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2895, 2897), False, 'import datetime\n')] |
"""@package docstring
Encoding and decoding python native data structures as
portable JData-spec annotated dict structure
Copyright (c) 2019-2022 <NAME> <q.<EMAIL> at neu.edu>
"""
__all__ = ['encode','decode','jdtype','jsonfilter']
##====================================================================================
## dependent libraries
##====================================================================================
import numpy as np
import copy
import zlib
import base64
##====================================================================================
## global variables
##====================================================================================
""" @brief Mapping Numpy data types to JData data types
complex-valued data are reflected in the doubled data size
"""
# Mapping of NumPy dtype names to JData "_ArrayType_" names (complex values
# are stored as two real arrays of the component precision).
jdtype = {'float32': 'single', 'float64': 'double', 'float_': 'double',
          'bool': 'uint8', 'byte': 'int8', 'short': 'int16', 'ubyte': 'uint8',
          'ushort': 'uint16', 'int_': 'int32', 'uint': 'uint32',
          'complex_': 'double', 'complex128': 'double', 'complex64': 'single',
          'longlong': 'int64', 'ulonglong': 'uint64',
          'csingle': 'single', 'cdouble': 'double'}

# Codecs accepted in the "_ArrayZipType_" annotation.
_zipper = ['zlib', 'gzip', 'lzma', 'lz4', 'base64']

##====================================================================================
## Python to JData encoding function
##====================================================================================

def encode(d, opt={}):
    """@brief Encoding a Python data structure to portable JData-annotated dict constructs

    This function converts complex data types (usually not JSON-serializable) into
    portable JData-annotated dict/list constructs that can be easily exported as
    JSON/JData files.

    @param[in,out] d: an arbitrary Python data
    @param[in] opt: options, can contain 'compression'=['zlib','gzip','lzma','lz4','base64']
        for array data compression, and 'base64'=True to base64-encode the
        compressed payload
    """
    # Verify early that the requested optional codec module is importable.
    if 'compression' in opt:
        if opt['compression'] == 'lzma':
            try:
                try:
                    import lzma
                except ImportError:
                    from backports import lzma
            except Exception:
                raise Exception('JData', 'you must install "lzma" module to compress with this format')
        elif opt['compression'] == 'lz4':
            try:
                import lz4.frame
            except ImportError:
                raise Exception('JData', 'you must install "lz4" module to compress with this format')

    if isinstance(d, float):
        # Non-finite floats become JData string constants.
        if np.isnan(d):
            return '_NaN_'
        elif np.isinf(d):
            return '_Inf_' if (d > 0) else '-_Inf_'
        return d
    elif isinstance(d, (list, tuple, set, frozenset)):
        return encodelist(d, opt)
    elif isinstance(d, dict):
        return encodedict(d, opt)
    elif isinstance(d, complex):
        # A scalar complex becomes a length-1 annotated array of [real, imag].
        return {
            '_ArrayType_': 'double',
            '_ArraySize_': 1,
            '_ArrayIsComplex_': True,
            '_ArrayData_': [d.real, d.imag],
        }
    elif isinstance(d, np.ndarray):
        newobj = {}
        newobj['_ArrayType_'] = jdtype[str(d.dtype)] if (str(d.dtype) in jdtype) else str(d.dtype)
        newobj['_ArraySize_'] = list(d.shape)
        if d.dtype in (np.complex64, np.complex128, np.csingle, np.cdouble):
            # Complex arrays are stored as [real part, imaginary part].
            newobj['_ArrayIsComplex_'] = True
            newobj['_ArrayData_'] = [list(d.flatten().real), list(d.flatten().imag)]
        else:
            newobj['_ArrayData_'] = list(d.flatten())
        if 'compression' in opt:
            if opt['compression'] not in _zipper:
                raise Exception('JData', 'compression method is not supported')
            newobj['_ArrayZipType_'] = opt['compression']
            newobj['_ArrayZipSize_'] = [1 + int('_ArrayIsComplex_' in newobj), d.size]
            # BUG FIX: ndarray.tostring() was deprecated and removed in
            # NumPy 2.0; tobytes() is the byte-identical replacement.
            newobj['_ArrayZipData_'] = np.asarray(newobj['_ArrayData_'], dtype=d.dtype).tobytes()
            if opt['compression'] == 'zlib':
                newobj['_ArrayZipData_'] = zlib.compress(newobj['_ArrayZipData_'])
            elif opt['compression'] == 'gzip':
                # BUG FIX: zlib.compress(data, zlib.MAX_WBITS|32) passed the
                # wbits value as the compression *level* and raised
                # zlib.error; use a compressobj with a gzip-container wbits.
                compressor = zlib.compressobj(wbits=zlib.MAX_WBITS | 16)
                newobj['_ArrayZipData_'] = (
                    compressor.compress(newobj['_ArrayZipData_']) + compressor.flush()
                )
            elif opt['compression'] == 'lzma':
                try:
                    try:
                        import lzma
                    except ImportError:
                        from backports import lzma
                    newobj['_ArrayZipData_'] = lzma.compress(newobj['_ArrayZipData_'], lzma.FORMAT_ALONE)
                except Exception:
                    print('you must install "lzma" module to compress with this format, ignoring')
                    pass
            elif opt['compression'] == 'lz4':
                try:
                    import lz4.frame
                    newobj['_ArrayZipData_'] = lz4.frame.compress(newobj['_ArrayZipData_'])
                except ImportError:
                    print('you must install "lz4" module to compress with this format, ignoring')
                    pass
            if (('base64' in opt) and (opt['base64'])) or opt['compression'] == 'base64':
                newobj['_ArrayZipData_'] = base64.b64encode(newobj['_ArrayZipData_'])
            newobj.pop('_ArrayData_')
        return newobj
    else:
        # Anything else (int, str, None, ...) is passed through unchanged.
        return copy.deepcopy(d)
##====================================================================================
## JData to Python decoding function
##====================================================================================
def decode(d, opt={}):
    """@brief Decoding a JData-annotated dict construct into native Python data

    This function converts portable JData-annotated dict/list constructs back to
    native Python data structures.

    @param[in,out] d: an arbitrary Python data, any JData-encoded components will be decoded
    @param[in] opt: options, can contain 'base64'=True if zipped payloads are base64-encoded
    """
    # Short string constants '_NaN_', '_Inf_', '-_Inf_' map back to floats.
    # (The original also tested type(d) == 'unicode', a Python-2 leftover that
    # always compared False and is dropped here.)
    if isinstance(d, str) and len(d) <= 6 and len(d) > 4 and d[-1] == '_':
        if d == '_NaN_':
            return float('nan')
        elif d == '_Inf_':
            return float('inf')
        elif d == '-_Inf_':
            return float('-inf')
        return d
    elif isinstance(d, (list, tuple, set, frozenset)):
        return decodelist(d, opt)
    elif isinstance(d, dict):
        if '_ArrayType_' in d:
            if isinstance(d['_ArraySize_'], str):
                # NOTE(review): bytearray(str) needs an encoding on Python 3;
                # this branch presumably expects a bytes-like size record.
                d['_ArraySize_'] = np.array(bytearray(d['_ArraySize_']))
            if '_ArrayZipData_' in d:
                newobj = d['_ArrayZipData_']
                if (('base64' in opt) and (opt['base64'])) or ('_ArrayZipType_' in d and d['_ArrayZipType_'] == 'base64'):
                    newobj = base64.b64decode(newobj)
                if '_ArrayZipType_' in d and d['_ArrayZipType_'] not in _zipper:
                    raise Exception('JData', 'compression method is not supported')
                if d['_ArrayZipType_'] == 'zlib':
                    newobj = zlib.decompress(bytes(newobj))
                elif d['_ArrayZipType_'] == 'gzip':
                    # MAX_WBITS|32 auto-detects zlib or gzip containers.
                    newobj = zlib.decompress(bytes(newobj), zlib.MAX_WBITS | 32)
                elif d['_ArrayZipType_'] == 'lzma':
                    try:
                        import lzma
                    except ImportError:
                        from backports import lzma
                    buf = bytearray(newobj)
                    # Set the uncompressed-size header field to -1 (unknown)
                    # so streams that simply end at EOF are accepted.
                    buf[5:13] = b'\xff\xff\xff\xff\xff\xff\xff\xff'
                    newobj = lzma.decompress(buf, lzma.FORMAT_ALONE)
                elif d['_ArrayZipType_'] == 'lz4':
                    try:
                        import lz4.frame
                        newobj = lz4.frame.decompress(bytes(newobj))
                    except Exception:
                        print('Warning: you must install "lz4" module to decompress a data record in this file, ignoring')
                        pass
                # BUG FIX: np.fromstring on binary data was deprecated and
                # removed; np.frombuffer is the documented replacement. The
                # .copy() keeps the array writable, matching fromstring.
                newobj = np.frombuffer(
                    newobj, dtype=np.dtype(d['_ArrayType_'])
                ).copy().reshape(d['_ArrayZipSize_'])
                if '_ArrayIsComplex_' in d and newobj.shape[0] == 2:
                    # Recombine the [real, imag] rows into a complex array.
                    newobj = newobj[0] + 1j * newobj[1]
                if '_ArrayOrder_' in d and d['_ArrayOrder_'].lower() in ('c', 'col', 'column'):
                    newobj = newobj.reshape(d['_ArraySize_'], order='F')
                else:
                    newobj = newobj.reshape(d['_ArraySize_'])
                return newobj
            elif '_ArrayData_' in d:
                if isinstance(d['_ArrayData_'], str):
                    newobj = np.frombuffer(d['_ArrayData_'], dtype=np.dtype(d['_ArrayType_']))
                else:
                    newobj = np.asarray(d['_ArrayData_'], dtype=np.dtype(d['_ArrayType_']))
                if '_ArrayZipSize_' in d and newobj.shape[0] == 1:
                    if isinstance(d['_ArrayZipSize_'], str):
                        d['_ArrayZipSize_'] = np.array(bytearray(d['_ArrayZipSize_']))
                    newobj = newobj.reshape(d['_ArrayZipSize_'])
                if '_ArrayIsComplex_' in d and newobj.shape[0] == 2:
                    newobj = newobj[0] + 1j * newobj[1]
                if '_ArrayOrder_' in d and d['_ArrayOrder_'].lower() in ('c', 'col', 'column'):
                    newobj = newobj.reshape(d['_ArraySize_'], order='F')
                else:
                    newobj = newobj.reshape(d['_ArraySize_'])
                return newobj
            else:
                raise Exception('JData', 'one and only one of _ArrayData_ or _ArrayZipData_ is required')
        return decodedict(d, opt)
    else:
        return copy.deepcopy(d)
##====================================================================================
## helper functions
##====================================================================================
def jsonfilter(obj):
    """``default=`` hook for :func:`json.dump` that makes NumPy/JData values
    serializable.

    Converts Python-2 longs to strings, NumPy scalars/arrays to native Python
    values, bytes to UTF-8 strings, and non-finite floats to the JData string
    constants. Unrecognized values fall through and return ``None`` (matching
    the original behaviour).
    """
    # BUG FIX: the original compared the type *object* to the string 'long'
    # (always False); compare the type name instead (Python-2 long support).
    if type(obj).__name__ == 'long':
        return str(obj)
    elif type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    elif isinstance(obj, (bytes, bytearray)):
        return obj.decode("utf-8")
    elif isinstance(obj, float):
        if np.isnan(obj):
            return '_NaN_'
        elif np.isinf(obj):
            return '_Inf_' if (obj > 0) else '-_Inf_'
def encodedict(d0, opt={}):
    """Recursively encode every key and value of a dict.

    Keys that change under encoding replace their originals in the returned
    copy; the input dict is never modified.
    """
    out = dict(d0)
    for key, value in d0.items():
        encoded_key = encode(key, opt)
        out[encoded_key] = encode(value, opt)
        if key != encoded_key:
            out.pop(key)
    return out
def encodelist(d0, opt={}):
    """Recursively encode every element of a sequence, returning a list.

    BUG FIX: the original deep-copied the container and assigned items by
    index, which raised TypeError for the tuple/set/frozenset inputs that
    ``encode`` dispatches here; a list (JData/JSON array) is now returned for
    all of them, preserving the previous behaviour for lists.
    """
    return [encode(item, opt) for item in d0]
def decodedict(d0, opt={}):
    """Recursively decode every value of a dict.

    BUG FIX: the original inserted/popped keys while iterating the same
    dict's ``items()`` view, raising RuntimeError whenever a key changed;
    a fresh dict is built instead. NOTE(review): keys are run through
    ``encode`` (not ``decode``), mirroring the original code — confirm this
    asymmetry is intentional.
    """
    out = {}
    for key, value in d0.items():
        out[encode(key, opt)] = decode(value, opt)
    return out
def decodelist(d0, opt={}):
    """Recursively decode every element of a sequence, returning a list.

    BUG FIX: the original deep-copied the container and assigned items by
    index, which raised TypeError for the tuple/set/frozenset inputs that
    ``decode`` dispatches here; a list is now returned for all of them,
    preserving the previous behaviour for lists.
    """
    return [decode(item, opt) for item in d0]
| [
"copy.deepcopy",
"backports.lzma.decompress",
"numpy.asarray",
"numpy.dtype",
"numpy.isinf",
"base64.b64decode",
"numpy.isnan",
"zlib.compress",
"backports.lzma.compress",
"base64.b64encode"
] | [((10784, 10801), 'copy.deepcopy', 'copy.deepcopy', (['d0'], {}), '(d0)\n', (10797, 10801), False, 'import copy\n'), ((11103, 11120), 'copy.deepcopy', 'copy.deepcopy', (['d0'], {}), '(d0)\n', (11116, 11120), False, 'import copy\n'), ((2467, 2478), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (2475, 2478), True, 'import numpy as np\n'), ((2522, 2533), 'numpy.isinf', 'np.isinf', (['d'], {}), '(d)\n', (2530, 2533), True, 'import numpy as np\n'), ((9856, 9872), 'copy.deepcopy', 'copy.deepcopy', (['d'], {}), '(d)\n', (9869, 9872), False, 'import copy\n'), ((10430, 10443), 'numpy.isnan', 'np.isnan', (['obj'], {}), '(obj)\n', (10438, 10443), True, 'import numpy as np\n'), ((5440, 5456), 'copy.deepcopy', 'copy.deepcopy', (['d'], {}), '(d)\n', (5453, 5456), False, 'import copy\n'), ((10487, 10500), 'numpy.isinf', 'np.isinf', (['obj'], {}), '(obj)\n', (10495, 10500), True, 'import numpy as np\n'), ((6859, 6883), 'base64.b64decode', 'base64.b64decode', (['newobj'], {}), '(newobj)\n', (6875, 6883), False, 'import base64\n'), ((4072, 4111), 'zlib.compress', 'zlib.compress', (["newobj['_ArrayZipData_']"], {}), "(newobj['_ArrayZipData_'])\n", (4085, 4111), False, 'import zlib\n'), ((5305, 5347), 'base64.b64encode', 'base64.b64encode', (["newobj['_ArrayZipData_']"], {}), "(newobj['_ArrayZipData_'])\n", (5321, 5347), False, 'import base64\n'), ((3919, 3967), 'numpy.asarray', 'np.asarray', (["newobj['_ArrayData_']"], {'dtype': 'd.dtype'}), "(newobj['_ArrayData_'], dtype=d.dtype)\n", (3929, 3967), True, 'import numpy as np\n'), ((4208, 4268), 'zlib.compress', 'zlib.compress', (["newobj['_ArrayZipData_']", '(zlib.MAX_WBITS | 32)'], {}), "(newobj['_ArrayZipData_'], zlib.MAX_WBITS | 32)\n", (4221, 4268), False, 'import zlib\n'), ((7669, 7708), 'backports.lzma.decompress', 'lzma.decompress', (['buf', 'lzma.FORMAT_ALONE'], {}), '(buf, lzma.FORMAT_ALONE)\n', (7684, 7708), False, 'from backports import lzma\n'), ((8131, 8157), 'numpy.dtype', 'np.dtype', (["d['_ArrayType_']"], {}), 
"(d['_ArrayType_'])\n", (8139, 8157), True, 'import numpy as np\n'), ((8800, 8826), 'numpy.dtype', 'np.dtype', (["d['_ArrayType_']"], {}), "(d['_ArrayType_'])\n", (8808, 8826), True, 'import numpy as np\n'), ((8912, 8938), 'numpy.dtype', 'np.dtype', (["d['_ArrayType_']"], {}), "(d['_ArrayType_'])\n", (8920, 8938), True, 'import numpy as np\n'), ((4559, 4617), 'backports.lzma.compress', 'lzma.compress', (["newobj['_ArrayZipData_']", 'lzma.FORMAT_ALONE'], {}), "(newobj['_ArrayZipData_'], lzma.FORMAT_ALONE)\n", (4572, 4617), False, 'from backports import lzma\n')] |
import dask.array
import h5py
import numpy as np
import torch
import torch.utils.data
class DaskDataset(torch.utils.data.Dataset):
    """
    A torch Dataset backed by one or more array-likes (e.g. dask arrays),
    allowing datasets too large for memory. Indexing returns torch tensors,
    one per backing array.

    Written with reference to the base class:
    https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataset.py
    """

    def __init__(self, *arrays):
        # `is_empty` is a hack to please fastai Learner.summary().
        self.is_empty = not (len(arrays) and arrays[0].shape[0])
        self.dtypes = tuple(self._torch_dtype(a.dtype) for a in arrays)
        self.arrays = arrays

    @classmethod
    def _torch_dtype(cls, dask_dtype):
        """Map a numpy datatype to the matching torch datatype."""
        if dask_dtype == np.float16:
            return torch.float16
        if dask_dtype == np.float32:
            return torch.float32
        if dask_dtype == np.float64:
            return torch.float64
        if dask_dtype == np.int32:
            return torch.int64  # Embedding layers require longs.
        if dask_dtype == np.int64:
            return torch.int64
        raise NotImplementedError(
            f'Datatype {dask_dtype} not supported by DaskDataset.')

    def __getitem__(self, index):
        """Return the example at `index` from every backing array."""
        example = []
        for dtype, array in zip(self.dtypes, self.arrays):
            example.append(torch.tensor(np.array(array[index]), dtype=dtype))
        return tuple(example)

    def __len__(self):
        """Return the number of examples (rows of the first array)."""
        if not self.arrays:
            return 0
        return self.arrays[0].shape[0]
class H5Dataset(torch.utils.data.Dataset):
    """
    A torch Dataset whose rows live in an HDF5 file and are read lazily
    through a chunked dask array.
    """

    def __init__(self, h5fn, dsname):
        """Open dataset `dsname` from the HDF5 file at `h5fn`.

        The HDF5 dataset must carry a 'dtype' attribute naming its element
        type; rows are converted to torch tensors of the matching dtype.
        """
        super().__init__()
        self.h5fn = h5fn
        self.dsname = dsname
        # SWMR read mode: safe to read while a writer holds the file open.
        self.h5file = h5py.File(self.h5fn, 'r', libver='latest', swmr=True)
        dataset = self.h5file[dsname]
        dtype_name = dataset.attrs['dtype']
        torch_types = {
            'int64': torch.int64,
            'int32': torch.int64,  # Embedding layer wants longs.
            'float64': torch.float64,
            'float32': torch.float32,
            'float16': torch.float16,
        }
        if dtype_name not in torch_types:
            raise NotImplementedError(
                f'Dataset datatype {dtype_name} not supported.')
        self.dtype = torch_types[dtype_name]
        # Chunk along rows; keep the remaining (up to two) axes whole.
        chunk_shape = ('auto', dataset.shape[1])
        if len(dataset.shape) > 2:
            chunk_shape = chunk_shape + (dataset.shape[2],)
        self.data = dask.array.from_array(dataset, chunks=chunk_shape)

    def __getitem__(self, index):
        """Return row `index` of the dataset as a torch tensor."""
        return torch.tensor(np.array(self.data[index]), dtype=self.dtype)

    def __len__(self):
        """Return the number of rows in the dataset."""
        return len(self.data)

    def get_batch(self, indices):
        """Return the rows selected by `indices` as a single tensor."""
        return torch.tensor(np.array(self.data[indices]), dtype=self.dtype)

    def size(self, dim):
        """Return the size of dimension `dim` of the dataset."""
        return self.data.shape[dim]
| [
"h5py.File",
"numpy.array"
] | [((2155, 2208), 'h5py.File', 'h5py.File', (['self.h5fn', '"""r"""'], {'libver': '"""latest"""', 'swmr': '(True)'}), "(self.h5fn, 'r', libver='latest', swmr=True)\n", (2164, 2208), False, 'import h5py\n'), ((3140, 3166), 'numpy.array', 'np.array', (['self.data[index]'], {}), '(self.data[index])\n', (3148, 3166), True, 'import numpy as np\n'), ((3418, 3446), 'numpy.array', 'np.array', (['self.data[indices]'], {}), '(self.data[indices])\n', (3426, 3446), True, 'import numpy as np\n'), ((1553, 1575), 'numpy.array', 'np.array', (['array[index]'], {}), '(array[index])\n', (1561, 1575), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Routines to analyse data taken on JJ.
"""
import logging
from typing import Tuple, Optional, Union
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import peak_widths
from lmfit.models import GaussianModel
def f2k_from_periodicity_and_width(periodicty: float, width: float) -> float:
    """Field-to-wavevector conversion factor from a pattern's periodicity and
    the assumed junction width.
    """
    return (2.0 * np.pi) / (width * periodicty)
def find_fraunhofer_center(
    field: np.ndarray, ic: np.ndarray, debug: bool = False
) -> float:
    """Extract the field at which the Fraunhofer pattern is centered.

    A Gaussian is fitted to a window around the central lobe (0.65x the
    half-maximum width on each side) and its center is returned.

    Parameters
    ----------
    field : np.ndarray
        1D array of the magnetic field applied of the JJ.
    ic : np.ndarray
        1D array of the JJ critical current.

    Returns
    -------
    float
        Field at which the center of the pattern is located.
    """
    peak = np.argmax(ic)
    widths, *_ = peak_widths(ic, [peak], rel_height=0.5)
    half_span = int(round(widths[0] * 0.65))
    window = slice(peak - half_span, peak + half_span + 1)
    sub_field = field[window]
    sub_ic = ic[window]
    gauss = GaussianModel()
    guess = gauss.guess(sub_ic, sub_field)
    result = gauss.fit(sub_ic, guess, x=sub_field)
    if debug:
        plt.figure()
        plt.plot(field, ic)
        plt.plot(sub_field, result.best_fit)
        plt.show()
    return result.best_values["center"]
def recenter_fraunhofer(
    field: np.ndarray, ic: np.ndarray, debug: bool = False
) -> np.ndarray:
    """Remove the field offset from each Fraunhofer trace.

    Parameters
    ----------
    field : np.ndarray
        ND array of the magnetic field applied of the JJ, the last dimension
        is expected to be swept.
    ic : np.ndarray
        ND array of the JJ critical current.

    Returns
    -------
    np.ndarray
        Field array from which the per-trace offset has been removed.
    """
    corrected = np.copy(field)
    # Walk every leading index; each one selects a 1D field/ic sweep.
    iterator = np.nditer(field[..., 0], ["multi_index"])
    for _ in iterator:
        idx = iterator.multi_index
        corrected[idx] -= find_fraunhofer_center(field[idx], ic[idx], debug)
    return corrected
return res
def symmetrize_fraunhofer(
    field: np.ndarray, ic: np.ndarray, debug: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
    """Symmetrize a Fraunhofer pattern around zero field.

    The half of the (offset-free) pattern with more visible lobes is kept
    and mirrored onto the other side.

    Parameters
    ----------
    field : np.ndarray
        1D array of the magnetic field applied of the JJ; must be offset free.
    ic : np.ndarray
        1D array of the JJ critical current.

    Returns
    -------
    np.ndarray
        New, symmetrized field array.
    np.ndarray
        Critical current symmetrized with respect to 0 field.
    """
    # Work with monotonically increasing field values.
    if field[0] > field[1]:
        field, ic = field[::-1], ic[::-1]

    zero = np.argmin(np.abs(field))
    if zero == 0:
        keep_positive, half_f, half_i = True, field, ic
    elif zero == len(field) - 1:
        keep_positive, half_f, half_i = False, field, ic
    elif len(field[:zero]) > len(field[zero + 1:]):
        # More lobes on the negative side: keep [start, zero].
        keep_positive, half_f, half_i = False, field[: zero + 1], ic[: zero + 1]
    else:
        # More lobes on the positive side: keep [zero, end].
        keep_positive, half_f, half_i = True, field[zero:], ic[zero:]

    if keep_positive:
        sym = (
            np.concatenate((-half_f[1:][::-1], half_f), axis=None),
            np.concatenate((half_i[1:][::-1], half_i), axis=None),
        )
    else:
        sym = (
            np.concatenate((half_f, -half_f[:-1][::-1]), axis=None),
            np.concatenate((half_i, half_i[:-1][::-1]), axis=None),
        )

    if debug:
        plt.figure()
        plt.plot(*sym)
        plt.show()
    return sym
| [
"matplotlib.pyplot.show",
"numpy.abs",
"numpy.copy",
"scipy.signal.peak_widths",
"numpy.argmax",
"matplotlib.pyplot.plot",
"numpy.nditer",
"matplotlib.pyplot.figure",
"lmfit.models.GaussianModel",
"numpy.concatenate"
] | [((1289, 1302), 'numpy.argmax', 'np.argmax', (['ic'], {}), '(ic)\n', (1298, 1302), True, 'import numpy as np\n'), ((1319, 1361), 'scipy.signal.peak_widths', 'peak_widths', (['ic', '[max_loc]'], {'rel_height': '(0.5)'}), '(ic, [max_loc], rel_height=0.5)\n', (1330, 1361), False, 'from scipy.signal import peak_widths\n'), ((1566, 1581), 'lmfit.models.GaussianModel', 'GaussianModel', ([], {}), '()\n', (1579, 1581), False, 'from lmfit.models import GaussianModel\n'), ((2363, 2404), 'numpy.nditer', 'np.nditer', (['field[..., 0]', "['multi_index']"], {}), "(field[..., 0], ['multi_index'])\n", (2372, 2404), True, 'import numpy as np\n'), ((2415, 2429), 'numpy.copy', 'np.copy', (['field'], {}), '(field)\n', (2422, 2429), True, 'import numpy as np\n'), ((1710, 1722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1720, 1722), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1750), 'matplotlib.pyplot.plot', 'plt.plot', (['field', 'ic'], {}), '(field, ic)\n', (1739, 1750), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1795), 'matplotlib.pyplot.plot', 'plt.plot', (['subset_field', 'out.best_fit'], {}), '(subset_field, out.best_fit)\n', (1767, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1812, 1814), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3439), 'numpy.abs', 'np.abs', (['field'], {}), '(field)\n', (3432, 3439), True, 'import numpy as np\n'), ((4222, 4234), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4232, 4234), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4257), 'matplotlib.pyplot.plot', 'plt.plot', (['*out'], {}), '(*out)\n', (4251, 4257), True, 'import matplotlib.pyplot as plt\n'), ((4266, 4276), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4274, 4276), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3977), 'numpy.concatenate', 'np.concatenate', (['(-f[1:][::-1], f)'], {'axis': 'None'}), '((-f[1:][::-1], f), 
axis=None)\n', (3947, 3977), True, 'import numpy as np\n'), ((3991, 4034), 'numpy.concatenate', 'np.concatenate', (['(i[1:][::-1], i)'], {'axis': 'None'}), '((i[1:][::-1], i), axis=None)\n', (4005, 4034), True, 'import numpy as np\n'), ((4084, 4129), 'numpy.concatenate', 'np.concatenate', (['(f, -f[:-1][::-1])'], {'axis': 'None'}), '((f, -f[:-1][::-1]), axis=None)\n', (4098, 4129), True, 'import numpy as np\n'), ((4143, 4187), 'numpy.concatenate', 'np.concatenate', (['(i, i[:-1][::-1])'], {'axis': 'None'}), '((i, i[:-1][::-1]), axis=None)\n', (4157, 4187), True, 'import numpy as np\n')] |
import numpy as np
from tqdm import tqdm
from scipy import misc
from collections import deque, namedtuple
from datetime import datetime
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from utils import linear_scale, preprocess, LearnerState
class DoubleDQNLearner(object):
    #Args
    # num_actions: int; the number of actions the agent can take
    # imsize: int; side length of the (square) input image
    # discount: float; the discount factor applied to future rewards
    # sample_time: int; number of random steps to run before training starts (to populate memory)
    # lr: float; learning rate
    # batch_size: int; mini-batch size for each training step
    # max_memory: maximum number of transitions to keep in replay memory
    # eps: a float between [0,1] indicating the starting epsilon value for eps-greedy
    # num_eps: number of steps over which eps is annealed from "eps" down to final_eps
    # logdir: base directory for TensorBoard summaries
    def __init__(self,num_actions,
                 imsize=32,
                 discount=.999,
                 sample_time=4000,
                 lr=4e-5,
                 batch_size=128,
                 max_memory=50000,
                 eps=1.0,
                 num_eps = 10000,
                 logdir = "dqn_results/"):
        """Store hyper-parameters and allocate replay/frame memory.

        The TensorFlow graph and session are NOT built here; call
        ``initialize_network`` once before using the learner.
        """
        self.num_actions = num_actions
        self.imsize = imsize
        self.discount = discount
        self.sample_time = sample_time
        self.lr = lr
        self.batch_size = batch_size
        self.max_memory = max_memory
        #logdir for saving tensorboard outputs
        self.logdir=logdir
        #this is the number of frames until we reach the final eps
        self.num_eps = num_eps
        #starting eps
        self.eps = eps
        self.sess = None
        # Step/episode counters and per-episode bookkeeping for summaries.
        self.time = 0
        self.init_time = 0
        self.episode_time = 0
        self.episode_rewards = []
        self.average_q = []
        # Number of consecutive frames stacked into one observation.
        self.remember_length = 4
        #for non-conv
        self.state_size = 4
        #eps annealing
        self.final_eps = 0
        self.start_eps = self.eps
        self.losses = []
        self.rewards = []
        # Variable-scope names for the two networks used by Double DQN.
        self.main_network_scope = "main_network"
        self.target_network_scope = "target_network"
        # Per-scope registry of graph tensors/ops, filled by create_q_network.
        self.ops = {
            self.main_network_scope: {},
            self.target_network_scope: {}
        }
        #initialize image memory and other memory
        self.image_memory = deque([np.zeros((self.imsize, self.imsize,1)) for i in range(self.remember_length)],
                                  maxlen=self.remember_length)
        self.memory = deque(maxlen=self.max_memory)
    def initialize_network(self):
        """Build main/target graphs, open the session and the summary writer.

        Creates both Q-networks in one ``tf.Graph``, builds ``copy_op`` (which
        assigns every main-network variable onto its target-network twin),
        initialises all variables and immediately syncs the target network.
        Must be called once before ``action_callback``.
        """
        g = tf.Graph()
        self.g = g
        ### Double DQN Portion
        self.create_q_network(self.g,self.main_network_scope)
        self.create_q_network(self.g,self.target_network_scope)
        main_net_vars =self.g.get_collection("variables",scope=self.main_network_scope)
        target_net_vars = self.g.get_collection("variables",scope=self.target_network_scope)
        assign_ops = []
        # Pair main/target variables positionally; both scopes were built by
        # the same create_q_network code, so the orderings match.
        for var, target in zip(main_net_vars,target_net_vars):
            #print(var,target)
            assign_ops.append(tf.assign(target, var))
        with self.g.as_default():
            self.copy_op = tf.group(assign_ops)
        ###
        self.sess = tf.Session(graph=self.g)
        self.sess.run(tf.variables_initializer(self.g.get_collection("variables")))
        with self.g.as_default():
            self.main_train_saver = tf.train.Saver(self.g.get_collection("variables"))
        # One timestamped TensorBoard run directory per learner instance.
        now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
        logdir = "%s/run-%s/" % (self.logdir,now)
        self.train_writer = tf.summary.FileWriter(logdir,graph = self.g)
        self.sess.run(self.copy_op)
    def create_q_network(self,graph,scope):
        """Build one Q-network (conv trunk + dense head) under ``scope``.

        Registers the key tensors/ops (input, labels, loss, logits, train_op
        and summary ops) in ``self.ops[scope]`` for later lookup.

        Args:
            graph: the tf.Graph to build into.
            scope: variable-scope name (main or target network).
        """
        def count_parameters(scope=None):
            # Total number of trainable scalars under the given scope.
            num_params = 0
            for var in tf.trainable_variables(scope=scope):
                num_params += int(np.prod(var.shape))
            return num_params
        with graph.as_default():
            with tf.variable_scope(scope):
                #We use the last four frames stacked along the channel axis.
                X = tf.placeholder(tf.float32,shape=(None,self.imsize,self.imsize,self.remember_length),name="input")
                y = tf.placeholder(tf.float32,shape=(None,self.num_actions),name="labels")
                filters = 32
                act = tf.nn.relu
                activations = []
                initializer = tf.truncated_normal_initializer(0.0,1e-2)
                net = tf.layers.Conv2D(filters,2,4,padding="same",activation=act, kernel_initializer=initializer)(X)
                activations.append(net)
                net = tf.layers.Conv2D(filters*2,2,2,padding="same",activation=act,kernel_initializer=initializer)(net)
                activations.append(net)
                net = tf.layers.Conv2D(filters*2,3,1,padding="same",activation=act,kernel_initializer=initializer)(net)
                activations.append(net)
                net = tf.layers.flatten(net)
                activations.append(net)
                net = tf.layers.Dense(256,activation=act,kernel_initializer=initializer)(net)
                activations.append(net)
                print("Initialized network with %d parameters" % count_parameters(scope=scope))
                # One linear output per action: the predicted Q-values.
                logits = tf.layers.Dense(self.num_actions,activation=None,
                                         kernel_initializer=initializer,name="logits")(net)
                loss = tf.reduce_mean(self.huber_loss(logits-y),name="loss")
                optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr)
                gradients = tf.gradients(loss, tf.trainable_variables(scope=scope))
                #gradients
                gradients, _ = tf.clip_by_global_norm(gradients,10)
                # NOTE(review): the clipped gradients are computed but never
                # applied -- train_op minimizes the raw loss. Confirm intent.
                train_op = optimizer.minimize(loss,name="train_op")
                #Getting the histogram summaries
                print(len(activations))
                print(len(tf.trainable_variables(scope=scope)))
                print(len(gradients))
                hists = [tf.summary.histogram(var.name, var) for var in activations] +\
                        [tf.summary.histogram(var.name, var) for var in tf.trainable_variables(scope=scope)] +\
                        [tf.summary.histogram(var.name, var) for var in gradients]
                metric_summaries = [tf.summary.scalar("Loss", loss)]
                tf_summary = tf.summary.merge(metric_summaries,name="tf_summary")
                tf_hist_summary = tf.summary.merge(hists,name="hist_summary")
                self.ops[scope] = {
                    'input': X,
                    'labels':y,
                    'metric_summaries': metric_summaries,
                    'tf_summary': tf_summary,
                    'tf_hist_summary': tf_hist_summary,
                    'loss': loss,
                    'logits': logits,
                    'train_op': train_op
                }
#memory should be filepath+'.npy' and checkpoint should be filepath+".ckpt"
def load_from_path(self,filepath):
self.main_train_saver.restore(self.sess,filepath+".ckpt")
self.memory = deque(np.load(filepath+'.npy'))
self.sess.run(self.copy_op)
def save_to_path(self,filepath):
self.main_train_saver.save(self.sess,os.getcwd()+"/models/%s.ckpt" % filepath)
np.save(os.getcwd() + "/models/%s.npy" % filepath,self.memory)
def get_memory_batch(self):
if len(self.memory) < self.batch_size:
inds = np.random.choice(np.arange(len(self.memory)),replace=True,size=self.batch_size)
else:
inds = np.random.choice(np.arange(len(self.memory)),replace=False,size=self.batch_size)
return [self.memory[i] for i in inds]
def huber_loss(self, x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
def reset_memory(self):
self.image_memory = deque([np.zeros((self.imsize, self.imsize,1)) for i in range(self.remember_length)],
maxlen=self.remember_length)
def reset_learner_state(self):
self.last_learner_state = LearnerState(np.zeros((self.imsize,self.imsize,self.remember_length)),0)
#self.last_learner_state = LearnerState(np.zeros((self.state_size)),0)
def eps_greedy(self,eps,q_values_of_state):
if np.random.choice([True,False],p=[eps,1-eps]):
action = np.random.choice(np.arange(self.num_actions))
else:
action = np.argmax(q_values_of_state)
return action
    def action_callback(self, state):
        """Choose an action for the current frame and (after warm-up) train.

        During the first ``sample_time`` calls only random actions are taken
        and transitions are stored to seed the replay memory. Afterwards each
        call runs an eps-greedy step, stores the transition, and performs one
        mini-batch Double-DQN update.

        Args:
            state: dict with a 'pixels' entry holding the raw screen image.

        Returns:
            The index of the chosen action.
        """
        if (self.init_time < self.sample_time):
            # Warm-up phase: act randomly and just populate replay memory.
            current_screen = preprocess(state['pixels'],self.imsize)
            self.image_memory.append(current_screen)
            current_state = np.concatenate(self.image_memory,axis=2)
            last_action = np.random.choice(np.arange(self.num_actions))
            # Skip transitions whose stacked observation is still all-zero.
            if not np.all(np.mean(current_state,(0,1)) == 0):
                self.memory.append((self.last_learner_state.last_state,
                                    current_state,
                                    last_action,
                                    self.last_learner_state.last_reward))
            self.init_time +=1
            if self.init_time == self.sample_time:
                print("Stored %d initial memories" % len(self.memory))
            self.last_learner_state = LearnerState(current_state,None)
            return last_action
        else:
            #First we need to get current state.
            current_screen = preprocess(state['pixels'],self.imsize)
            #plt.imshow(current_screen[:,:,0])
            #plt.show()
            self.image_memory.append(current_screen)
            #This returns an (imsize, imsize, remember_length) stacked state.
            current_state = np.concatenate(self.image_memory,axis=2)
            #current_state = state
            #Get q-values of last state and get last action
            last_q_values = self.sess.run(self.ops[self.main_network_scope]['logits'],
                                          feed_dict={self.ops[self.main_network_scope]['input']:[self.last_learner_state.last_state]})
            self.average_q.append(np.mean(last_q_values))
            last_action = self.eps_greedy(self.eps, last_q_values)
            #append to memory
            self.memory.append((self.last_learner_state.last_state,
                                current_state,
                                last_action,
                                self.last_learner_state.last_reward))
            #set the last learner state
            self.last_learner_state = LearnerState(current_state,None)
            #now we perform learning
            memory_batch = self.get_memory_batch()
            #x_batch = np.zeros((self.batch_size,self.imsize,self.imsize,4))
            x_batch = np.concatenate([np.expand_dims(memory_batch[i][0],0) for i in range(len(memory_batch))])
            #y_batch = np.zeros((self.batch_size, self.num_actions))
            css = np.concatenate([np.expand_dims(memory_batch[i][1],0) for i in range(len(memory_batch))])
            yb_in = np.concatenate([np.expand_dims(memory_batch[i][0],0) for i in range(len(memory_batch))])
            a_inds = np.array([memory_batch[i][2] for i in range(len(memory_batch))])
            #need to turn to float, otherwise will be integers!
            target = np.array([memory_batch[i][3] for i in range(len(memory_batch))]).astype(np.float32)
            # Double DQN target: the main network picks the argmax action,
            # the target network supplies its value.
            target_q = self.sess.run(self.ops[self.target_network_scope]["logits"],
                                     feed_dict={self.ops[self.target_network_scope]['input']:css})
            target_q_1 = self.sess.run(self.ops[self.main_network_scope]['logits'],
                                       feed_dict={self.ops[self.main_network_scope]['input']:css})
            target_q_1_mask = np.argmax(target_q_1,1)
            # Negative rewards mark terminal transitions: no bootstrap term.
            lr_g0 = target >= 0
            #lr_g0 = target != -10
            #target[lr_g0] = target[lr_g0] + self.discount * np.amax(target_q[lr_g0],1)
            target[lr_g0] = target[lr_g0] + self.discount * target_q[lr_g0][np.arange(sum(lr_g0)),target_q_1_mask[lr_g0]]
            yb = self.sess.run(self.ops[self.main_network_scope]['logits'],
                               feed_dict={self.ops[self.main_network_scope]['input']:yb_in})
            # Only the taken action's Q-value is trained towards the target.
            yb[np.arange(a_inds.shape[0]),a_inds] = target
            y_batch = yb
            #x_batch = (x_batch - np.mean(x_batch)) / (np.std(x_batch)+1e-6)
            loss, sstr, hstr, _ = self.sess.run([self.ops[self.main_network_scope]['loss'],
                                                 self.ops[self.main_network_scope]['tf_summary'],
                                                 self.ops[self.main_network_scope]['tf_hist_summary'],
                                                 self.ops[self.main_network_scope]['train_op']],
                                                feed_dict={self.ops[self.main_network_scope]['input']:x_batch,
                                                           self.ops[self.main_network_scope]['labels']:y_batch})
            self.losses.append(loss)
            self.time +=1
            #Double Q-Learning Copy: periodically sync target <- main weights.
            if self.time % 500 == 0:
                self.sess.run(self.copy_op)
            #action summary
            #action_summary = tf.Summary(value=[tf.Summary.Value(tag='action',simple_value=last_action)])
            #self.train_writer.add_summary(action_summary,self.time)
            self.last_learner_state.last_action = last_action
            #eps summary: linear annealing from start_eps down to final_eps.
            self.eps -= (self.start_eps - self.final_eps) / self.num_eps
            self.eps = max(self.final_eps,self.eps)
            eps_summary = tf.Summary(value=[tf.Summary.Value(tag="eps", simple_value=self.eps)])
            if self.time % 100 == 0:
                self.train_writer.add_summary(sstr,self.time)
                self.train_writer.add_summary(eps_summary,self.time)
            if self.time % 1000 == 0:
                self.train_writer.add_summary(hstr,self.time)
            return last_action
    def reward_callback(self, reward):
        """Record the latest reward, clipped to {-1, 0, 1}.

        A negative reward marks the end of an episode: episode summaries
        (total reward and mean Q-value) are written to TensorBoard and the
        per-episode accumulators are reset.
        """
        #Reward of < 0 is an end state
        if reward < 0:
            self.episode_time +=1
            # Total (not mean) reward collected over the finished episode.
            mean_rewards = np.sum(self.episode_rewards)
            #average q summary
            avg_q_summary = tf.Summary(value=[tf.Summary.Value(tag='average_q',simple_value=np.mean(self.average_q))])
            self.average_q = []
            #not really an average, right now!
            rewards_summary = tf.Summary(value=[
                tf.Summary.Value(tag="average_episode_reward", simple_value=mean_rewards),
            ])
            self.train_writer.add_summary(avg_q_summary,self.episode_time)
            self.train_writer.add_summary(rewards_summary,self.episode_time)
            self.episode_rewards = []
            reward = -1
        if reward > 0:
            reward = 1
        self.episode_rewards.append(reward)
        self.rewards.append(reward)
        self.last_learner_state.last_reward = reward
def close(self):
self.sess.close() | [
"numpy.load",
"numpy.sum",
"tensorflow.trainable_variables",
"numpy.argmax",
"utils.LearnerState",
"tensorflow.train.RMSPropOptimizer",
"datetime.datetime.utcnow",
"tensorflow.assign",
"numpy.mean",
"numpy.arange",
"tensorflow.summary.merge",
"tensorflow.clip_by_global_norm",
"collections.de... | [((2560, 2589), 'collections.deque', 'deque', ([], {'maxlen': 'self.max_memory'}), '(maxlen=self.max_memory)\n', (2565, 2589), False, 'from collections import deque, namedtuple\n'), ((2637, 2647), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2645, 2647), True, 'import tensorflow as tf\n'), ((3332, 3356), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.g'}), '(graph=self.g)\n', (3342, 3356), True, 'import tensorflow as tf\n'), ((8662, 8711), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'p': '[eps, 1 - eps]'}), '([True, False], p=[eps, 1 - eps])\n', (8678, 8711), True, 'import numpy as np\n'), ((3270, 3290), 'tensorflow.group', 'tf.group', (['assign_ops'], {}), '(assign_ops)\n', (3278, 3290), True, 'import tensorflow as tf\n'), ((3718, 3761), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['logdir'], {'graph': 'self.g'}), '(logdir, graph=self.g)\n', (3739, 3761), True, 'import tensorflow as tf\n'), ((3965, 4000), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'scope'}), '(scope=scope)\n', (3987, 4000), True, 'import tensorflow as tf\n'), ((7292, 7318), 'numpy.load', 'np.load', (["(filepath + '.npy')"], {}), "(filepath + '.npy')\n", (7299, 7318), True, 'import numpy as np\n'), ((8459, 8517), 'numpy.zeros', 'np.zeros', (['(self.imsize, self.imsize, self.remember_length)'], {}), '((self.imsize, self.imsize, self.remember_length))\n', (8467, 8517), True, 'import numpy as np\n'), ((8810, 8838), 'numpy.argmax', 'np.argmax', (['q_values_of_state'], {}), '(q_values_of_state)\n', (8819, 8838), True, 'import numpy as np\n'), ((8982, 9022), 'utils.preprocess', 'preprocess', (["state['pixels']", 'self.imsize'], {}), "(state['pixels'], self.imsize)\n", (8992, 9022), False, 'from utils import linear_scale, preprocess, LearnerState\n'), ((9103, 9144), 'numpy.concatenate', 'np.concatenate', (['self.image_memory'], {'axis': '(2)'}), '(self.image_memory, axis=2)\n', (9117, 9144), True, 
'import numpy as np\n'), ((9729, 9762), 'utils.LearnerState', 'LearnerState', (['current_state', 'None'], {}), '(current_state, None)\n', (9741, 9762), False, 'from utils import linear_scale, preprocess, LearnerState\n'), ((9885, 9925), 'utils.preprocess', 'preprocess', (["state['pixels']", 'self.imsize'], {}), "(state['pixels'], self.imsize)\n", (9895, 9925), False, 'from utils import linear_scale, preprocess, LearnerState\n'), ((10135, 10176), 'numpy.concatenate', 'np.concatenate', (['self.image_memory'], {'axis': '(2)'}), '(self.image_memory, axis=2)\n', (10149, 10176), True, 'import numpy as np\n'), ((10994, 11027), 'utils.LearnerState', 'LearnerState', (['current_state', 'None'], {}), '(current_state, None)\n', (11006, 11027), False, 'from utils import linear_scale, preprocess, LearnerState\n'), ((12292, 12316), 'numpy.argmax', 'np.argmax', (['target_q_1', '(1)'], {}), '(target_q_1, 1)\n', (12301, 12316), True, 'import numpy as np\n'), ((14785, 14813), 'numpy.sum', 'np.sum', (['self.episode_rewards'], {}), '(self.episode_rewards)\n', (14791, 14813), True, 'import numpy as np\n'), ((2403, 2442), 'numpy.zeros', 'np.zeros', (['(self.imsize, self.imsize, 1)'], {}), '((self.imsize, self.imsize, 1))\n', (2411, 2442), True, 'import numpy as np\n'), ((3184, 3206), 'tensorflow.assign', 'tf.assign', (['target', 'var'], {}), '(target, var)\n', (3193, 3206), True, 'import tensorflow as tf\n'), ((4145, 4169), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (4162, 4169), True, 'import tensorflow as tf\n'), ((4236, 4343), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.imsize, self.imsize, self.remember_length)', 'name': '"""input"""'}), "(tf.float32, shape=(None, self.imsize, self.imsize, self.\n remember_length), name='input')\n", (4250, 4343), True, 'import tensorflow as tf\n'), ((4354, 4427), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, self.num_actions)', 'name': 
'"""labels"""'}), "(tf.float32, shape=(None, self.num_actions), name='labels')\n", (4368, 4427), True, 'import tensorflow as tf\n'), ((4552, 4594), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (4583, 4594), True, 'import tensorflow as tf\n'), ((5097, 5119), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['net'], {}), '(net)\n', (5114, 5119), True, 'import tensorflow as tf\n'), ((5664, 5712), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (5689, 5712), True, 'import tensorflow as tf\n'), ((5855, 5892), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(10)'], {}), '(gradients, 10)\n', (5877, 5892), True, 'import tensorflow as tf\n'), ((6527, 6580), 'tensorflow.summary.merge', 'tf.summary.merge', (['metric_summaries'], {'name': '"""tf_summary"""'}), "(metric_summaries, name='tf_summary')\n", (6543, 6580), True, 'import tensorflow as tf\n'), ((6614, 6658), 'tensorflow.summary.merge', 'tf.summary.merge', (['hists'], {'name': '"""hist_summary"""'}), "(hists, name='hist_summary')\n", (6630, 6658), True, 'import tensorflow as tf\n'), ((7445, 7456), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7454, 7456), False, 'import os\n'), ((7503, 7514), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7512, 7514), False, 'import os\n'), ((8062, 8071), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (8068, 8071), True, 'import tensorflow as tf\n'), ((8093, 8105), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (8102, 8105), True, 'import tensorflow as tf\n'), ((8233, 8272), 'numpy.zeros', 'np.zeros', (['(self.imsize, self.imsize, 1)'], {}), '((self.imsize, self.imsize, 1))\n', (8241, 8272), True, 'import numpy as np\n'), ((8746, 8773), 'numpy.arange', 'np.arange', (['self.num_actions'], {}), '(self.num_actions)\n', (8755, 8773), True, 'import numpy as np\n'), ((9187, 9214), 'numpy.arange', 
'np.arange', (['self.num_actions'], {}), '(self.num_actions)\n', (9196, 9214), True, 'import numpy as np\n'), ((10541, 10563), 'numpy.mean', 'np.mean', (['last_q_values'], {}), '(last_q_values)\n', (10548, 10563), True, 'import numpy as np\n'), ((3589, 3606), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3604, 3606), False, 'from datetime import datetime\n'), ((4036, 4054), 'numpy.prod', 'np.prod', (['var.shape'], {}), '(var.shape)\n', (4043, 4054), True, 'import numpy as np\n'), ((4617, 4716), 'tensorflow.layers.Conv2D', 'tf.layers.Conv2D', (['filters', '(2)', '(4)'], {'padding': '"""same"""', 'activation': 'act', 'kernel_initializer': 'initializer'}), "(filters, 2, 4, padding='same', activation=act,\n kernel_initializer=initializer)\n", (4633, 4716), True, 'import tensorflow as tf\n'), ((4775, 4878), 'tensorflow.layers.Conv2D', 'tf.layers.Conv2D', (['(filters * 2)', '(2)', '(2)'], {'padding': '"""same"""', 'activation': 'act', 'kernel_initializer': 'initializer'}), "(filters * 2, 2, 2, padding='same', activation=act,\n kernel_initializer=initializer)\n", (4791, 4878), True, 'import tensorflow as tf\n'), ((4936, 5039), 'tensorflow.layers.Conv2D', 'tf.layers.Conv2D', (['(filters * 2)', '(3)', '(1)'], {'padding': '"""same"""', 'activation': 'act', 'kernel_initializer': 'initializer'}), "(filters * 2, 3, 1, padding='same', activation=act,\n kernel_initializer=initializer)\n", (4952, 5039), True, 'import tensorflow as tf\n'), ((5183, 5251), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['(256)'], {'activation': 'act', 'kernel_initializer': 'initializer'}), '(256, activation=act, kernel_initializer=initializer)\n', (5198, 5251), True, 'import tensorflow as tf\n'), ((5417, 5519), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.num_actions'], {'activation': 'None', 'kernel_initializer': 'initializer', 'name': '"""logits"""'}), "(self.num_actions, activation=None, kernel_initializer=\n initializer, name='logits')\n", (5432, 5519), True, 'import 
tensorflow as tf\n'), ((5760, 5795), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'scope'}), '(scope=scope)\n', (5782, 5795), True, 'import tensorflow as tf\n'), ((6465, 6496), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (6482, 6496), True, 'import tensorflow as tf\n'), ((8134, 8143), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (8140, 8143), True, 'import tensorflow as tf\n'), ((11244, 11281), 'numpy.expand_dims', 'np.expand_dims', (['memory_batch[i][0]', '(0)'], {}), '(memory_batch[i][0], 0)\n', (11258, 11281), True, 'import numpy as np\n'), ((11421, 11458), 'numpy.expand_dims', 'np.expand_dims', (['memory_batch[i][1]', '(0)'], {}), '(memory_batch[i][1], 0)\n', (11435, 11458), True, 'import numpy as np\n'), ((11530, 11567), 'numpy.expand_dims', 'np.expand_dims', (['memory_batch[i][0]', '(0)'], {}), '(memory_batch[i][0], 0)\n', (11544, 11567), True, 'import numpy as np\n'), ((12791, 12817), 'numpy.arange', 'np.arange', (['a_inds.shape[0]'], {}), '(a_inds.shape[0])\n', (12800, 12817), True, 'import numpy as np\n'), ((6076, 6111), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'scope'}), '(scope=scope)\n', (6098, 6111), True, 'import tensorflow as tf\n'), ((6370, 6405), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.name', 'var'], {}), '(var.name, var)\n', (6390, 6405), True, 'import tensorflow as tf\n'), ((9242, 9272), 'numpy.mean', 'np.mean', (['current_state', '(0, 1)'], {}), '(current_state, (0, 1))\n', (9249, 9272), True, 'import numpy as np\n'), ((14231, 14281), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""eps"""', 'simple_value': 'self.eps'}), "(tag='eps', simple_value=self.eps)\n", (14247, 14281), True, 'import tensorflow as tf\n'), ((15151, 15224), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""average_episode_reward"""', 'simple_value': 'mean_rewards'}), "(tag='average_episode_reward', 
simple_value=mean_rewards)\n", (15167, 15224), True, 'import tensorflow as tf\n'), ((6178, 6213), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.name', 'var'], {}), '(var.name, var)\n', (6198, 6213), True, 'import tensorflow as tf\n'), ((6262, 6297), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['var.name', 'var'], {}), '(var.name, var)\n', (6282, 6297), True, 'import tensorflow as tf\n'), ((6309, 6344), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': 'scope'}), '(scope=scope)\n', (6331, 6344), True, 'import tensorflow as tf\n'), ((14950, 14973), 'numpy.mean', 'np.mean', (['self.average_q'], {}), '(self.average_q)\n', (14957, 14973), True, 'import numpy as np\n')] |
#!/usr/bin python3
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
import logging
from os import getenv
from urllib.parse import urlparse, unquote_plus
from dataclasses import dataclass
from functools import lru_cache
from typing import Awaitable, Union, Iterable, Any, Dict
from datetime import datetime, date
# 3rd party:
import asyncpg
from orjson import dumps, loads, JSONDecodeError
from azure.cosmos.cosmos_client import CosmosClient
from azure.functions import HttpRequest
from pandas import DataFrame
from numpy import ceil
# Internal:
from .constants import (
DBQueries, DatabaseCredentials, PAGINATION_PATTERN,
MAX_ITEMS_PER_RESPONSE, DATA_TYPES
)
from .queries import QueryParser
from .types import QueryResponseType, QueryData, ResponseStructure
from .exceptions import NotAvailable
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Header
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020, Public Health England"
__license__ = "MIT"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'get_data'
]
# Runtime environment; anything other than "DEVELOPMENT" restricts queries
# to released metrics (see get_query).
ENVIRONMENT = getenv("API_ENV", "PRODUCTION")
# Preferred Cosmos DB read regions; empty env var -> None.
PREFERRED_LOCATIONS = getenv("AzureCosmosDBLocations", "").split(",") or None
# Columns every response row carries regardless of the requested structure.
base_metrics = ["areaCode", "areaType", "areaName", "date"]
# Area types with a dedicated DB partition; everything else maps to "other".
single_partition_types = {"utla", "ltla", "nhstrust", "msoa"}
dtypes = DATA_TYPES.copy()
dtypes["date"] = str  # dates travel as ISO strings, not datetime objects
# Map base Python types onto the pandas-facing types used below.
type_map: Dict[object, object] = {
    int: float,
    float: float,
    str: str
}
# Per-metric pandas dtype: int/float metrics become float, other metrics
# keep their mapped type or fall back to object.
generic_dtypes = {
    metric_name: type_map.get(dtypes[metric_name], object)
    if dtypes[metric_name] not in [int, float] else float
    for metric_name in dtypes
}
# Metric-name sets the formatters use to pick per-column handling.
integer_dtypes = {type_ for type_, base_type in dtypes.items() if base_type is int}
string_dtypes = {type_ for type_, base_type in dtypes.items() if base_type is str}
json_dtypes = {type_ for type_, base_type in dtypes.items() if base_type in [list, dict]}
logger = logging.getLogger('azure')
logger.setLevel(logging.WARNING)
# NOTE(review): legacy Cosmos DB client kept for reference; the handlers
# below read from PostgreSQL via asyncpg instead.
# DB_KWS = dict(
#     url=DatabaseCredentials.host,
#     credential={'masterKey': DatabaseCredentials.key},
#     preferred_locations=PREFERRED_LOCATIONS,
#     connection_timeout=10000
# )
#
# client = CosmosClient(**DB_KWS)
# db = client.get_database_client(DatabaseCredentials.db_name)
# container = db.get_container_client(DatabaseCredentials.data_collection)
def json_formatter(obj):
    """JSON fallback serialiser: render date/datetime values as ISO 8601.

    Passed as ``default=`` to ``dumps``.  Returns ``None`` (serialised as
    JSON null) for any other unsupported type.
    """
    # Bug fix: the original tested ``isinstance(date, ...)`` -- i.e. the
    # class itself, which is never an instance -- so dates were never
    # serialised. Test the actual object instead.
    if isinstance(obj, (date, datetime)):
        return obj.isoformat()
def log_response(query, arguments):
    """
    Closure for logging DB query information.

    Main function receives the ``query`` and its ``arguments`` and returns
    a function that may be passed to the ``cosmos_client.query_items``
    as the ``response_hook`` keyword argument.
    """
    count = 0

    def process(metadata, results):
        nonlocal count, query

        # Splice the bound parameter values into the query text for logging.
        for item in arguments:
            query = query.replace(item['name'], item['value'])

        custom_dims = dict(
            charge=metadata.get('x-ms-request-charge', None),
            query=query,
            query_raw=query,
            response_count=metadata.get('x-ms-item-count', None),
            path=metadata.get('x-ms-alt-content-path', None),
            parameters=arguments,
            # NOTE(review): count is declared nonlocal but never incremented,
            # so request_round is always 0 -- confirm intent.
            request_round=count
        )

        logging.info(f"DB QUERY: { dumps(custom_dims) }")

    return process
class Connection:
    """Awaitable async context manager around an ``asyncpg`` connection.

    The coroutine returned by ``asyncpg.connect`` is created eagerly; it is
    awaited either directly (``await Connection()``) or on entry to
    ``async with``, and the resulting connection is closed on exit.
    """
    def __init__(self, conn_str=getenv("POSTGRES_CONNECTION_STRING")):
        self.conn_str = conn_str
        self._connection = asyncpg.connect(self.conn_str, statement_cache_size=0, timeout=60)
        self._conn = None

    def __await__(self):
        yield from self._connection.__await__()

    async def __aenter__(self):
        # Await the pending connect coroutine and keep the real connection
        # object so __aexit__ can close it. (Previously __aexit__ called
        # .close() on the coroutine itself, leaking the DB connection.)
        self._conn = await self._connection
        return self._conn

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self._conn is not None:
            await self._conn.close()
@dataclass
class RequestMethod:
    """String constants for the HTTP verbs handled by this API."""
    Get: str = "GET"
    Head: str = "HEAD"
    Options: str = "OPTIONS"
    Post: str = "POST"
    Put: str = "PUT"
    Patch: str = "PATCH"
async def get_count(conn, db_args: Iterable[Any], partition_id: str, filters: str):
    """
    Run the COUNT query for the given partition and filter set.

    Counting is a very expensive DB call.  NOTE(review): this docstring
    previously claimed the result is cached in memory, but no cache is
    applied here -- confirm whether caching was intended.
    """
    query = DBQueries.count.substitute(partition=partition_id, filters=filters)
    return await conn.fetchrow(query, *db_args)
def format_data(df: DataFrame, response_metrics: Iterable[str]) -> DataFrame:
    """Normalise raw metric values for the response payload.

    Integer metrics lose any trailing ``.0`` and become Python ints,
    missing values become ``None``, and string metrics are stripped of
    surrounding double quotes.
    """
    int_response_metrics = set(response_metrics).intersection(integer_dtypes)
    df.loc[:, int_response_metrics] = df[int_response_metrics].astype(object)

    for col in int_response_metrics:
        notnull = df[col].notnull()
        try:
            # Text-typed column: drop the textual ".0" suffix, then cast.
            df.loc[notnull, col] = (
                df
                .loc[notnull, col]
                .str.replace(".0+$", "", regex=True)
                .astype(int)
            )
        except AttributeError:
            # Numeric column (no .str accessor): cast directly.
            df.loc[notnull, col] = (
                df
                .loc[notnull, col]
                .astype(int)
            )

    df = df.where(df.notnull(), None)

    str_response_metrics = set(response_metrics).intersection(string_dtypes)
    df.loc[:, str_response_metrics] = (
        df
        .loc[:, str_response_metrics]
        .apply(lambda column: column.str.strip('"'))
    )

    return df
def set_column_labels(df: DataFrame, structure: ResponseStructure):
    """Select, pad and rename DataFrame columns according to ``structure``.

    ``structure`` maps output labels to source column names.  Source columns
    missing from ``df`` are added as all-``None`` so the response shape is
    stable.  A list structure means "keep the DataFrame as it is".
    """
    if isinstance(structure, list):
        return df

    source_columns = list(structure.values())

    # Add any requested column the query did not return.
    for missing in set(source_columns) - set(df.columns):
        df = df.assign(**{missing: None})

    renamed = df.loc[:, source_columns].rename(
        columns=dict(zip(source_columns, structure))
    )
    return renamed
def format_response(df: DataFrame, request: HttpRequest, response_type: str,
                    count: int, page_number: int, n_metrics: int, structure: dict,
                    raw_filters: list) -> bytes:
    """Serialise the prepared DataFrame as the API response body.

    CSV requests are encoded directly.  JSON responses are wrapped in an
    envelope carrying record counts, the original request payload and --
    unless ``latestBy`` was requested -- pagination links.
    """
    if response_type == 'csv':
        return df.to_csv(float_format="%.20g", index=False).encode()

    total_pages = int(ceil(count / (MAX_ITEMS_PER_RESPONSE * n_metrics)))

    # Strip any existing page parameter before rebuilding pagination links.
    prepped_url = PAGINATION_PATTERN.sub("", request.url)
    parsed_url = urlparse(prepped_url)
    url = unquote_plus(f"/v1/data?{parsed_url.query}".strip("&"))

    # latestBy responses are not paginated: the row count is the total.
    if "latestBy" in prepped_url:
        count = df.shape[0]

    payload = {
        'length': df.shape[0],
        'maxPageLimit': MAX_ITEMS_PER_RESPONSE,
        'totalRecords': count,
        'data': df.to_dict("records"),
        'requestPayload': {
            'structure': structure,
            'filters': raw_filters
        }
    }

    if (latest_by := request.params.get("latestBy")) is None:
        payload.update({
            "pagination": {
                'current': f"{url}&page={page_number}",
                'next': f"{url}&page={page_number + 1}" if page_number < total_pages else None,
                'previous': f"{url}&page={page_number - 1}" if (page_number - 1) > 0 else None,
                'first': f"{url}&page=1",
                'last': f"{url}&page={total_pages}"
            }
        })

        payload['requestPayload']['page'] = page_number
    else:
        payload['requestPayload']['latestBy'] = latest_by

    return dumps(payload, default=json_formatter)
@lru_cache(32)
def get_partition_id(area_type: str, timestamp: str) -> str:
    """Build the (memoised) DB partition id ``YYYY_M_D_<areatype>``.

    Area types without a dedicated partition collapse into ``"other"``.
    """
    ts = datetime.fromisoformat(timestamp[:26])

    if area_type.lower() not in single_partition_types:
        area_type = "other"

    # Build the date part from the datetime attributes: the previous
    # strftime("%Y_%-m_%-d") relied on the glibc-only "%-" extension and
    # raises on non-Linux platforms. Output is identical (unpadded m/d).
    partition_id = f"{ts.year}_{ts.month}_{ts.day}_{area_type.lower()}"

    return partition_id
async def get_query(request: HttpRequest, latest_by: Union[str, None], partition_id: str,
                    filters: str, page_number: int, n_metrics: int) -> Awaitable[str]:
    """Compose the SQL statement for the current request.

    Picks between the latest-date query (``latestBy`` set), the paginated
    data query (GET) and the cheaper existence query (other methods).
    Outside DEVELOPMENT only released metrics are queried.
    """
    if ENVIRONMENT != "DEVELOPMENT":
        # Released metrics only.
        filters += f" AND mr.released IS TRUE"

    if latest_by is not None:
        query = DBQueries.latest_date_for_metric.substitute(
            partition=partition_id,
            filters=filters,
            latest_by=latest_by
        )
    elif request.method == RequestMethod.Get:
        # Page windows are scaled by n_metrics: each record holds one
        # (area, date, metric) value, so a "page" of rows spans
        # MAX_ITEMS_PER_RESPONSE result items.
        query = DBQueries.data_query.substitute(
            partition=partition_id,
            filters=filters,
            limit=MAX_ITEMS_PER_RESPONSE * n_metrics,
            offset=MAX_ITEMS_PER_RESPONSE * n_metrics * (page_number - 1)
        )
    else:
        query = DBQueries.exists.substitute(
            partition=partition_id,
            filters=filters,
            offset=MAX_ITEMS_PER_RESPONSE * n_metrics * (page_number - 1)
        )

    logging.info(query)

    return query
def to_json(data) -> Union[dict, list]:
    """Parse a JSON payload, falling back to an empty list when malformed."""
    try:
        parsed = loads(data)
    except JSONDecodeError:
        return []
    return parsed
def format_dtypes(df: DataFrame, column_types: Dict[str, object]) -> DataFrame:
    """Apply per-column dtypes, decoding JSON-typed columns first.

    Textual ``'null'`` cells are replaced with ``None`` before the cast.
    """
    json_columns = json_dtypes.intersection(column_types)

    df = df.where(df != 'null', None)

    # JSON-typed metrics arrive as serialised strings; parse them in place.
    df.loc[:, json_columns] = (
        df
        .loc[:, json_columns]
        .apply(lambda column: column.map(to_json))
    )

    return df.astype(column_types)
async def get_data(request: HttpRequest, tokens: QueryParser, formatter: str,
                   timestamp: str) -> QueryResponseType:
    """Main handler: run the DB query and build the formatted response.

    Raises ``NotAvailable`` when the filters match no data.  For HEAD
    requests only existence is checked and an empty string is returned.
    """
    query_data: QueryData = tokens.query_data
    arguments = query_data.arguments
    filters = query_data.query
    structure = await tokens.structure
    raw_filters = tokens.raw_filters

    if isinstance(structure, dict):
        metrics = list(structure.values())
    else:
        metrics = list(structure)

    if tokens.page_number is not None:
        page_number = int(tokens.page_number)
    else:
        page_number = 1

    n_metrics = len(metrics)

    partition_id = get_partition_id(query_data.area_type, timestamp)

    query = await get_query(
        request=request,
        latest_by=tokens.only_latest_by,
        partition_id=partition_id,
        filters=filters,
        page_number=page_number,
        n_metrics=n_metrics
    )

    # Metric names to fetch from the DB; the base columns always come back.
    db_metrics = set(metrics) - {"areaCode", "areaName", "areaType", "date"}

    db_args = [
        list(db_metrics),
        *arguments
    ]

    logging.info(dumps({"arguments": db_args}, default=json_formatter))

    count = dict()
    async with Connection() as conn:
        if request.method == RequestMethod.Get:
            # The expensive COUNT is only needed for paginated responses.
            if tokens.only_latest_by is None:
                count = await get_count(
                    conn,
                    db_args,
                    partition_id=partition_id,
                    filters=filters
                )
                if not count:
                    raise NotAvailable()

            values = await conn.fetch(query, *db_args)
        else:
            values = await conn.fetchrow(query, *db_args)

    count = count.get("count", 0)
    logging.info(query)

    if request.method == RequestMethod.Head:
        if values is None or not values.get('exists', False):
            raise NotAvailable()
        return str()
    elif values is None or not len(values):
        raise NotAvailable()

    df = DataFrame(values, columns=[*base_metrics, "metric", "value"])
    response_metrics = df.metric.unique()

    column_types = {
        metric: generic_dtypes[metric]
        for metric in filter(response_metrics.__contains__, generic_dtypes)
    }

    # Pivot long (metric, value) rows into one wide row per area/date,
    # then run the formatting pipeline and serialise.
    payload = (
        df
        .pivot_table(values="value", index=base_metrics, columns="metric", aggfunc='first')
        .reset_index()
        .sort_values(["areaCode", "date"], ascending=[True, False])
        .pipe(format_dtypes, column_types=column_types)
        .loc[:, [*base_metrics, *response_metrics]]
        .pipe(format_data, response_metrics=response_metrics)
        .pipe(set_column_labels, structure=structure)
        .pipe(
            format_response,
            request=request,
            response_type=formatter,
            count=count,
            page_number=page_number,
            n_metrics=n_metrics,
            structure=structure,
            raw_filters=raw_filters
        )
    )

    return payload
| [
"pandas.DataFrame",
"asyncpg.connect",
"datetime.datetime.fromisoformat",
"numpy.ceil",
"orjson.loads",
"logging.getLogger",
"logging.info",
"orjson.dumps",
"functools.lru_cache",
"os.getenv",
"urllib.parse.urlparse"
] | [((1213, 1244), 'os.getenv', 'getenv', (['"""API_ENV"""', '"""PRODUCTION"""'], {}), "('API_ENV', 'PRODUCTION')\n", (1219, 1244), False, 'from os import getenv\n'), ((2016, 2042), 'logging.getLogger', 'logging.getLogger', (['"""azure"""'], {}), "('azure')\n", (2033, 2042), False, 'import logging\n'), ((7317, 7330), 'functools.lru_cache', 'lru_cache', (['(32)'], {}), '(32)\n', (7326, 7330), False, 'from functools import lru_cache\n'), ((6224, 6245), 'urllib.parse.urlparse', 'urlparse', (['prepped_url'], {}), '(prepped_url)\n', (6232, 6245), False, 'from urllib.parse import urlparse, unquote_plus\n'), ((7275, 7313), 'orjson.dumps', 'dumps', (['payload'], {'default': 'json_formatter'}), '(payload, default=json_formatter)\n', (7280, 7313), False, 'from orjson import dumps, loads, JSONDecodeError\n'), ((7401, 7439), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['timestamp[:26]'], {}), '(timestamp[:26])\n', (7423, 7439), False, 'from datetime import datetime, date\n'), ((8611, 8630), 'logging.info', 'logging.info', (['query'], {}), '(query)\n', (8623, 8630), False, 'import logging\n'), ((10820, 10839), 'logging.info', 'logging.info', (['query'], {}), '(query)\n', (10832, 10839), False, 'import logging\n'), ((11086, 11147), 'pandas.DataFrame', 'DataFrame', (['values'], {'columns': "[*base_metrics, 'metric', 'value']"}), "(values, columns=[*base_metrics, 'metric', 'value'])\n", (11095, 11147), False, 'from pandas import DataFrame\n'), ((3494, 3530), 'os.getenv', 'getenv', (['"""POSTGRES_CONNECTION_STRING"""'], {}), "('POSTGRES_CONNECTION_STRING')\n", (3500, 3530), False, 'from os import getenv\n'), ((3593, 3659), 'asyncpg.connect', 'asyncpg.connect', (['self.conn_str'], {'statement_cache_size': '(0)', 'timeout': '(60)'}), '(self.conn_str, statement_cache_size=0, timeout=60)\n', (3608, 3659), False, 'import asyncpg\n'), ((6097, 6147), 'numpy.ceil', 'ceil', (['(count / (MAX_ITEMS_PER_RESPONSE * n_metrics))'], {}), '(count / (MAX_ITEMS_PER_RESPONSE * 
n_metrics))\n', (6101, 6147), False, 'from numpy import ceil\n'), ((8714, 8725), 'orjson.loads', 'loads', (['data'], {}), '(data)\n', (8719, 8725), False, 'from orjson import dumps, loads, JSONDecodeError\n'), ((10177, 10230), 'orjson.dumps', 'dumps', (["{'arguments': db_args}"], {'default': 'json_formatter'}), "({'arguments': db_args}, default=json_formatter)\n", (10182, 10230), False, 'from orjson import dumps, loads, JSONDecodeError\n'), ((1267, 1303), 'os.getenv', 'getenv', (['"""AzureCosmosDBLocations"""', '""""""'], {}), "('AzureCosmosDBLocations', '')\n", (1273, 1303), False, 'from os import getenv\n'), ((3399, 3417), 'orjson.dumps', 'dumps', (['custom_dims'], {}), '(custom_dims)\n', (3404, 3417), False, 'from orjson import dumps, loads, JSONDecodeError\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import os
import time
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape
from keras.optimizers import Adam
from keras.utils import plot_model
from utils.datasets import DATASET
from utils.saving import save_img_grid, save_loss_log, save_model
from utils.data_ops import scale
class GAN():
"""
Class for constructing a simple Generative Adversarial Network.
Based on Goodfellow et al. (2014) https://arxiv.org/pdf/1406.2661
"""
name = "GAN"
def __init__(self, epochs, batch_size, noise_dim, dataset, loss_path, result_path, checkpoint_path):
creation_time = time.strftime('%Y%m%d-%H%M%S')
dir_prefix = self.name + "_" + creation_time
self.epochs = epochs
self.batch_size = batch_size
self.noise_dim = noise_dim
self.dataset = dataset
self.loss_path = loss_path + "/" + dir_prefix
self.result_path = result_path + "/" + dir_prefix
self.checkpoint_path = checkpoint_path + "/" + dir_prefix
if self.dataset.lower() in DATASET:
#Load dataset
self.data_x, self.data_y = DATASET[self.dataset]()
self.data_x = scale(self.data_x, -1, 1, 0, 255)
#Set dimensions for input of discriminator
self.input_height = self.data_x.shape[-2]
self.input_width = self.data_x.shape[-1]
self.input_channels = self.data_x.shape[1]
#Set dimensions for output of generator
self.output_height = self.input_height
self.output_width = self.input_width
self.output_channels = self.input_channels
#Number of sample generated images
self.sample_count = 64
#Number of mini batches
self.batch_count = int(np.ceil(self.data_x.shape[0]/float(self.batch_size)))
else:
raise NotImplementedError
self.build((self.noise_dim,), ((self.input_channels, self.input_height, self.input_width)))
def discriminator(self, img_shape, base_feature_count = 512, scale_steps = 4):
"""
Constrcuts a MLP discriminator for the GAN
"""
assert base_feature_count / 2**(scale_steps-1) >= 1
img_in = Input(img_shape, name = "D_Input")
x = img_in
x = Flatten(name = "Flatten")(x)
for s in range(scale_steps):
x = Dense(int(base_feature_count/2**s), name = "FC_"+str(s))(x)
x = LeakyReLU(0.2, name = "LReLU_"+str(s))(x)
#x = BatchNormalization(name = "BN_"+str(s))(x)
x = Dense(1, name = "FC_"+str(scale_steps))(x)
x = Activation("sigmoid", name = "Sigmoid")(x)
return Model(img_in, x)
def generator(self, noise_dim, img_shape, base_feature_count = 128, scale_steps = 3):
"""
Constrcuts a MLP generatpr for the GAN
"""
noise_in = Input(noise_dim, name = "G_Input")
x = noise_in
for s in range(scale_steps):
x = Dense(base_feature_count*2**s, name = "FC_"+str(s))(x)
x = LeakyReLU(0.2, name = "LReLU_"+str(s))(x)
x = BatchNormalization(name = "BN_"+str(s))(x)
x = Dense(np.prod(img_shape), name = "FC_"+str(scale_steps))(x)
x = Activation("tanh", name = "Tanh")(x)
x = Reshape(img_shape, name = "Reshape")(x)
return Model(noise_in, x)
def build(self, z, img_shape):
"""
Builds and compiles the discriminator, generator, and the combined network
"""
self.disNet = self.discriminator(img_shape)
self.disNet.compile(optimizer = Adam(lr=0.0002, beta_1 = 0.5), loss = "binary_crossentropy")
self.genNet = self.generator(z, img_shape)
noise_input = Input(shape=z)
gen_out = self.genNet(noise_input)
self.disNet.trainable = False
dis_out = self.disNet(gen_out)
self.gan = Model(noise_input, dis_out);
self.gan.compile(optimizer = Adam(lr=0.0002, beta_1 = 0.5), loss = "binary_crossentropy")
self.genNet.summary()
self.disNet.summary()
self.gan.summary()
# plot_model(self.genNet, to_file='genNet.png')
# plot_model(self.disNet, to_file='disNet.png')
# plot_model(self.gan, to_file='GAN.png')
def fit(self, k = 1):
"""
Trains the GAN based on the inputs to supplied when initializing the GAN object
Trains the generator once per k discriminator iterations
"""
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path)
if not os.path.exists(self.result_path):
os.makedirs(self.result_path)
if not os.path.exists(self.loss_path):
os.makedirs(self.loss_path)
training_history = {
"generator" : [],
"discriminator" : []}
sample_generated_noise = np.random.uniform(-1.,1.,size = (self.sample_count,self.noise_dim)).astype(np.float32)
save_img_grid(self.result_path, "/fake_imgs_0", self.generate(sample_generated_noise), 8, 8, "Untrained GAN")
save_img_grid(self.result_path, "/real_imgs", self.data_x[np.random.randint(0,self.data_x.shape[0], size=self.sample_count)], 8, 8, "Real MNIST")
for epoch in range(1,self.epochs+1):
g_loss_epoch = []
d_loss_epoch = []
for idx in range(self.batch_count):
#Train discriminator
real_imgs = self.data_x[idx*self.batch_size:(idx+1)*self.batch_size]
noise = np.random.uniform(-1.,1.,size = (real_imgs.shape[0],self.noise_dim)).astype(np.float32)
fake_imgs = self.generate(noise)
fake_y = np.zeros((real_imgs.shape[0], 1))
real_y = np.ones((real_imgs.shape[0], 1))
d_loss_real = self.disNet.train_on_batch(real_imgs, real_y)
d_loss_fake = self.disNet.train_on_batch(fake_imgs, fake_y)
d_loss = 0.5 * (d_loss_real + d_loss_fake)
d_loss_epoch.append(d_loss)
#Train generator
if idx % k == 0:
noise = np.random.uniform(-1.,1.,size = (self.batch_size,self.noise_dim)).astype(np.float32)
y = np.ones((self.batch_size, 1))
g_loss = self.gan.train_on_batch(noise, y)
g_loss_epoch.append(g_loss)
training_history["generator"].append(np.mean(np.array(g_loss_epoch), axis=0))
training_history["discriminator"].append(np.mean(np.array(d_loss_epoch), axis=0))
print("Epoch: [{}]\ng_loss: {:.4f}, d_loss: {:.4f}"
.format(epoch, training_history["generator"][-1], training_history["discriminator"][-1]))
save_model(self.checkpoint_path, "/gen", self.genNet, epoch)
save_model(self.checkpoint_path, "/dis", self.disNet, epoch)
save_model(self.checkpoint_path, "/gan", self.gan, epoch)
save_img_grid(self.result_path, "/fake_imgs"+"_"+str(epoch), self.generate(sample_generated_noise), 8, 8, "Epoch: " + str(epoch))
save_loss_log(self.loss_path, "/loss", training_history)
def generate(self, noise_input):
"""
Returns output from the Generator net
"""
return self.genNet.predict(noise_input)
def predict(self, img_input):
"""
Returns output from the Discriminator net
"""
return self.disNet.predict(img_input)
def pretty_print(self):
"""
Returns supplied values in a pretty print out format
(Need adjustment)
"""
print("\nepochs = \t\t{}\nbatch_size = \t\t{}\nnoise_dim = \t\t{}\ndataset = \t\t{}\
\nloss_path = \t\t{}\nresult_path = \t\t{}\ncheckpoint_path = \t{}\
\ninput height = \t\t{}\ninput width = \t\t{}\ninput channels = \t{}\
\noutput height = \t{}\noutput width = \t\t{}\noutput channels = \t{}\
\ngen. sample count = \t{}\nbatch count = \t\t{}\n".format(self.epochs,\
self.batch_size,self.noise_dim,self.dataset,\
self.loss_path,self.result_path,self.checkpoint_path,\
self.input_height,self.input_width,self.input_channels,\
self.output_height, self.output_width, self.output_channels,\
self.sample_count, self.batch_count))
if __name__ == "__main__":
gan = GAN(10, 64, 100, "mnist", "Loss", "Images", "Saved_models")
gan.pretty_print()
gan.fit() | [
"numpy.random.uniform",
"os.makedirs",
"keras.layers.Activation",
"keras.layers.Flatten",
"time.strftime",
"keras.models.Model",
"os.path.exists",
"utils.saving.save_loss_log",
"keras.optimizers.Adam",
"numpy.zeros",
"numpy.random.randint",
"numpy.ones",
"numpy.array",
"keras.layers.Input"... | [((755, 785), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (768, 785), False, 'import time\n'), ((2486, 2518), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""D_Input"""'}), "(img_shape, name='D_Input')\n", (2491, 2518), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((2977, 2993), 'keras.models.Model', 'Model', (['img_in', 'x'], {}), '(img_in, x)\n', (2982, 2993), False, 'from keras.models import Model\n'), ((3188, 3220), 'keras.layers.Input', 'Input', (['noise_dim'], {'name': '"""G_Input"""'}), "(noise_dim, name='G_Input')\n", (3193, 3220), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((3684, 3702), 'keras.models.Model', 'Model', (['noise_in', 'x'], {}), '(noise_in, x)\n', (3689, 3702), False, 'from keras.models import Model\n'), ((4098, 4112), 'keras.layers.Input', 'Input', ([], {'shape': 'z'}), '(shape=z)\n', (4103, 4112), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((4270, 4297), 'keras.models.Model', 'Model', (['noise_input', 'dis_out'], {}), '(noise_input, dis_out)\n', (4275, 4297), False, 'from keras.models import Model\n'), ((1325, 1358), 'utils.data_ops.scale', 'scale', (['self.data_x', '(-1)', '(1)', '(0)', '(255)'], {}), '(self.data_x, -1, 1, 0, 255)\n', (1330, 1358), False, 'from utils.data_ops import scale\n'), ((2561, 2584), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""Flatten"""'}), "(name='Flatten')\n", (2568, 2584), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((2910, 2947), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {'name': '"""Sigmoid"""'}), "('sigmoid', name='Sigmoid')\n", (2920, 2947), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, 
Activation, Reshape\n'), ((3571, 3602), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {'name': '"""Tanh"""'}), "('tanh', name='Tanh')\n", (3581, 3602), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((3620, 3654), 'keras.layers.Reshape', 'Reshape', (['img_shape'], {'name': '"""Reshape"""'}), "(img_shape, name='Reshape')\n", (3627, 3654), False, 'from keras.layers import Input, Dense, BatchNormalization, LeakyReLU, Flatten, Activation, Reshape\n'), ((4921, 4957), 'os.path.exists', 'os.path.exists', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (4935, 4957), False, 'import os\n'), ((4972, 5005), 'os.makedirs', 'os.makedirs', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (4983, 5005), False, 'import os\n'), ((5021, 5053), 'os.path.exists', 'os.path.exists', (['self.result_path'], {}), '(self.result_path)\n', (5035, 5053), False, 'import os\n'), ((5068, 5097), 'os.makedirs', 'os.makedirs', (['self.result_path'], {}), '(self.result_path)\n', (5079, 5097), False, 'import os\n'), ((5113, 5143), 'os.path.exists', 'os.path.exists', (['self.loss_path'], {}), '(self.loss_path)\n', (5127, 5143), False, 'import os\n'), ((5158, 5185), 'os.makedirs', 'os.makedirs', (['self.loss_path'], {}), '(self.loss_path)\n', (5169, 5185), False, 'import os\n'), ((7447, 7507), 'utils.saving.save_model', 'save_model', (['self.checkpoint_path', '"""/gen"""', 'self.genNet', 'epoch'], {}), "(self.checkpoint_path, '/gen', self.genNet, epoch)\n", (7457, 7507), False, 'from utils.saving import save_img_grid, save_loss_log, save_model\n'), ((7520, 7580), 'utils.saving.save_model', 'save_model', (['self.checkpoint_path', '"""/dis"""', 'self.disNet', 'epoch'], {}), "(self.checkpoint_path, '/dis', self.disNet, epoch)\n", (7530, 7580), False, 'from utils.saving import save_img_grid, save_loss_log, save_model\n'), ((7593, 7650), 'utils.saving.save_model', 'save_model', (['self.checkpoint_path', 
'"""/gan"""', 'self.gan', 'epoch'], {}), "(self.checkpoint_path, '/gan', self.gan, epoch)\n", (7603, 7650), False, 'from utils.saving import save_img_grid, save_loss_log, save_model\n'), ((7805, 7861), 'utils.saving.save_loss_log', 'save_loss_log', (['self.loss_path', '"""/loss"""', 'training_history'], {}), "(self.loss_path, '/loss', training_history)\n", (7818, 7861), False, 'from utils.saving import save_img_grid, save_loss_log, save_model\n'), ((3505, 3523), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (3512, 3523), True, 'import numpy as np\n'), ((3955, 3982), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (3959, 3982), False, 'from keras.optimizers import Adam\n'), ((4336, 4363), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (4340, 4363), False, 'from keras.optimizers import Adam\n'), ((5338, 5408), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '(self.sample_count, self.noise_dim)'}), '(-1.0, 1.0, size=(self.sample_count, self.noise_dim))\n', (5355, 5408), True, 'import numpy as np\n'), ((5618, 5684), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.data_x.shape[0]'], {'size': 'self.sample_count'}), '(0, self.data_x.shape[0], size=self.sample_count)\n', (5635, 5684), True, 'import numpy as np\n'), ((6265, 6298), 'numpy.zeros', 'np.zeros', (['(real_imgs.shape[0], 1)'], {}), '((real_imgs.shape[0], 1))\n', (6273, 6298), True, 'import numpy as np\n'), ((6325, 6357), 'numpy.ones', 'np.ones', (['(real_imgs.shape[0], 1)'], {}), '((real_imgs.shape[0], 1))\n', (6332, 6357), True, 'import numpy as np\n'), ((6887, 6916), 'numpy.ones', 'np.ones', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (6894, 6916), True, 'import numpy as np\n'), ((7119, 7141), 'numpy.array', 'np.array', (['g_loss_epoch'], {}), '(g_loss_epoch)\n', (7127, 7141), True, 'import numpy as np\n'), ((7213, 7235), 
'numpy.array', 'np.array', (['d_loss_epoch'], {}), '(d_loss_epoch)\n', (7221, 7235), True, 'import numpy as np\n'), ((6071, 6142), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '(real_imgs.shape[0], self.noise_dim)'}), '(-1.0, 1.0, size=(real_imgs.shape[0], self.noise_dim))\n', (6088, 6142), True, 'import numpy as np\n'), ((6778, 6846), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '(self.batch_size, self.noise_dim)'}), '(-1.0, 1.0, size=(self.batch_size, self.noise_dim))\n', (6795, 6846), True, 'import numpy as np\n')] |
import sys
from collections import defaultdict
import numpy as np
import os
sys.path.append("../RQ1_1/")
import utils as ut
def get_basic(tool_combs,single_tool_base_path,proj,ver,mix_unmodified):
method_cate = defaultdict(list) #index of Unmodified_ranking
method_value = dict() #suspicious value
method_clean = defaultdict(list) # real category list
for tool in tool_combs:
if not os.path.exists(single_tool_base_path + tool):
print(single_tool_base_path + tool + "DOES NOT EXISIT!")
result_file = single_tool_base_path + tool + "/" + proj + "-" + ver + "/aggregatedSusInfo.profl"
if os.path.exists(result_file):
with open(result_file) as f:
for line in f:
items = line.strip().split("|")
method_name = items[3]
cate = items[2].split("PatchCategory.")[1]
if cate == "Unmodified":
cate = mix_unmodified
value = items[1]
method_cate[method_name].append(unmodified_ranking.index(cate))
method_clean[method_name].append(cate)
method_value[method_name] = value
return method_cate,method_value,method_clean
#method_cate_number: number of tool category,for example, if M1 aggregation is
#cleanfix, count how many tools are also cleanfix for M1
def get_method_info(method_cate,method_value,method_clean):
method_final_cate = dict() # final category
method_cate_number = dict() # number of cleanfix/Noisyfix/NoneFix for all tools
for m in method_cate:
min_number = np.array(method_cate[m]).min()
cat = unmodified_ranking[min_number] #the aggregated category for this method
method_final_cate[m] = cat
cat_list = method_clean[m]
cat_number = cat_list.count(cat)
if cat_number > 0:
method_cate_number[m] = cat_number
return method_final_cate,method_cate_number
def get_info_for_ranking(method_value,method_final_cate,method_clean_number):
category_values = defaultdict(list) #each category with a list of suspicious values
for m in method_final_cate:
category_values[method_final_cate[m]].append(float(method_value[m]))
return category_values
def get_buggy(buggy_method_path):
buggy = []
with open(buggy_method_path) as f:
for line in f:
if "^^^^^^" in line:
buggy.append(line.split("^^^^^^")[1].strip())
return buggy
def update_ranking_by_cate_number(cate_number_dict,bug,method_cate_number,method_value,method_final_cate):
bug_cate_number = method_cate_number[bug]
bug_value = method_value[bug]
bug_cate = method_final_cate[bug]
cate_number_list = np.sort(np.array(cate_number_dict[bug_cate + "+" + bug_value]))
offset = list(cate_number_list).index(bug_cate_number)
return offset
def get_final_ranking(buggy_methods,method_final_cate,method_value,unmodified_ranking,category_values,buggy_SBFL_ranking,cate_number_dict,method_cate_number):
rankings = []
global unidebug_plusplus
for bug in buggy_methods:
if bug in method_value:
bug_cat = method_final_cate[bug]
bug_valu = float(method_value[bug])
bug_ranking = 0
for m in unmodified_ranking:
if bug_cat != m:
bug_ranking = bug_ranking + len(category_values[m])
else:
m_list = category_values[m]
m_list = np.array(m_list)
m_list_sort = np.sort(m_list) #ascending order
index = list(m_list_sort).index(bug_valu)
bug_ranking = len(m_list_sort) - index + bug_ranking
offset = update_ranking_by_cate_number(cate_number_dict,bug,method_cate_number,method_value,method_final_cate)
if(unidebug_plusplus is "False"):
pass # When the third command argument is "False", output represents UniDebug+
else:
bug_ranking = bug_ranking - offset # Comment this line to change from UniDebug++ to UniDebug+
rankings.append(str(bug_ranking))
break
else:
if bug in buggy_SBFL_ranking:
rankings.append(buggy_SBFL_ranking[bug])
return rankings
def get_SBFL_ranking(single_tool_base_path,buggy_methods,proj,ver):
buggy_SBFL_ranking = dict()
attempts = ["ACS/", "Simfix/", "FixMiner/", "TBarFixer/"]
stop = False
for tool in attempts:
if not stop:
try:
file = single_tool_base_path + tool + proj + "-" + ver + "/generalSusInfo.profl"
with open(file) as f:
for line in f:
method_name = line.split("|")[2].strip()
if method_name in buggy_methods:
value = line.split("|")[0].lstrip("0")
buggy_SBFL_ranking[method_name] = value
stop = True
except Exception as e:
print(e)
pass
return buggy_SBFL_ranking
# get the dict: (category + susvalue):[1,2,3,4] "1,2,3,4" represent the number
# of tools with same category as buggy method
# For example, buggy-method-1 will have 3 tools assinging nonfix to it,
# 2 represents that for another method-2 (also nonefix) has 2 tools assinging
# nonefix to method-2
# And buggy-method-1 and method-2 has the same susvalue
def get_cate_number_info(buggy_methods,method_final_cate,method_value,method_cate_number):
cate_meth_dict = defaultdict(set) # value:set(meth1,meth2,meth3...)
for buggy_m in buggy_methods: #all bugs
if buggy_m in method_value:
buggy_value = method_value[buggy_m]
buggy_cat = method_final_cate[buggy_m]
for meth in method_value:
if buggy_value == method_value[meth] and buggy_cat == method_final_cate[meth]:
cate_meth_dict[buggy_cat + "+" + str(buggy_value)].add(meth)
cate_number_dict = defaultdict(list)
for cat_and_value in cate_meth_dict:
mtd_list = cate_meth_dict[cat_and_value]
for mtd in mtd_list:
cate_number_dict[cat_and_value].append(method_cate_number[mtd])
return cate_number_dict
def read_comb(comb_file):
co_list = []
with open(comb_file) as f:
for line in f:
co_list.append(line.strip())
return co_list
def write_results(result_list,comb_file,comb,max_top1):
global first_write
with open(comb_file + ".result",'a') as f:
if(first_write is False):
first_write = True
f.write("<Format = Top1 Top3 Top5 MFR MAR from Tool(s)>")
f.write("\n")
if int(result_list[0]) > max_top1:
value = int(result_list[0])
print("\t- Best Top1 found! (previous max vs now)", str(max_top1), value)
max_top1 = value
for r in result_list:
f.write(str(r) + " ")
f.write("from " + comb)
f.write("\n")
return max_top1
first_write = False
single_tool_base_path = "../../Data/ExperimentalData/ProFL-"
projects = ["Lang","Time","Math","Chart"]
vers = [65,27,106,26]
result_list = [["" for x in range(0,vers[y])] for y in range(0,len(projects))] #initialilize final results
comb_file = sys.argv[1] #what tools for aggregation: for example, "SimFix PraPR FixMiner"
mix_unmodified = sys.argv[2] #four mixed options: "CleanFix","NoisyFix","NoneFix","NegFix"
unidebug_plusplus = "True"
if(len(sys.argv) > 3):
unidebug_plusplus = sys.argv[3]
unmodified_ranking = ["CleanFix","NoisyFix","NoneFix","NegFix"]
combs_from_file = read_comb(comb_file)
max_top1 = 0
index = 0
for comb in combs_from_file:
tool_combs = comb.split()
for current_iteration_number in range(0,len(projects)): #each project
proj = projects[current_iteration_number]
vs = vers[current_iteration_number]
for ver in range(1,vs + 1):
try:
ver = str(ver)
buggy_method_path = "../../Data/FaultyMethods/" + proj + "/" + ver + ".txt"
buggy_methods = get_buggy(buggy_method_path)
buggy_SBFL_ranking = get_SBFL_ranking(single_tool_base_path,buggy_methods,proj,ver)
method_cate,method_value,method_all_cate = get_basic(tool_combs,single_tool_base_path,proj,ver,mix_unmodified)
method_final_cate,method_cate_number = get_method_info(method_cate,method_value,method_all_cate)
cate_number_dict = get_cate_number_info(buggy_methods,method_final_cate,method_value,method_cate_number)
category_values = get_info_for_ranking(method_value,method_final_cate,method_cate_number)
final_ranking = get_final_ranking(buggy_methods,method_final_cate,method_value,unmodified_ranking,category_values,buggy_SBFL_ranking,cate_number_dict,method_cate_number)
final_r_string = ",".join(final_ranking)
result_list[current_iteration_number][int(ver) - 1] = final_r_string
except:
pass
#print("Could not process", projects[index], proj, "-" ,ver)
final_result, true_ver = ut.get_static_final(vers,projects,result_list) # get top-1,3,5... for each projects
final_result = ut.get_final(final_result,true_ver) #get final result (16 repair tools)
index = index + 1
print("Combination", index, "metric results =", final_result)
max_top1 = write_results(final_result,comb_file,comb,max_top1)
print("Best Top-1 results were", max_top1)
| [
"sys.path.append",
"os.path.exists",
"collections.defaultdict",
"utils.get_final",
"numpy.sort",
"numpy.array",
"utils.get_static_final"
] | [((76, 104), 'sys.path.append', 'sys.path.append', (['"""../RQ1_1/"""'], {}), "('../RQ1_1/')\n", (91, 104), False, 'import sys\n'), ((216, 233), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (227, 233), False, 'from collections import defaultdict\n'), ((331, 348), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (342, 348), False, 'from collections import defaultdict\n'), ((2107, 2124), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2118, 2124), False, 'from collections import defaultdict\n'), ((5741, 5757), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (5752, 5757), False, 'from collections import defaultdict\n'), ((6260, 6277), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6271, 6277), False, 'from collections import defaultdict\n'), ((9481, 9529), 'utils.get_static_final', 'ut.get_static_final', (['vers', 'projects', 'result_list'], {}), '(vers, projects, result_list)\n', (9500, 9529), True, 'import utils as ut\n'), ((9587, 9623), 'utils.get_final', 'ut.get_final', (['final_result', 'true_ver'], {}), '(final_result, true_ver)\n', (9599, 9623), True, 'import utils as ut\n'), ((645, 672), 'os.path.exists', 'os.path.exists', (['result_file'], {}), '(result_file)\n', (659, 672), False, 'import os\n'), ((2793, 2847), 'numpy.array', 'np.array', (["cate_number_dict[bug_cate + '+' + bug_value]"], {}), "(cate_number_dict[bug_cate + '+' + bug_value])\n", (2801, 2847), True, 'import numpy as np\n'), ((413, 457), 'os.path.exists', 'os.path.exists', (['(single_tool_base_path + tool)'], {}), '(single_tool_base_path + tool)\n', (427, 457), False, 'import os\n'), ((1656, 1680), 'numpy.array', 'np.array', (['method_cate[m]'], {}), '(method_cate[m])\n', (1664, 1680), True, 'import numpy as np\n'), ((3574, 3590), 'numpy.array', 'np.array', (['m_list'], {}), '(m_list)\n', (3582, 3590), True, 'import numpy as np\n'), ((3625, 3640), 'numpy.sort', 'np.sort', 
(['m_list'], {}), '(m_list)\n', (3632, 3640), True, 'import numpy as np\n')] |
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from datasets import load_dataset
import random
import logging
import sys
import argparse
import os
import torch
import json
import numpy as np
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("DEBUG"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
try:
output_data_dir = os.environ["SM_OUTPUT_DIR"]
except KeyError:
output_data_dir = "/opt/ml/output"
try:
model_dir = os.environ["SM_MODEL_DIR"]
except KeyError:
model_dir = "/opt/ml/model"
try:
training_dir = os.environ["SM_CHANNEL_TRAINING"]
except KeyError:
training_dir = "/opt/ml/input/data/training"
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--max_seq_length", type=int, default=64)
parser.add_argument("--train-batch-size", type=int, default=16)
parser.add_argument("--eval-batch-size", type=int, default=16)
parser.add_argument("--warmup_steps", type=int, default=10)
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--model_name", type=str, default="bert-base-uncased")
parser.add_argument("--text_column", type=str)
parser.add_argument("--label_column", type=str)
# Data, model, and output directories
parser.add_argument("--output-data-dir", type=str, default=output_data_dir)
parser.add_argument("--model-dir", type=str, default= model_dir)
#parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
parser.add_argument("--training_dir", type=str, default= training_dir)
parser.add_argument("--test_dir", type=str, default= training_dir)
args, _ = parser.parse_known_args()
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
def set_seed(seed):
"""
Sets random seeds for training.
:param seed: Integer used for seed.
:return: void
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def tokenize(batch):
return tokenizer(batch['text'], max_length=args.max_seq_length, padding='max_length', truncation=True)
def _get_dataset(data_dir, data_file_name, text_column, label_column):
"""generate dataset for model training"""
dataset = load_dataset('csv', data_files={'train': os.path.join(data_dir, data_file_name)})
# set format for pytorch
dataset.rename_column_(label_column, "labels")
dataset.rename_column_(text_column, "text")
dataset = dataset.map(tokenize, batched=True, batch_size=len(dataset))
dataset.set_format('torch', columns=['labels', 'attention_mask', 'input_ids'])
return dataset['train']
def train(args):
"""Model training"""
set_seed(args.seed)
train_dataset = _get_dataset(args.training_dir, "train.csv", args.text_column, args.label_column)
valid_dataset = _get_dataset(args.training_dir, "valid.csv", args.text_column, args.label_column)
# compute metrics function for binary classification
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary")
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# download model from model hub
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
# define training args
training_args = TrainingArguments(
output_dir=args.output_data_dir,
num_train_epochs=args.epochs,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
warmup_steps=args.warmup_steps,
seed = args.seed,
save_steps = 500,
save_total_limit = 2,
evaluation_strategy="steps",
eval_steps = 50,
logging_steps=50,
logging_dir=args.output_data_dir,#f"{args.output_data_dir}/logs",
learning_rate=float(args.learning_rate),
)
# create Trainer instance
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
)
# train model
trainer.train()
# evaluate model
eval_result = trainer.evaluate(eval_dataset=valid_dataset)
# writes eval result to file which can be accessed later in s3 ouput
with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer:
print(f"***** Eval results *****")
for key, value in sorted(eval_result.items()):
writer.write(f"{key} = {value}\n")
# Saves the model to s3
trainer.save_model(args.model_dir)
def model_fn(model_dir):
"""Load model"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(os.listdir(model_dir))
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
return model.to(device)
def input_fn(request_body, request_content_type):
"""An input_fn that loads a pickled tensor"""
if request_content_type == "application/json":
data = json.loads(request_body)
if isinstance(data, str):
data = [data]
elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], str):
pass
else:
raise ValueError("Unsupported input type. Input type can be a string or an non-empty list. \
I got {}".format(data))
tokenized_inputs = tokenizer.batch_encode_plus(data, max_length=args.max_seq_length, padding='max_length', truncation=True, return_tensors="pt")
tokenized_inputs.pop("token_type_ids")
return tokenized_inputs
raise ValueError("Unsupported content type: {}".format(request_content_type))
def predict_fn(input_data, model):
"""Model prediction"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
sm = torch.nn.Softmax(dim=1)
input_data = input_data.to(device)
with torch.no_grad():
output = model(**input_data)
output = sm(output['logits'])
y = output.detach().numpy()[0]
return y
if __name__ == "__main__":
train(args)
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"sklearn.metrics.accuracy_score",
"logging.getLevelName",
"torch.nn.Softmax",
"torch.no_grad",
"os.path.join",
"sklearn.metrics.precision_recall_fscore_support",
"json.loads",
"random.seed",
"torch.manual_seed",
"logging.StreamHandler",
"transf... | [((351, 378), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (368, 378), False, 'import logging\n'), ((917, 942), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (940, 942), False, 'import argparse\n'), ((2067, 2113), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.model_name'], {}), '(args.model_name)\n', (2096, 2113), False, 'from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer\n'), ((2249, 2266), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2260, 2266), False, 'import random\n'), ((2271, 2291), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2285, 2291), True, 'import numpy as np\n'), ((2296, 2319), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2313, 2319), False, 'import torch\n'), ((2327, 2352), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2350, 2352), False, 'import torch\n'), ((3797, 3864), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['args.model_name'], {}), '(args.model_name)\n', (3847, 3864), False, 'from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer\n'), ((4519, 4653), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'compute_metrics': 'compute_metrics', 'train_dataset': 'train_dataset', 'eval_dataset': 'valid_dataset'}), '(model=model, args=training_args, compute_metrics=compute_metrics,\n train_dataset=train_dataset, eval_dataset=valid_dataset)\n', (4526, 4653), False, 'from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer\n'), ((5377, 5438), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['model_dir'], {}), '(model_dir)\n', 
(5427, 5438), False, 'from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments, AutoTokenizer\n'), ((6532, 6555), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6548, 6555), False, 'import torch\n'), ((411, 440), 'logging.getLevelName', 'logging.getLevelName', (['"""DEBUG"""'], {}), "('DEBUG')\n", (431, 440), False, 'import logging\n'), ((2362, 2394), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2388, 2394), False, 'import torch\n'), ((3550, 3614), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['labels', 'preds'], {'average': '"""binary"""'}), "(labels, preds, average='binary')\n", (3581, 3614), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((3629, 3658), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'preds'], {}), '(labels, preds)\n', (3643, 3658), False, 'from sklearn.metrics import accuracy_score, precision_recall_fscore_support\n'), ((5342, 5363), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (5352, 5363), False, 'import os\n'), ((5634, 5658), 'json.loads', 'json.loads', (['request_body'], {}), '(request_body)\n', (5644, 5658), False, 'import json\n'), ((6604, 6619), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6617, 6619), False, 'import torch\n'), ((456, 489), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (477, 489), False, 'import logging\n'), ((4921, 4975), 'os.path.join', 'os.path.join', (['args.output_data_dir', '"""eval_results.txt"""'], {}), "(args.output_data_dir, 'eval_results.txt')\n", (4933, 4975), False, 'import os\n'), ((5294, 5319), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5317, 5319), False, 'import torch\n'), ((6447, 6472), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6470, 6472), False, 'import torch\n'), 
((2702, 2740), 'os.path.join', 'os.path.join', (['data_dir', 'data_file_name'], {}), '(data_dir, data_file_name)\n', (2714, 2740), False, 'import os\n')] |
from .context import lux
import pytest
import pandas as pd
import numpy as np
from lux.utils import date_utils
from lux.executor.PandasExecutor import PandasExecutor
def test_dateformatter():
    """date_utils.date_formatter should refine its granularity as Year components stop being unique."""
    df = pd.read_csv("lux/data/car.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y')  # change pandas dtype for the column "Year" to datetype
    timestamp = np.datetime64('2019-08-26')
    df.maintain_metadata()
    # All years are unique at year granularity -> year-only label.
    assert date_utils.date_formatter(timestamp, df) == '2019'
    df["Year"][0] = np.datetime64('1970-03-01')  # make month non unique
    assert date_utils.date_formatter(timestamp, df) == '2019-8'
    df["Year"][0] = np.datetime64('1970-03-03')  # make day non unique
    assert date_utils.date_formatter(timestamp, df) == '2019-8-26'
def test_period_selection():
    """Selecting a pd.Period column should still yield LuxDataFrame-backed visualizations."""
    df = pd.read_csv("lux/data/car.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y')
    df["Year"] = pd.DatetimeIndex(df["Year"]).to_period(freq='A')
    df.set_intent([lux.Clause(attribute=["Horsepower", "Weight", "Acceleration"]),
                   lux.Clause(attribute="Year")])
    PandasExecutor.execute(df.current_vis, df)
    # Every generated vis must carry a LuxDataFrame as its data payload.
    for vis in df.current_vis:
        assert type(vis.data) == lux.core.frame.LuxDataFrame
    assert all(df.current_vis[2].data.columns == ["Year", 'Acceleration'])
def test_period_filter():
    """Filter recommendations over a pd.Period column should infer pd.Period filter values."""
    df = pd.read_csv("lux/data/car.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y')
    df["Year"] = pd.DatetimeIndex(df["Year"]).to_period(freq='A')
    df.set_intent([lux.Clause(attribute="Acceleration"), lux.Clause(attribute="Horsepower")])
    PandasExecutor.execute(df.current_vis, df)
    df._repr_html_()
    inferred_value = df.recommendation['Filter'][2]._inferred_intent[2].value
    assert isinstance(inferred_value, pd.Period)
def test_period_to_altair():
    """Altair export of a period-filtered vis should embed the year filter in the code."""
    df = pd.read_csv("lux/data/car.csv")
    df["Year"] = pd.to_datetime(df["Year"], format='%Y')
    df["Year"] = pd.DatetimeIndex(df["Year"]).to_period(freq='A')
    df.set_intent([lux.Clause(attribute="Acceleration"), lux.Clause(attribute="Horsepower")])
    PandasExecutor.execute(df.current_vis, df)
    df._repr_html_()
    exported_code = df.recommendation['Filter'][2].to_Altair()
    assert 'Year = 1971' in exported_code
def test_refresh_inplace():
    """refresh_source should pick up metadata changes made to the backing DataFrame in place."""
    df = pd.DataFrame({'date': ['2020-01-01', '2020-02-01', '2020-03-01', '2020-04-01'],
                       'value': [10.5, 15.2, 20.3, 25.2]})
    with pytest.warns(UserWarning, match="Lux detects that the attribute 'date' may be temporal."):
        df._repr_html_()
    assert df.data_type_lookup["date"] == "temporal"
    from lux.vis.Vis import Vis
    vis = Vis(["date", "value"], df)
    # Convert the string column to real datetimes, then refresh the vis source.
    df['date'] = pd.to_datetime(df['date'], format="%Y-%m-%d")
    df.maintain_metadata()
    assert df.data_type['temporal'][0] == 'date'
    vis.refresh_source(df)
    assert vis.mark == "line"
    x_channel = vis.get_attr_by_channel("x")[0]
    y_channel = vis.get_attr_by_channel("y")[0]
    assert (x_channel.attribute, y_channel.attribute) == ("date", "value")
"pandas.DataFrame",
"lux.vis.Vis.Vis",
"pandas.read_csv",
"numpy.datetime64",
"pytest.warns",
"pandas.DatetimeIndex",
"pandas.to_datetime",
"lux.executor.PandasExecutor.PandasExecutor.execute",
"lux.utils.date_utils.date_formatter"
] | [((200, 231), 'pandas.read_csv', 'pd.read_csv', (['"""lux/data/car.csv"""'], {}), "('lux/data/car.csv')\n", (211, 231), True, 'import pandas as pd\n'), ((247, 287), 'pandas.to_datetime', 'pd.to_datetime', (["ldf['Year']"], {'format': '"""%Y"""'}), "(ldf['Year'], format='%Y')\n", (261, 287), True, 'import pandas as pd\n'), ((358, 385), 'numpy.datetime64', 'np.datetime64', (['"""2019-08-26"""'], {}), "('2019-08-26')\n", (371, 385), True, 'import numpy as np\n'), ((490, 517), 'numpy.datetime64', 'np.datetime64', (['"""1970-03-01"""'], {}), "('1970-03-01')\n", (503, 517), True, 'import numpy as np\n'), ((627, 654), 'numpy.datetime64', 'np.datetime64', (['"""1970-03-03"""'], {}), "('1970-03-03')\n", (640, 654), True, 'import numpy as np\n'), ((782, 813), 'pandas.read_csv', 'pd.read_csv', (['"""lux/data/car.csv"""'], {}), "('lux/data/car.csv')\n", (793, 813), True, 'import pandas as pd\n'), ((829, 869), 'pandas.to_datetime', 'pd.to_datetime', (["ldf['Year']"], {'format': '"""%Y"""'}), "(ldf['Year'], format='%Y')\n", (843, 869), True, 'import pandas as pd\n'), ((1054, 1098), 'lux.executor.PandasExecutor.PandasExecutor.execute', 'PandasExecutor.execute', (['ldf.current_vis', 'ldf'], {}), '(ldf.current_vis, ldf)\n', (1076, 1098), False, 'from lux.executor.PandasExecutor import PandasExecutor\n'), ((1299, 1330), 'pandas.read_csv', 'pd.read_csv', (['"""lux/data/car.csv"""'], {}), "('lux/data/car.csv')\n", (1310, 1330), True, 'import pandas as pd\n'), ((1346, 1386), 'pandas.to_datetime', 'pd.to_datetime', (["ldf['Year']"], {'format': '"""%Y"""'}), "(ldf['Year'], format='%Y')\n", (1360, 1386), True, 'import pandas as pd\n'), ((1550, 1594), 'lux.executor.PandasExecutor.PandasExecutor.execute', 'PandasExecutor.execute', (['ldf.current_vis', 'ldf'], {}), '(ldf.current_vis, ldf)\n', (1572, 1594), False, 'from lux.executor.PandasExecutor import PandasExecutor\n'), ((1754, 1785), 'pandas.read_csv', 'pd.read_csv', (['"""lux/data/car.csv"""'], {}), "('lux/data/car.csv')\n", (1765, 
1785), True, 'import pandas as pd\n'), ((1800, 1839), 'pandas.to_datetime', 'pd.to_datetime', (["df['Year']"], {'format': '"""%Y"""'}), "(df['Year'], format='%Y')\n", (1814, 1839), True, 'import pandas as pd\n'), ((2000, 2042), 'lux.executor.PandasExecutor.PandasExecutor.execute', 'PandasExecutor.execute', (['df.current_vis', 'df'], {}), '(df.current_vis, df)\n', (2022, 2042), False, 'from lux.executor.PandasExecutor import PandasExecutor\n'), ((2198, 2317), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': ['2020-01-01', '2020-02-01', '2020-03-01', '2020-04-01'], 'value':\n [10.5, 15.2, 20.3, 25.2]}"], {}), "({'date': ['2020-01-01', '2020-02-01', '2020-03-01',\n '2020-04-01'], 'value': [10.5, 15.2, 20.3, 25.2]})\n", (2210, 2317), True, 'import pandas as pd\n'), ((2511, 2537), 'lux.vis.Vis.Vis', 'Vis', (["['date', 'value']", 'df'], {}), "(['date', 'value'], df)\n", (2514, 2537), False, 'from lux.vis.Vis import Vis\n'), ((2551, 2596), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {'format': '"""%Y-%m-%d"""'}), "(df['date'], format='%Y-%m-%d')\n", (2565, 2596), True, 'import pandas as pd\n'), ((419, 460), 'lux.utils.date_utils.date_formatter', 'date_utils.date_formatter', (['timestamp', 'ldf'], {}), '(timestamp, ldf)\n', (444, 460), False, 'from lux.utils import date_utils\n'), ((552, 593), 'lux.utils.date_utils.date_formatter', 'date_utils.date_formatter', (['timestamp', 'ldf'], {}), '(timestamp, ldf)\n', (577, 593), False, 'from lux.utils import date_utils\n'), ((687, 728), 'lux.utils.date_utils.date_formatter', 'date_utils.date_formatter', (['timestamp', 'ldf'], {}), '(timestamp, ldf)\n', (712, 728), False, 'from lux.utils import date_utils\n'), ((2317, 2411), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Lux detects that the attribute \'date\' may be temporal."""'}), '(UserWarning, match=\n "Lux detects that the attribute \'date\' may be temporal.")\n', (2329, 2411), False, 'import pytest\n'), ((886, 915), 'pandas.DatetimeIndex', 
'pd.DatetimeIndex', (["ldf['Year']"], {}), "(ldf['Year'])\n", (902, 915), True, 'import pandas as pd\n'), ((1403, 1432), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["ldf['Year']"], {}), "(ldf['Year'])\n", (1419, 1432), True, 'import pandas as pd\n'), ((1855, 1883), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['Year']"], {}), "(df['Year'])\n", (1871, 1883), True, 'import pandas as pd\n')] |
import numpy
import csb.numeric
import csb.test as test
from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble
@test.functional
class TestEnergy(test.Case):
    """Functional checks for the statistical-mechanics ensemble energy transforms."""

    def testBoltzmann(self):
        energies = numpy.linspace(-50, 1000, 1000)
        transformed = BoltzmannEnsemble(beta=1).energy(energies)
        # beta = 1 leaves the energies untouched.
        for expected, actual in zip(energies, transformed):
            self.assertEqual(expected, actual)
        transformed = BoltzmannEnsemble(beta=0.001).energy(energies)
        # An arbitrary beta simply rescales every energy.
        for expected, actual in zip(energies * 0.001, transformed):
            self.assertEqual(expected, actual)

    def testTsallis(self):
        energies = numpy.linspace(-50, 1000, 1000)
        transformed = TsallisEnsemble(q=1.).energy(energies)
        # q -> 1 reduces the Tsallis transform to the identity.
        for expected, actual in zip(energies, transformed):
            self.assertEqual(expected, actual)
        q = 1.1
        transformed = TsallisEnsemble(q=q, e_min= -50.).energy(energies)
        expected_energies = q / (q - 1.) * csb.numeric.log(1 + (q - 1) * (energies + 50.)) - 50
        for expected, actual in zip(expected_energies, transformed):
            self.assertAlmostEqual(expected, actual, delta=1e-5)

    def testComposite(self):
        energies_a = numpy.linspace(-50, 1000, 1000)
        energies_b = numpy.linspace(-30, 3000, 1000)
        q = 1.1
        beta = 0.1
        # The composite energy is the sum of the component ensemble energies.
        expected_energies = q / (q - 1.) * csb.numeric.log(1 + (q - 1) * (energies_a + 50.)) - 50
        expected_energies += energies_b * beta
        composite = CompositeEnsemble([TsallisEnsemble(q=q, e_min= -50.),
                                       BoltzmannEnsemble(beta=beta)])
        actual_energies = composite.energy([energies_a, energies_b])
        for expected, actual in zip(expected_energies, actual_energies):
            self.assertAlmostEqual(expected, actual, delta=1e-5)
if __name__ == '__main__':
    # Launch the csb test console so this module can be executed directly.
    test.Console()
| [
"numpy.linspace",
"csb.statmech.ensembles.BoltzmannEnsemble",
"csb.statmech.ensembles.TsallisEnsemble",
"csb.test.Console"
] | [((1668, 1682), 'csb.test.Console', 'test.Console', ([], {}), '()\n', (1680, 1682), True, 'import csb.test as test\n'), ((237, 268), 'numpy.linspace', 'numpy.linspace', (['(-50)', '(1000)', '(1000)'], {}), '(-50, 1000, 1000)\n', (251, 268), False, 'import numpy\n'), ((283, 308), 'csb.statmech.ensembles.BoltzmannEnsemble', 'BoltzmannEnsemble', ([], {'beta': '(1)'}), '(beta=1)\n', (300, 308), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n'), ((433, 462), 'csb.statmech.ensembles.BoltzmannEnsemble', 'BoltzmannEnsemble', ([], {'beta': '(0.001)'}), '(beta=0.001)\n', (450, 462), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n'), ((621, 652), 'numpy.linspace', 'numpy.linspace', (['(-50)', '(1000)', '(1000)'], {}), '(-50, 1000, 1000)\n', (635, 652), False, 'import numpy\n'), ((672, 694), 'csb.statmech.ensembles.TsallisEnsemble', 'TsallisEnsemble', ([], {'q': '(1.0)'}), '(q=1.0)\n', (687, 694), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n'), ((828, 863), 'csb.statmech.ensembles.TsallisEnsemble', 'TsallisEnsemble', ([], {'q': '(1.1)', 'e_min': '(-50.0)'}), '(q=1.1, e_min=-50.0)\n', (843, 863), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n'), ((1131, 1162), 'numpy.linspace', 'numpy.linspace', (['(-50)', '(1000)', '(1000)'], {}), '(-50, 1000, 1000)\n', (1145, 1162), False, 'import numpy\n'), ((1176, 1207), 'numpy.linspace', 'numpy.linspace', (['(-30)', '(3000)', '(1000)'], {}), '(-30, 3000, 1000)\n', (1190, 1207), False, 'import numpy\n'), ((1376, 1409), 'csb.statmech.ensembles.TsallisEnsemble', 'TsallisEnsemble', ([], {'q': 'q', 'e_min': '(-50.0)'}), '(q=q, e_min=-50.0)\n', (1391, 1409), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n'), ((1443, 1471), 'csb.statmech.ensembles.BoltzmannEnsemble', 'BoltzmannEnsemble', 
([], {'beta': 'beta'}), '(beta=beta)\n', (1460, 1471), False, 'from csb.statmech.ensembles import BoltzmannEnsemble, TsallisEnsemble, CompositeEnsemble\n')] |
# Copyright (c) 2014, <NAME>, <NAME>
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .parameterized import Parameterized
from .param import Param
class Remapping(Parameterized):
    """Base class for re-mapping a subset of a model's parameters.

    Subclasses implement :meth:`mapping` to compute the values the re-mapped
    parameters should take; on every parameter change those values are written
    back into the highest parent's ``param_array``.
    """
    def mapping(self):
        """
        The return value of this function gives the values which the re-mapped
        parameters should take. Implement in sub-classes.
        """
        raise NotImplementedError

    def callback(self):
        """Hook invoked on updates. Implement in sub-classes."""
        raise NotImplementedError

    def __str__(self):
        return self.name

    def parameters_changed(self):
        # Ensure all our parameters have the correct value, as specified by our mapping.
        index = self._highest_parent_.constraints[self]
        self._highest_parent_.param_array[index] = self.mapping()
        # Notify observers with a plain loop: the original used a side-effect list
        # comprehension, which builds a throwaway list and obscures intent.
        for p in self.tied_parameters:
            p.notify_observers(which=self)
class Fix(Remapping):
    # Placeholder subclass (presumably for fixing parameters to constant values --
    # TODO confirm intended behaviour); no behaviour of its own yet.
    pass
class Tie(Parameterized):
    """
    The new parameter tie framework. (under development)
    All the parameters tied together get a new parameter inside the *Tie* object.
    Its value should always be equal to all the tied parameters, and its gradient
    is the sum of all the tied parameters.
    =====Implementation Details=====
    The *Tie* object should only exist on the top of param tree (the highest parent).
    self.label_buf:
    It uses a label buffer that has the same length as all the parameters (self._highest_parent_.param_array).
    The buffer keeps track of all the tied parameters. All the tied parameters have a label (an integer) higher
    than 0, and the parameters that have the same label are tied together.
    self.buf_index:
    An auxiliary index list for the global index of the tie parameter inside the *Tie* object.
    ================================
    TODO:
    * EVERYTHING
    NOTE(review): this class uses the deprecated aliases np.bool/np.int, which were
    removed in NumPy 1.24 -- confirm the project's pinned NumPy version (or replace
    the aliases with the builtins bool/int).
    """
    def __init__(self, name='tie'):
        # No tie parameter exists until add_tied_parameter() is first called.
        super(Tie, self).__init__(name)
        self.tied_param = None
        # The buffer keeps track of tie status
        self.label_buf = None
        # The global indices of the 'tied' param
        self.buf_idx = None
        # A boolean array indicating non-tied parameters
        self._tie_ = None
    def getTieFlag(self, p=None):
        """Return the boolean non-tied flags, either for parameter *p* or for the whole model."""
        if self.tied_param is None:
            # Nothing is tied yet: (re)build an all-True flag array lazily.
            if self._tie_ is None or self._tie_.size != self._highest_parent_.param_array.size:
                self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
        if p is not None:
            return self._tie_[p._highest_parent_._raveled_index_for(p)]
        return self._tie_
    def _init_labelBuf(self):
        """Lazily allocate the label buffer and the non-tied flag array."""
        if self.label_buf is None:
            self.label_buf = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
        if self._tie_ is None or self._tie_.size != self._highest_parent_.param_array.size:
            self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
    def _updateTieFlag(self):
        """Recompute the non-tied flags: labelled entries are tied, except the tie params themselves."""
        if self._tie_.size != self.label_buf.size:
            self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
        self._tie_[self.label_buf>0] = False
        self._tie_[self.buf_idx] = True
    def add_tied_parameter(self, p, p2=None):
        """
        Tie the list of parameters p together (p2==None) or
        Tie the list of parameters p with the list of parameters p2 (p2!=None)
        """
        self._init_labelBuf()
        if p2 is None:
            idx = self._highest_parent_._raveled_index_for(p)
            # Force all members of the group to share one value before tying.
            val = self._sync_val_group(idx)
            if np.all(self.label_buf[idx]==0):
                # None of p has been tied before.
                tie_idx = self._expandTieParam(1)
                print(tie_idx)
                tie_id = self.label_buf.max()+1
                self.label_buf[tie_idx] = tie_id
            else:
                # Some of p already belong to tie groups: merge those groups into one.
                b = self.label_buf[idx]
                ids = np.unique(b[b>0])
                tie_id, tie_idx = self._merge_tie_param(ids)
            self._highest_parent_.param_array[tie_idx] = val
            # Re-query the raveled indices: expanding/removing tie params shifts them.
            idx = self._highest_parent_._raveled_index_for(p)
            self.label_buf[idx] = tie_id
        else:
            # Tying two separate parameter lists together is not implemented yet.
            pass
        self._updateTieFlag()
    def _merge_tie_param(self, ids):
        """Merge the tie parameters with ids in the list."""
        if len(ids)==1:
            # Only one group involved: nothing to merge, just look up its tie index.
            id_final_idx = self.buf_idx[self.label_buf[self.buf_idx]==ids[0]][0]
            return ids[0],id_final_idx
        id_final = ids[0]
        ids_rm = ids[1:]
        # Positions (within tied_param) of the groups being absorbed.
        label_buf_param = self.label_buf[self.buf_idx]
        idx_param = [np.where(label_buf_param==i)[0][0] for i in ids_rm]
        self._removeTieParam(idx_param)
        # Relabel all members of the removed groups with the surviving id.
        [np.put(self.label_buf, np.where(self.label_buf==i), id_final) for i in ids_rm]
        id_final_idx = self.buf_idx[self.label_buf[self.buf_idx]==id_final][0]
        return id_final, id_final_idx
    def _sync_val_group(self, idx):
        """Set every parameter in the group to the group mean and return that value."""
        self._highest_parent_.param_array[idx] = self._highest_parent_.param_array[idx].mean()
        return self._highest_parent_.param_array[idx][0]
    def _expandTieParam(self, num):
        """Expand the tie param with the number of *num* parameters"""
        if self.tied_param is None:
            new_buf = np.empty((num,))
        else:
            # Preserve existing tie values while growing the parameter.
            new_buf = np.empty((self.tied_param.size+num,))
            new_buf[:self.tied_param.size] = self.tied_param.param_array.copy()
            self.remove_parameter(self.tied_param)
        self.tied_param = Param('tied',new_buf)
        self.add_parameter(self.tied_param)
        # Adding/removing parameters shifts the global raveled indices.
        buf_idx_new = self._highest_parent_._raveled_index_for(self.tied_param)
        self._expand_label_buf(self.buf_idx, buf_idx_new)
        self.buf_idx = buf_idx_new
        return self.buf_idx[-num:]
    def _removeTieParam(self, idx):
        """idx within tied_param"""
        new_buf = np.empty((self.tied_param.size-len(idx),))
        # bool_list marks the surviving tie parameters.
        bool_list = np.ones((self.tied_param.size,),dtype=np.bool)
        bool_list[idx] = False
        new_buf[:] = self.tied_param.param_array[bool_list]
        self.remove_parameter(self.tied_param)
        self.tied_param = Param('tied',new_buf)
        self.add_parameter(self.tied_param)
        buf_idx_new = self._highest_parent_._raveled_index_for(self.tied_param)
        self._shrink_label_buf(self.buf_idx, buf_idx_new, bool_list)
        self.buf_idx = buf_idx_new
    def _expand_label_buf(self, idx_old, idx_new):
        """Expand label buffer accordingly"""
        if idx_old is None:
            self.label_buf = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
        else:
            # Copy labels across, mapping the old tie-param slots onto the new ones.
            bool_old = np.zeros((self.label_buf.size,),dtype=np.bool)
            bool_old[idx_old] = True
            bool_new = np.zeros((self._highest_parent_.param_array.size,),dtype=np.bool)
            bool_new[idx_new] = True
            label_buf_new = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
            label_buf_new[np.logical_not(bool_new)] = self.label_buf[np.logical_not(bool_old)]
            label_buf_new[idx_new[:len(idx_old)]] = self.label_buf[idx_old]
            self.label_buf = label_buf_new
    def _shrink_label_buf(self, idx_old, idx_new, bool_list):
        """Shrink the label buffer after tie parameters were removed (bool_list marks survivors)."""
        bool_old = np.zeros((self.label_buf.size,),dtype=np.bool)
        bool_old[idx_old] = True
        bool_new = np.zeros((self._highest_parent_.param_array.size,),dtype=np.bool)
        bool_new[idx_new] = True
        label_buf_new = np.empty(self._highest_parent_.param_array.shape, dtype=np.int)
        label_buf_new[np.logical_not(bool_new)] = self.label_buf[np.logical_not(bool_old)]
        label_buf_new[idx_new] = self.label_buf[idx_old[bool_list]]
        self.label_buf = label_buf_new
    def _check_change(self):
        """Propagate edits between tied parameters; return True if anything changed."""
        changed = False
        if self.tied_param is not None:
            for i in range(self.tied_param.size):
                b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
                b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
                if b.sum()==0:
                    # All members already agree with the tie value: nothing to do.
                    print('XXX')
                    continue
                elif b.sum()==1:
                    # Exactly one member was edited: spread its value to the whole group.
                    print('!!!')
                    val = self._highest_parent_.param_array[b0][b][0]
                    self._highest_parent_.param_array[b0] = val
                else:
                    # Several members disagree: reset the group to the tie value.
                    print('@@@')
                    self._highest_parent_.param_array[b0] = self.tied_param[i]
                changed = True
        return changed
    def parameters_changed(self):
        # Ensure all our parameters have the correct value, as specified by our mapping.
        changed = self._check_change()
        if changed:
            self._highest_parent_._trigger_params_changed()
        self.collate_gradient()
    def collate_gradient(self):
        """Set each tie parameter's gradient to the sum of its group members' gradients."""
        if self.tied_param is not None:
            self.tied_param.gradient = 0.
            [np.put(self.tied_param.gradient, i, self._highest_parent_.gradient[self.label_buf==self.label_buf[self.buf_idx[i]]].sum())
                 for i in range(self.tied_param.size)]
    def propagate_val(self):
        """Push every tie parameter's value out to all members of its group."""
        if self.tied_param is not None:
            for i in range(self.tied_param.size):
                self._highest_parent_.param_array[self.label_buf==self.label_buf[self.buf_idx[i]]] = self.tied_param[i]
| [
"numpy.empty",
"numpy.logical_not",
"numpy.zeros",
"numpy.ones",
"numpy.unique",
"numpy.where",
"numpy.all"
] | [((5972, 6019), 'numpy.ones', 'np.ones', (['(self.tied_param.size,)'], {'dtype': 'np.bool'}), '((self.tied_param.size,), dtype=np.bool)\n', (5979, 6019), True, 'import numpy as np\n'), ((7295, 7342), 'numpy.zeros', 'np.zeros', (['(self.label_buf.size,)'], {'dtype': 'np.bool'}), '((self.label_buf.size,), dtype=np.bool)\n', (7303, 7342), True, 'import numpy as np\n'), ((7394, 7460), 'numpy.zeros', 'np.zeros', (['(self._highest_parent_.param_array.size,)'], {'dtype': 'np.bool'}), '((self._highest_parent_.param_array.size,), dtype=np.bool)\n', (7402, 7460), True, 'import numpy as np\n'), ((7517, 7580), 'numpy.empty', 'np.empty', (['self._highest_parent_.param_array.shape'], {'dtype': 'np.int'}), '(self._highest_parent_.param_array.shape, dtype=np.int)\n', (7525, 7580), True, 'import numpy as np\n'), ((2693, 2756), 'numpy.zeros', 'np.zeros', (['self._highest_parent_.param_array.shape'], {'dtype': 'np.int'}), '(self._highest_parent_.param_array.shape, dtype=np.int)\n', (2701, 2756), True, 'import numpy as np\n'), ((2874, 2939), 'numpy.ones', 'np.ones', (['(self._highest_parent_.param_array.size,)'], {'dtype': 'np.bool'}), '((self._highest_parent_.param_array.size,), dtype=np.bool)\n', (2881, 2939), True, 'import numpy as np\n'), ((3058, 3123), 'numpy.ones', 'np.ones', (['(self._highest_parent_.param_array.size,)'], {'dtype': 'np.bool'}), '((self._highest_parent_.param_array.size,), dtype=np.bool)\n', (3065, 3123), True, 'import numpy as np\n'), ((3606, 3638), 'numpy.all', 'np.all', (['(self.label_buf[idx] == 0)'], {}), '(self.label_buf[idx] == 0)\n', (3612, 3638), True, 'import numpy as np\n'), ((5296, 5312), 'numpy.empty', 'np.empty', (['(num,)'], {}), '((num,))\n', (5304, 5312), True, 'import numpy as np\n'), ((5349, 5388), 'numpy.empty', 'np.empty', (['(self.tied_param.size + num,)'], {}), '((self.tied_param.size + num,))\n', (5357, 5388), True, 'import numpy as np\n'), ((6596, 6659), 'numpy.zeros', 'np.zeros', (['self._highest_parent_.param_array.shape'], 
{'dtype': 'np.int'}), '(self._highest_parent_.param_array.shape, dtype=np.int)\n', (6604, 6659), True, 'import numpy as np\n'), ((6697, 6744), 'numpy.zeros', 'np.zeros', (['(self.label_buf.size,)'], {'dtype': 'np.bool'}), '((self.label_buf.size,), dtype=np.bool)\n', (6705, 6744), True, 'import numpy as np\n'), ((6804, 6870), 'numpy.zeros', 'np.zeros', (['(self._highest_parent_.param_array.size,)'], {'dtype': 'np.bool'}), '((self._highest_parent_.param_array.size,), dtype=np.bool)\n', (6812, 6870), True, 'import numpy as np\n'), ((6935, 6998), 'numpy.zeros', 'np.zeros', (['self._highest_parent_.param_array.shape'], {'dtype': 'np.int'}), '(self._highest_parent_.param_array.shape, dtype=np.int)\n', (6943, 6998), True, 'import numpy as np\n'), ((7603, 7627), 'numpy.logical_not', 'np.logical_not', (['bool_new'], {}), '(bool_new)\n', (7617, 7627), True, 'import numpy as np\n'), ((7646, 7670), 'numpy.logical_not', 'np.logical_not', (['bool_old'], {}), '(bool_old)\n', (7660, 7670), True, 'import numpy as np\n'), ((2405, 2470), 'numpy.ones', 'np.ones', (['(self._highest_parent_.param_array.size,)'], {'dtype': 'np.bool'}), '((self._highest_parent_.param_array.size,), dtype=np.bool)\n', (2412, 2470), True, 'import numpy as np\n'), ((3946, 3965), 'numpy.unique', 'np.unique', (['b[b > 0]'], {}), '(b[b > 0])\n', (3955, 3965), True, 'import numpy as np\n'), ((4752, 4781), 'numpy.where', 'np.where', (['(self.label_buf == i)'], {}), '(self.label_buf == i)\n', (4760, 4781), True, 'import numpy as np\n'), ((7025, 7049), 'numpy.logical_not', 'np.logical_not', (['bool_new'], {}), '(bool_new)\n', (7039, 7049), True, 'import numpy as np\n'), ((7068, 7092), 'numpy.logical_not', 'np.logical_not', (['bool_old'], {}), '(bool_old)\n', (7082, 7092), True, 'import numpy as np\n'), ((4628, 4658), 'numpy.where', 'np.where', (['(label_buf_param == i)'], {}), '(label_buf_param == i)\n', (4636, 4658), True, 'import numpy as np\n')] |
'''
The LaserAtomSystem class definition.
'''
from LASED.time_evolution import *
from LASED.density_matrix import *
from LASED.index import *
from LASED.generate_sub_states import *
from LASED.save_to_csv import *
from LASED.rotation import *
import numpy as np
class LaserAtomSystem:
"""A physical system composed of a laser field acting on an atomic system.
Attributes:
Q_decay (list): List of ints describing the selection rules of the decay. Selection rules are set to +1, -1, and 0.
rho_t (list): List of flattened 2D arrays over the time interval simulated for. This is initialised as empty as no time evolution has taken place.
E (list): List of State objects which are the excited states of the system.
G (list): List of State objects which are the ground states of the system.
tau (float): Lifetime of transition in nanoseconds between excited and ground state.
Q (list): List of laser polarisations. This can be +1, 0, -1 for right-hand circular, left-hand circular, and linear polarisation. If more than one polarisation is in the list then the system will be excited with a linear combination of the polarisations.
laser_wavelength (float): Wavelength of transition from ground to excited state in metres.
laser_intensity (float): Intensity of the laser in mW/mm^2.
laser_power (float): Power of the laser in mW. This is needed for Gaussian averaging of beam profile.
tau_f (float): Upper state lifetime to states outside of laser coupling in nanoseconds.
tau_b (float): Ground state lifetime to states outside of laser coupling in nanoseconds.
rho_0 (list of list): 2D array creating the density matrix at t = 0.
rabi_scaling (float): The normalisation of the Rabi frequency. The half-Rabi frequency is divided by this number. Use this if there are more than one polarisations to normalise the population.
rabi_factors (list): Elements of this list are multiplies by the half-Rabi frequency for each polarisation. This can be used to obtain elliptical polarisation.
"""
Q_decay = [1, 0 ,-1]
rho_t = []
time = []
def __init__(self, E, G, tau, Q, laser_wavelength, laser_intensity = None,
laser_power = None, tau_f = None, tau_b = None, rabi_scaling = None,
rabi_factors = None):
"""
Inits LaserAtomSystem.
"""
self.E = E # list of excited States
self.G = G # list of ground States
self.tau = tau # lifteime in ns/rad, N.B NIST database uses A_ki in rad/s
self.Q = Q # laser radiation polarisation
self.laser_wavelength = laser_wavelength # wavelength of the laser in nm
self.tau_f = tau_f # lifetime of upper state decay to other states not coupled to by laser (can be non-radiative) in ns
self.tau_b = tau_b # lifetime of ground state decay to other states not coupled to by the laser in ns
self.laser_intensity = laser_intensity # in mW/mm^2
self.laser_power = laser_power # in mW
self.rho_0 = np.zeros((self.n*self.n, 1), dtype = complex) # flattened density matrix
self.rabi_scaling = rabi_scaling # Normalisation factor of rabi frequencies for different polarisations
self.rabi_factors = rabi_factors # multiply these by rabi frequency for different polarisations
@property
def n(self):
""" Total number of substates.
Returns:
int: Number of substates.
"""
return int(len(self.G)+len(self.E))
@property
def rho_e0(self):
""" Upper state density matrix for the initial condition.
Returns:
ndarray: Upper state density matrix composed of all states defined in E.
"""
return getSingleStateMatrix(self.rho_0, self.n, self.E)
@property
def rho_g0(self):
""" Lower state density matrix for the initial condition.
Returns:
ndarray: Lower state density matrix composed of all states defined in G.
"""
return getSingleStateMatrix(self.rho_0, self.n, self.G)
@property
def rho_et(self):
""" Upper state density matrix for all of the time evolution.
Returns:
list of ndarray: A list of upper state density matrices for the time evolution.
"""
rho_et = []
flipped_rho_t = np.transpose(self.rho_t) # Flip to loop over all rho
for rho in flipped_rho_t:
new_rho = np.zeros((self.n*self.n, 1), dtype = complex) # Placeholder
for i, element in enumerate(new_rho):
new_rho[i, 0] = rho[i]
rho_et.append(getSingleStateMatrix(new_rho, self.n, self.E))
return rho_et
@property
def rho_gt(self):
""" Lower state density matrix for all of the time evolution.
Returns:
list of ndarray: A list oflower state density matrices for the time evolution.
"""
rho_et = []
flipped_rho_t = np.transpose(self.rho_t) # Flip to loop over all rho
for rho in flipped_rho_t:
new_rho = np.zeros((self.n*self.n, 1), dtype = complex) # Placeholder
for i, element in enumerate(new_rho):
new_rho[i, 0] = rho[i]
rho_et.append(getSingleStateMatrix(new_rho, self.n, self.G))
return rho_et
def __repr__(self):
E_str = [e.label for e in self.E]
G_str = [g.label for g in self.G]
return f"LaserAtomSystem({E_str}, {G_str}, {self.tau}, {self.Q}, {self.Q_decay}, {self.laser_wavelength}, {self.tau_f}, {self.laser_intensity}, {self.laser_power})"
def Rho_0(self, i, j):
"""Accessor for an element in rho_0.
Parameters:
i (State): First state index.
j (State): Second state index.
Returns:
complex: element of the laser-atom system density matrix at t=0.
Example:
print(Rho_0(one, two))
"""
row = index(i, j, self.n)
return self.rho_0[row, 0]
def setRho_0(self, i, j, value):
"""Sets a value to an element of rho_0.
Parameters:
i (State): First state index
j (State): Second state index
value (np.complex): Sets the value of rho_ij to the complex value here
"""
if(value > 1):
print("Cannot set an element of a density matrix > 1!")
return
else:
row = index(i, j, self.n)
self.rho_0[row, 0] = value
def appendDensityMatrixToRho_0(self, density_rho, state_type):
"""Sets the laser-atom system density matrix at t=0 to the matrix given.
Parameters:
density_rho (ndarray): 2D array of the system density matrix.
state_type (char): Defines whether the density matrix is for the ground or excited state. Either an "e" or "g" for excited and ground state density matrices respectively.
Note:
Density matrix input must be square and the size of the matrix must match with E or G.
"""
size = len(density_rho)
if(size == len(self.G) and state_type == "g"):
sub_states = self.G
elif(size == len(self.E) and state_type == "e"):
sub_states = self.E
else:
print("Size of density_rho does not match with state_type given.")
return
appendDensityMatrixToFlatCoupledMatrix(self.rho_0, density_rho, sub_states, self.n)
def clearRho_0(self):
"""Makes all values of rho_0 zero.
"""
self.rho_0 = np.zeros((self.n*self.n, 1), dtype = complex)
def Rho_t(self, i, j):
"""Accessor for an element in rho_t.
Parameters:
i (State): First state index
j (State): Second state index
Returns:
list: Array of an element in laser-atom system for all of the simulation time
Example:
print(Rho_t(one, two)) prints element rho_12 if one and two are State objects corresponding
to label 1 and 2 respectively.
"""
return self.rho_t[index(i, j, self.n)]
    def rotateRho_0(self, alpha, beta, gamma):
        """ Rotate rho_0 by the Euler angles alpha, beta, and gamma.
        
        Parameters:
            alpha: rotation (in radians) around z-axis
            beta: rotation (in radians) about the y'-axis
            gamma: rotation (in radians) about the z''-axis
        """
        # Replaces rho_0 in place with the rotated flattened density matrix.
        # NOTE(review): the Euler convention is determined by rotateFlatDensityMatrix
        # (from LASED.rotation) -- confirm against that module before relying on it.
        self.rho_0 = rotateFlatDensityMatrix(self.rho_0, self.n, self.E, self.G, alpha, beta, gamma)
    def rotateRho_t(self, alpha, beta, gamma):
        """ Rotate every density matrix in rho_t by the Euler angles alpha, beta, and gamma.
        
        Parameters:
            alpha: rotation (in radians) around z-axis
            beta: rotation (in radians) about the y'-axis
            gamma: rotation (in radians) about the z''-axis
        """
        rotated_rho_t = []
        # Flip to loop over all rho: each column of rho_t is one flattened density
        # matrix at a single time step.
        for rho in np.transpose(self.rho_t):
            new_rho = np.zeros((self.n*self.n, 1), dtype = complex) # Placeholder
            # Copy the row into the (n*n, 1) column form expected by the rotation helper.
            for i, element in enumerate(new_rho):
                new_rho[i, 0] = rho[i]
            new_rho = rotateFlatDensityMatrix(new_rho, self.n, self.E, self.G, alpha, beta, gamma)
            rotated_rho_t.append(new_rho)
        # Flip this back to the structure of rho_t (indexed by flattened element
        # first, time step second).
        self.rho_t = np.transpose(rotated_rho_t)[0]
    def timeEvolution(self, time, beam_profile_averaging = None, doppler_averaging = None,
                    print_eq = None, detuning = None, atomic_velocity = None, r_sigma = None,
                    n_beam_averaging = None, doppler_width = None, doppler_detunings = None,
                    pretty_print_eq = None, pretty_print_eq_tex = None,
                    pretty_print_eq_pdf = None, pretty_print_eq_filename = None):
        """Evolves the laser-atom system over time.
        Produces a list of flattened 2D density matrices which correspond to the time array. This is stored in rho_t.
        Parameters:
            time (list): Array of the times in the time evolution in nanoseconds e.g. time = [0, 1, 2] to simulate up to 2 ns
            beam_profile_averaging (bool): Turn on averaging over a Gaussian TEM00 laser beam profile. Must have n_beam_averaging to state the number of averages to take over the beam profile and r_sigma to characterise the standard deviation of the Gaussian beam.
            doppler_averaging (bool): Turn on averaging over the doppler profile of the atoms. Must have doppler_width and doppler_detunings as well.
            print_eq (bool): Turn on printing the coupled differential equations numerically.
            atomic_velocity (float): The velocity component of the atoms in the direction of the laser beam in metres/second. This is used for doppler shifting the energy levels.
            r_sigma (float): The radial distance to the 2D standard deviation in millimetres of the Gaussian beam profile of the laser.
            n_beam_averaging (int): The number of times the beam profile will be split to average over. The higher the number, the more accurate the beam profile averaging but it will be slower.
            doppler_width (float): The doppler width of the beam profile in Grad/s.
            doppler_detunings (list): List of the doppler detunings creating a doppler profile. This list will be averaged over. Must go from a -ve detuning to a +ve detuning. All detunings are in Grad/s.
            pretty_print_eq (bool): Turn on printing of the coupled differential equations symbolically using Sympy. Only available using an IPython environment e.g. Jupyter.
            pretty_print_eq_tex (bool): Produces a .tex file with the equations of motion printed symbolically using Sympy. Must input a filename for the .tex file with the keyword "pretty_print_eq_filename".
            pretty_print_eq_pdf (bool): Produces a .pdf file with the equations of motion printed symbolically using Sympy and pdflatex. Must have pdflatex installed on your system. Must input a filename for the .tex file with the keyword "pretty_print_eq_filename".
        Note:
            Must have laser_power attribute for Gaussian averaging of the beam and must have laser_intensity attribute when not Gaussian averaging. Also, cannot print equations in doppler or Gaussian averaging.
        """
        # Keep the requested time axis so accessors/exports (e.g. saveToCSV) can use it.
        self.time = time # Save the time
        # Pull the system attributes into locals purely to keep the solver calls below readable.
        n = self.n
        E = self.E
        G = self.G
        Q = self.Q
        tau = self.tau
        laser_power = self.laser_power
        laser_intensity = self.laser_intensity
        laser_wavelength = self.laser_wavelength
        rho_0 = self.rho_0
        tau_f = self.tau_f
        tau_b = self.tau_b
        rabi_scaling = self.rabi_scaling
        rabi_factors = self.rabi_factors
        # If rho0 is not populated then set equal ground state populations
        if(not rho_0.any()):
            print("Populating ground states equally as the initial condition.")
            population = 1/len(G)
            for g in G:
                self.setRho_0(g, g, population)
        # Resize rho_t
        # One row per flattened density-matrix element, one column per time-step.
        self.rho_t = [ [0 for j in range(len(time))] for i in range(self.n*self.n)]
        # Dispatch to one of four solvers depending on which averagings are requested:
        # (1) Gaussian beam + Doppler, (2) Gaussian beam only, (3) Doppler only, (4) neither.
        if((beam_profile_averaging) and (doppler_averaging)):
            if(laser_power):
                timeEvolutionGaussianAndDopplerAveraging(n, E, G, Q, self.Q_decay, tau, laser_power, r_sigma, n_beam_averaging,
                                laser_wavelength, doppler_width, doppler_detunings, time, rho_0, self.rho_t,
                                tau_f = tau_f, tau_b = tau_b, detuning = detuning, rabi_scaling = rabi_scaling,
                                rabi_factors = rabi_factors, print_eq = print_eq, atomic_velocity = atomic_velocity,
                                pretty_print_eq = pretty_print_eq, pretty_print_eq_tex = pretty_print_eq_tex,
                                pretty_print_eq_pdf = pretty_print_eq_pdf, pretty_print_eq_filename = pretty_print_eq_filename)
            else:
                print("Need to have laser_power attribute in LaserAtomSystem to use beam profile avergaing! Equate <LaserAtomSystem>.laser_power to a power in milliWatts.")
        elif(beam_profile_averaging):
            if(laser_power):
                timeEvolutionGaussianAveraging(n, E, G, Q, self.Q_decay, tau, laser_power, r_sigma, n_beam_averaging, laser_wavelength,
                                time, rho_0, self.rho_t, tau_f = tau_f, tau_b = tau_b, detuning = detuning,
                                rabi_scaling = rabi_scaling, rabi_factors = rabi_factors, print_eq = print_eq,
                                atomic_velocity = atomic_velocity, pretty_print_eq = pretty_print_eq,
                                pretty_print_eq_tex = pretty_print_eq_tex, pretty_print_eq_pdf = pretty_print_eq_pdf,
                                pretty_print_eq_filename = pretty_print_eq_filename)
            else:
                print("Need to have laser_power attribute in LaserAtomSystem to use beam profile avergaing! Equate <LaserAtomSystem>.laser_power to the power of the laser in mW.")
        elif(doppler_averaging):
            if(laser_intensity):
                timeEvolutionDopplerAveraging(n, E, G, Q, self.Q_decay, tau, laser_intensity, laser_wavelength, doppler_width,
                                doppler_detunings, time, rho_0, self.rho_t, tau_f = tau_f, tau_b = tau_b,
                                detuning = detuning, rabi_scaling = rabi_scaling, rabi_factors = rabi_factors,
                                print_eq = print_eq, atomic_velocity = atomic_velocity, pretty_print_eq = pretty_print_eq,
                                pretty_print_eq_tex = pretty_print_eq_tex, pretty_print_eq_pdf = pretty_print_eq_pdf,
                                pretty_print_eq_filename = pretty_print_eq_filename)
            else:
                print("Need to have laser_intensity attribute in LaserAtomSystem! Equate <LaserAtomSystem>.laser_intensity to the intensity of the laser in mW/mm^2.")
        else:
            if(laser_intensity):
                # NOTE: this calls the module-level timeEvolution solver, not this method.
                timeEvolution(n, E, G, Q, self.Q_decay, tau, laser_intensity, laser_wavelength, time, rho_0, self.rho_t,
                                tau_f = tau_f, tau_b = tau_b, detuning = detuning, rabi_scaling = rabi_scaling,
                                rabi_factors = rabi_factors, print_eq = print_eq, atomic_velocity = atomic_velocity,
                                pretty_print_eq = pretty_print_eq, pretty_print_eq_tex = pretty_print_eq_tex,
                                pretty_print_eq_pdf = pretty_print_eq_pdf, pretty_print_eq_filename = pretty_print_eq_filename)
            else:
                print("Need to have laser_intensity attribute in LaserAtomSystem! Equate <LaserAtomSystem>.laser_intensity to the intensity of the laser in mW/mm^2.")
def saveToCSV(self, filename, precision = None):
"""Saves rho_t as a csv file.
Parameters:
filename (string): Name of the csv file created.
precision (int): Precision of numbers in decimal places.
Returns:
Void.
"""
saveRhotAsCSV(self.n, self.E, self.G, self.time, self.rho_t, filename, precision = None)
| [
"numpy.transpose",
"numpy.zeros"
] | [((3101, 3146), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, 1)'], {'dtype': 'complex'}), '((self.n * self.n, 1), dtype=complex)\n', (3109, 3146), True, 'import numpy as np\n'), ((4413, 4437), 'numpy.transpose', 'np.transpose', (['self.rho_t'], {}), '(self.rho_t)\n', (4425, 4437), True, 'import numpy as np\n'), ((5040, 5064), 'numpy.transpose', 'np.transpose', (['self.rho_t'], {}), '(self.rho_t)\n', (5052, 5064), True, 'import numpy as np\n'), ((7640, 7685), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, 1)'], {'dtype': 'complex'}), '((self.n * self.n, 1), dtype=complex)\n', (7648, 7685), True, 'import numpy as np\n'), ((9024, 9048), 'numpy.transpose', 'np.transpose', (['self.rho_t'], {}), '(self.rho_t)\n', (9036, 9048), True, 'import numpy as np\n'), ((4523, 4568), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, 1)'], {'dtype': 'complex'}), '((self.n * self.n, 1), dtype=complex)\n', (4531, 4568), True, 'import numpy as np\n'), ((5150, 5195), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, 1)'], {'dtype': 'complex'}), '((self.n * self.n, 1), dtype=complex)\n', (5158, 5195), True, 'import numpy as np\n'), ((9072, 9117), 'numpy.zeros', 'np.zeros', (['(self.n * self.n, 1)'], {'dtype': 'complex'}), '((self.n * self.n, 1), dtype=complex)\n', (9080, 9117), True, 'import numpy as np\n'), ((9435, 9462), 'numpy.transpose', 'np.transpose', (['rotated_rho_t'], {}), '(rotated_rho_t)\n', (9447, 9462), True, 'import numpy as np\n')] |
import csv
import io
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn import preprocessing
import matplotlib as mpl
import matplotlib.pyplot as plt
from google.colab import files
# NOTE(review): `uploadTrain` vs `uploadedTest` naming is inconsistent —
# presumably both are dicts returned by google.colab files.upload(); confirm.
train = pd.read_csv(io.StringIO(uploadTrain['asteroid_training.csv'].decode('utf-8'))).replace('\n','', regex=True).values
test = pd.read_csv(io.StringIO(uploadedTest['asteroid_testing.csv'].decode('utf-8'))).replace('\n','', regex=True).values
trainX = train[:, 3:] #respective reflectance values
trainY = train[:,2] #label (composition type) of each asteroids in training set
testX = test[:, 3:] #respective reflectance values
testY = test[:,2] #label (composition type) of each asteroids in testing set
#Supervised Neural Network model. 3 Layers.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(46, activation='relu')) #relu layer
model.add(tf.keras.layers.Dense(46, activation='relu')) #relu layer
model.add(tf.keras.layers.Dense(4, activation='softmax')) #softmax layer for prediction
# NOTE(review): trainY/testY look like integer class labels, but
# 'categorical_crossentropy' expects one-hot targets — confirm whether
# sparse_categorical_crossentropy was intended.
model.compile(tf.train.AdamOptimizer(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
#actual training and validation
history = model.fit(trainX, trainY, epochs=200, batch_size=46,
                    validation_data=(testX, testY))
# basic graph of accuracy and loss.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
print('PREDICTION TEST ON CERES ASTEROID')

def matchOutputToClassCeres():
    """Predict the class of the first test asteroid (Ceres) and report it."""
    probabilities = model.predict(testX)[0]
    print(probabilities)
    predicted_class = np.argmax(probabilities)
    print(predicted_class)
    print('PREDICITOIN TEST')  # (sic) typo kept to preserve the original output
    # Index -> taxonomy letter for the four softmax outputs.
    class_letters = ('C', 'E', 'X', 'S')
    if 0 <= predicted_class <= 3:
        print('I think Ceres belong to %s class' % class_letters[predicted_class])
    print('TRUE VALUE')
    print('Ceres belong to C class')

matchOutputToClassCeres()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"matplotlib.pyplot.legend",
"tensorflow.train.AdamOptimizer",
"tensorflow.keras.Sequential",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((771, 792), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (790, 792), True, 'import tensorflow as tf\n'), ((1325, 1357), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (1333, 1357), True, 'import matplotlib.pyplot as plt\n'), ((1358, 1394), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (1366, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1395, 1422), 'matplotlib.pyplot.title', 'plt.title', (['"""Model Accuracy"""'], {}), "('Model Accuracy')\n", (1404, 1422), True, 'import matplotlib.pyplot as plt\n'), ((1423, 1445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (1433, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1446, 1465), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1456, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1513), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (1476, 1513), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1522, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1526, 1559), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (1534, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1597), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (1568, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1621), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (1607, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (1632, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1641, 
1660), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1651, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1708), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (1671, 1708), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1719), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1717, 1719), True, 'import matplotlib.pyplot as plt\n'), ((803, 847), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(46)'], {'activation': '"""relu"""'}), "(46, activation='relu')\n", (824, 847), True, 'import tensorflow as tf\n'), ((871, 915), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(46)'], {'activation': '"""relu"""'}), "(46, activation='relu')\n", (892, 915), True, 'import tensorflow as tf\n'), ((939, 985), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (960, 985), True, 'import tensorflow as tf\n'), ((1031, 1060), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (1053, 1060), True, 'import tensorflow as tf\n'), ((1873, 1898), 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), '(predictions[0])\n', (1882, 1898), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import alphaBetaLab.abHighResAlphaMatrix as hram
class testAbHighResAlphaMatrix(unittest.TestCase):

    def testWrapAroundDateline(self):
        """wrapAroundDateline should widen the 1-degree global grid by 10 columns."""
        lons = np.arange(-180, 180, 1)
        lats = np.arange(-90, 90, 1)
        alpha_grid = np.zeros((len(lats), len(lons)))
        # Mark a few columns so we can track where they land after wrapping.
        alpha_grid[:, 1] = 1
        alpha_grid[:, 4] = .9
        alpha_grid[:, -1] = .7
        alpha_grid[:, 356] = .5
        matrix = hram.abHighResAlphaMatrix(lons, lats, alpha_grid)
        self.assertEqual(360, len(matrix.xs))
        self.assertEqual(180, len(matrix.ys))
        self.assertEqual((180, 360), matrix.alphas.shape)
        matrix.wrapAroundDateline()
        self.assertEqual(370, len(matrix.xs))
        self.assertEqual(180, len(matrix.ys))
        self.assertEqual((180, 370), matrix.alphas.shape)
        # The first 10 and last 10 columns must mirror each other after wrapping.
        self.assertTrue(np.all(matrix.alphas[1, :10] == matrix.alphas[1, -10:]))
        self.assertEqual(.5, matrix.alphas[10, 1])
        self.assertEqual(.7, matrix.alphas[20, 4])
        self.assertEqual(1, matrix.alphas[20, 6])
        self.assertEqual(.9, matrix.alphas[20, 9])


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"alphaBetaLab.abHighResAlphaMatrix.abHighResAlphaMatrix",
"numpy.arange",
"numpy.all"
] | [((969, 984), 'unittest.main', 'unittest.main', ([], {}), '()\n', (982, 984), False, 'import unittest\n'), ((182, 205), 'numpy.arange', 'np.arange', (['(-180)', '(180)', '(1)'], {}), '(-180, 180, 1)\n', (191, 205), True, 'import numpy as np\n'), ((215, 236), 'numpy.arange', 'np.arange', (['(-90)', '(90)', '(1)'], {}), '(-90, 90, 1)\n', (224, 236), True, 'import numpy as np\n'), ((374, 415), 'alphaBetaLab.abHighResAlphaMatrix.abHighResAlphaMatrix', 'hram.abHighResAlphaMatrix', (['xs', 'ys', 'alphas'], {}), '(xs, ys, alphas)\n', (399, 415), True, 'import alphaBetaLab.abHighResAlphaMatrix as hram\n'), ((716, 763), 'numpy.all', 'np.all', (['(ha.alphas[1, :10] == ha.alphas[1, -10:])'], {}), '(ha.alphas[1, :10] == ha.alphas[1, -10:])\n', (722, 763), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import murnaghan2017 as m
def main():
    """
    Reads in energy data and polynomial fit parameters and passes them to the
    plot functions.

    Currently assumes that a and c are the lattice parameters, but this
    could be changed or generalized.
    """
    # read data from energies.dat: columns are a, b, c, volume, energy (Ha)
    energy_volume_data = np.loadtxt('energies.dat')
    a = energy_volume_data[:, 0]
    b = energy_volume_data[:, 1]
    c = energy_volume_data[:, 2]
    vols = energy_volume_data[:, 3]
    E = energy_volume_data[:, 4]  # Hartree
    with open('poly2d_parameters.dat') as poly2d_file:
        printed_results = poly2d_file.readlines()
    for i, line in enumerate(printed_results):
        if 'E_0 (Ha):' in line:
            E0 = float(line.split()[-1])  # not used?
        if 'lattice parameters (Bohr):' in line:
            # Bug fix: np.array(map(float, ...)) builds a useless 0-d object
            # array on Python 3 (map returns an iterator); materialise the
            # floats first. The comprehension works on Python 2 as well.
            minimum = np.array([float(v) for v in line.split()[-2:]])
        if 'polynomial coefficents' in line:
            next_line = printed_results[i + 1]
            coeff = np.array([float(v) for v in next_line.split()])
    # plot data only
    plot_data(a, c, E)
    plt.title('Raw data')
    plt.show(block=False)
    # plot data and fit
    fig, ax = plot_data(a, c, E)
    plt.title('Raw data, fit, and minimum')
    plot_fit(fig, ax, a, c, coeff, minimum=minimum)
    plt.show(block=False)
    # Block until the user presses return so the non-blocking figures stay open.
    # raw_input only exists on Python 2; fall back to input on Python 3.
    try:
        pause = raw_input
    except NameError:
        pause = input
    pause('...')
def plot_data(x, y, z):
    """Scatter the raw (a, c, energy) points in 3D and return (fig, ax)."""
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    axes.set_xlabel('a (bohr)')
    axes.set_ylabel('c (bohr)')
    axes.set_zlabel('energy (Ha)')
    axes.scatter(x, y, z)
    return fig, axes
def plot_fit(fig, ax, x, y, co, minimum=None):
    """Overlay the fitted 2D polynomial surface (and its minimum) on *ax*.

    co: coefficients for the fit
    minimum: local (global?) minimum of the polynomial
    Should be called after plot_data.
    """
    grid_a = np.linspace(x[0], x[-1], 100)
    grid_c = np.linspace(y[0], y[-1], 100)
    mesh_a, mesh_c = np.meshgrid(grid_a, grid_c)
    fitted_surface = m.poly2d(co, mesh_a, mesh_c)
    ax.plot_surface(mesh_a, mesh_c, fitted_surface, cmap='YlGnBu', alpha=0.7)
    if minimum is not None:
        minimum_energy = m.poly2d(co, minimum[0], minimum[1])
        ax.scatter(minimum[0], minimum[1], minimum_energy, color='r')
# Run the full read-and-plot pipeline only when executed as a script.
if __name__=='__main__':
    main()
| [
"matplotlib.pyplot.title",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.linspace",
"murnaghan2017.poly2d"
] | [((430, 456), 'numpy.loadtxt', 'np.loadtxt', (['"""energies.dat"""'], {}), "('energies.dat')\n", (440, 456), True, 'import numpy as np\n'), ((1257, 1278), 'matplotlib.pyplot.title', 'plt.title', (['"""Raw data"""'], {}), "('Raw data')\n", (1266, 1278), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1304), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1291, 1304), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1406), 'matplotlib.pyplot.title', 'plt.title', (['"""Raw data, fit, and minimum"""'], {}), "('Raw data, fit, and minimum')\n", (1376, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1484), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1471, 1484), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1553, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1969), 'numpy.linspace', 'np.linspace', (['x[0]', 'x[-1]', '(100)'], {}), '(x[0], x[-1], 100)\n', (1951, 1969), True, 'import numpy as np\n'), ((1982, 2011), 'numpy.linspace', 'np.linspace', (['y[0]', 'y[-1]', '(100)'], {}), '(y[0], y[-1], 100)\n', (1993, 2011), True, 'import numpy as np\n'), ((2022, 2047), 'numpy.meshgrid', 'np.meshgrid', (['x_fit', 'y_fit'], {}), '(x_fit, y_fit)\n', (2033, 2047), True, 'import numpy as np\n'), ((2056, 2074), 'murnaghan2017.poly2d', 'm.poly2d', (['co', 'X', 'Y'], {}), '(co, X, Y)\n', (2064, 2074), True, 'import murnaghan2017 as m\n'), ((2202, 2238), 'murnaghan2017.poly2d', 'm.poly2d', (['co', 'minimum[0]', 'minimum[1]'], {}), '(co, minimum[0], minimum[1])\n', (2210, 2238), True, 'import murnaghan2017 as m\n')] |
import netsquid as ns
import netsquid.components.instructions as instr
import numpy as np
from netsquid.components import QuantumProgram
from netsquid.protocols import NodeProtocol, Signals
from netsquid.nodes import Node, Network, DirectConnection
from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction
from netsquid.components import QuantumChannel, QuantumProgram, ClassicalChannel, FibreDelayModel
class GenerateEntanglement(QuantumProgram):
    """Quantum program that prepares an entangled pair on two fresh qubits."""

    # Number of memory positions this program operates on.
    default_num_qubits = 2

    def program(self):
        control, target = self.get_qubit_indices(2)
        # |00> -> H on control -> CNOT: the standard Bell-pair circuit.
        self.apply(instr.INSTR_INIT, [control, target])
        self.apply(instr.INSTR_H, control)
        self.apply(instr.INSTR_CNOT, [control, target])
        yield self.run()
class EncodeQubitProgram(QuantumProgram):
    """Quantum program that encodes one key bit in the basis chosen at construction."""

    default_num_qubits = 1

    def __init__(self, base, bit):
        super().__init__()
        self.base = base
        self.bit = bit

    def program(self):
        qubit, = self.get_qubit_indices(1)
        self.apply(instr.INSTR_INIT, qubit)
        # X flips |0> -> |1> when the bit is set; H rotates into the X basis.
        if self.bit == 1:
            self.apply(instr.INSTR_X, qubit)
        if self.base == 1:
            self.apply(instr.INSTR_H, qubit)
        yield self.run()
class KeyReceiverProtocol(NodeProtocol):
    """
    Protocol for the receiver of the key.

    Receives qubits one at a time on the quantum port, measures each in a
    randomly chosen basis, then exchanges basis/index information on the
    classical port to sift the final key.
    """
    def __init__(self, node, key_size=10, port_names=("qubitIO", "classicIO")):
        super().__init__(node)
        self.node = node
        self.q_port = port_names[0]   # quantum channel port name
        self.c_port = port_names[1]   # classical channel port name
        self.key_size = key_size
        self.key = None               # filled in by run() after sifting
    def run(self):
        # Select random bases
        # NOTE(review): receiver draws bases from {1, 2, 3} while the sender
        # draws from {0, 1, 2}; only 1 and 2 can ever match in the sift below —
        # confirm this asymmetry is intended.
        bases = 1 + np.random.randint(3, size=self.key_size)
        results = []
        for i in range(self.key_size):
            # Await a qubit from Alice
            yield self.await_port_input(self.node.ports[self.q_port])
            # Measure in random basis
            # Bases 1 and 3 measure in X; basis 2 measures in Z.
            if bases[i] == 1:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE_X, output_key="M")
            if bases[i] == 2:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE, output_key="M")
            if bases[i] == 3:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE_X, output_key="M")
            yield self.await_program(self.node.qmemory)
            # Outcomes for bases 2 and 3 are recorded inverted; basis 1 as-is.
            if bases[i] == 3:
                results.append(1 - res[0]['M'][0])
            if bases[i] == 2:
                results.append(1 - res[0]['M'][0])
            if bases[i] == 1:
                results.append(res[0]['M'][0])
            self.node.qmemory.reset()
            # Send ACK to Alice to trigger next qubit send (except in last transmit)
            if i < self.key_size - 1:
                self.node.ports[self.c_port].tx_output('ACK')
        # All qubits arrived, send bases
        self.node.ports[self.c_port].tx_output(bases)
        # Await matched indices from Alice and process key
        yield self.await_port_input(self.node.ports[self.c_port])
        matched_indices = self.node.ports[self.c_port].rx_input().items
        final_key = []
        for i in matched_indices:
            final_key.append(results[i])
        self.key = final_key
        self.send_signal(signal_label=Signals.SUCCESS, result=final_key)
class InitStateProgram(QuantumProgram):
    """
    Program to create a qubit and transform it to the |1> state.
    """
    # NOTE(review): two qubit positions are requested but only q1 is ever
    # initialised/used below; confirm whether default_num_qubits should be 1.
    default_num_qubits = 2
    def program(self):
        q1, q2, = self.get_qubit_indices(2)
        self.apply(instr.INSTR_INIT, q1)
        # X gate takes the freshly initialised |0> to |1>.
        self.apply(instr.INSTR_X, q1)
        yield self.run()
class KeySenderProtocol(NodeProtocol):
    """
    Protocol for the sender of the key.

    Creates an entangled pair per round, sends one half to Bob, measures the
    kept half in a random basis, then sifts the key over the classical port.
    """
    def __init__(self, node, key_size=10, port_names=("qubitIO", "classicIO")):
        super().__init__(node)
        self.node = node
        self.q_port = port_names[0]   # quantum channel port name
        self.c_port = port_names[1]   # classical channel port name
        self.key_size = key_size
        self.key = None               # filled in by run() after sifting
    def run(self):
        # NOTE(review): secret_key drives only the loop count — the `bit`
        # values are never used; the key actually comes from the measurement
        # outcomes of the kept entangled halves. Confirm this is intended.
        secret_key = np.random.randint(2, size=self.key_size)
        bases = list(np.random.randint(3, size=self.key_size))
        results = []
        # Transmit encoded qubits to Bob
        for i, bit in enumerate(secret_key):
            # Prepare an entangled pair on positions 0 and 1, send half (1) to Bob.
            self.node.qmemory.execute_program(GenerateEntanglement(), qubit_mapping=[0, 1])
            yield self.await_program(self.node.qmemory)
            q = self.node.qmemory.pop(1)
            self.node.ports[self.q_port].tx_output(q)
            # Bases 0 and 2 measure the kept half in Z; basis 1 measures in X.
            if bases[i] == 0:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE, output_key="M")
            if bases[i] == 1:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE_X, output_key="M")
            if bases[i] == 2:
                res = self.node.qmemory.execute_instruction(instr.INSTR_MEASURE, output_key="M")
            yield self.await_program(self.node.qmemory)
            # Basis 2 outcomes are recorded inverted.
            if bases[i] == 2:
                results.append(1 - res[0]['M'][0])
            else:
                results.append(res[0]['M'][0])
            # Wait for Bob's ACK before the next round (except after the last qubit).
            if i < self.key_size - 1:
                yield self.await_port_input(self.node.ports[self.c_port])
        # Await response from Bob
        yield self.await_port_input(self.node.ports[self.c_port])
        bob_bases = self.node.ports[self.c_port].rx_input().items[0]
        # Sift: keep only rounds where both parties used the same basis value.
        matched_indices = []
        for i in range(self.key_size):
            if bob_bases[i] == bases[i]:
                matched_indices.append(i)
        self.node.ports[self.c_port].tx_output(matched_indices)
        final_key = []
        for i in matched_indices:
            final_key.append(results[i])
        self.key = final_key
        self.send_signal(signal_label=Signals.SUCCESS, result=final_key)
def create_processor():
    """Factory to create a quantum processor for each end node.

    The processor has five memory positions and the physical instructions
    required by the quantum programs in this module.
    """
    # (instruction, duration, parallel) for every supported physical gate.
    gate_specs = (
        (instr.INSTR_INIT, 3, True),
        (instr.INSTR_H, 1, True),
        (instr.INSTR_X, 1, True),
        (instr.INSTR_Z, 1, True),
        (instr.INSTR_CNOT, 1, True),
        (instr.INSTR_MEASURE, 7, False),
        (instr.INSTR_MEASURE_X, 10, False),
    )
    physical_instructions = [
        PhysicalInstruction(gate, duration=duration, parallel=parallel)
        for gate, duration, parallel in gate_specs
    ]
    return QuantumProcessor("quantum_processor",
                            num_positions=5,
                            phys_instructions=physical_instructions)
def generate_network():
    """
    Generate the network. For BB84, we need a quantum and classical channel.
    """
    # NOTE(review): "BB92etwork" looks like a typo (BB84Network/BB92Network?);
    # it is a runtime identifier so it is left untouched here — confirm intent.
    network = Network("BB92etwork")
    alice = Node("alice", qmemory=create_processor())
    bob = Node("bob", qmemory=create_processor())
    network.add_nodes([alice, bob])
    # Bidirectional quantum channel with a fixed 10-unit delay each way.
    p_ab, p_ba = network.add_connection(alice,
                                        bob,
                                        label="q_chan",
                                        channel_to=QuantumChannel('AqB', delay=10),
                                        channel_from=QuantumChannel('BqA', delay=10),
                                        port_name_node1="qubitIO",
                                        port_name_node2="qubitIO")
    # Map the qubit input port from the above channel to the memory index 0 on Bob"s
    # side
    alice.ports[p_ab].forward_input(alice.qmemory.ports["qin0"])
    bob.ports[p_ba].forward_input(bob.qmemory.ports["qin0"])
    # Matching classical channel for ACKs, bases and sifted indices.
    network.add_connection(alice,
                           bob,
                           label="c_chan",
                           channel_to=ClassicalChannel('AcB', delay=10),
                           channel_from=ClassicalChannel('BcA', delay=10),
                           port_name_node1="classicIO",
                           port_name_node2="classicIO")
    return network
# Wire up the two-node network, run both key-exchange protocols to completion,
# then print the sifted keys from each side.
if __name__ == '__main__':
    n = generate_network()
    node_a = n.get_node("alice")
    node_b = n.get_node("bob")
    p1 = KeySenderProtocol(node_a, key_size=100)
    p2 = KeyReceiverProtocol(node_b, key_size=100)
    p1.start()
    p2.start()
    # ns.logger.setLevel(4)
    stats = ns.sim_run()
    print(len(p1.key))
    print(p1.key)
    print(p2.key)
| [
"netsquid.sim_run",
"netsquid.components.ClassicalChannel",
"netsquid.components.qprocessor.PhysicalInstruction",
"netsquid.components.qprocessor.QuantumProcessor",
"netsquid.components.QuantumChannel",
"numpy.random.randint",
"netsquid.nodes.Network"
] | [((6824, 6924), 'netsquid.components.qprocessor.QuantumProcessor', 'QuantumProcessor', (['"""quantum_processor"""'], {'num_positions': '(5)', 'phys_instructions': 'physical_instructions'}), "('quantum_processor', num_positions=5, phys_instructions=\n physical_instructions)\n", (6840, 6924), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((7152, 7173), 'netsquid.nodes.Network', 'Network', (['"""BB92etwork"""'], {}), "('BB92etwork')\n", (7159, 7173), False, 'from netsquid.nodes import Node, Network, DirectConnection\n'), ((8710, 8722), 'netsquid.sim_run', 'ns.sim_run', ([], {}), '()\n', (8720, 8722), True, 'import netsquid as ns\n'), ((4267, 4307), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'self.key_size'}), '(2, size=self.key_size)\n', (4284, 4307), True, 'import numpy as np\n'), ((6283, 6347), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_INIT'], {'duration': '(3)', 'parallel': '(True)'}), '(instr.INSTR_INIT, duration=3, parallel=True)\n', (6302, 6347), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6358, 6419), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_H'], {'duration': '(1)', 'parallel': '(True)'}), '(instr.INSTR_H, duration=1, parallel=True)\n', (6377, 6419), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6430, 6491), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_X'], {'duration': '(1)', 'parallel': '(True)'}), '(instr.INSTR_X, duration=1, parallel=True)\n', (6449, 6491), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6502, 6563), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_Z'], {'duration': '(1)', 'parallel': '(True)'}), '(instr.INSTR_Z, duration=1, 
parallel=True)\n', (6521, 6563), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6574, 6638), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_CNOT'], {'duration': '(1)', 'parallel': '(True)'}), '(instr.INSTR_CNOT, duration=1, parallel=True)\n', (6593, 6638), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6649, 6717), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_MEASURE'], {'duration': '(7)', 'parallel': '(False)'}), '(instr.INSTR_MEASURE, duration=7, parallel=False)\n', (6668, 6717), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((6728, 6799), 'netsquid.components.qprocessor.PhysicalInstruction', 'PhysicalInstruction', (['instr.INSTR_MEASURE_X'], {'duration': '(10)', 'parallel': '(False)'}), '(instr.INSTR_MEASURE_X, duration=10, parallel=False)\n', (6747, 6799), False, 'from netsquid.components.qprocessor import QuantumProcessor, PhysicalInstruction\n'), ((1826, 1866), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'size': 'self.key_size'}), '(3, size=self.key_size)\n', (1843, 1866), True, 'import numpy as np\n'), ((4330, 4370), 'numpy.random.randint', 'np.random.randint', (['(3)'], {'size': 'self.key_size'}), '(3, size=self.key_size)\n', (4347, 4370), True, 'import numpy as np\n'), ((7522, 7553), 'netsquid.components.QuantumChannel', 'QuantumChannel', (['"""AqB"""'], {'delay': '(10)'}), "('AqB', delay=10)\n", (7536, 7553), False, 'from netsquid.components import QuantumChannel, QuantumProgram, ClassicalChannel, FibreDelayModel\n'), ((7609, 7640), 'netsquid.components.QuantumChannel', 'QuantumChannel', (['"""BqA"""'], {'delay': '(10)'}), "('BqA', delay=10)\n", (7623, 7640), False, 'from netsquid.components import QuantumChannel, QuantumProgram, ClassicalChannel, FibreDelayModel\n'), ((8155, 8188), 
'netsquid.components.ClassicalChannel', 'ClassicalChannel', (['"""AcB"""'], {'delay': '(10)'}), "('AcB', delay=10)\n", (8171, 8188), False, 'from netsquid.components import QuantumChannel, QuantumProgram, ClassicalChannel, FibreDelayModel\n'), ((8231, 8264), 'netsquid.components.ClassicalChannel', 'ClassicalChannel', (['"""BcA"""'], {'delay': '(10)'}), "('BcA', delay=10)\n", (8247, 8264), False, 'from netsquid.components import QuantumChannel, QuantumProgram, ClassicalChannel, FibreDelayModel\n')] |
#!/usr/bin/python
"""
Builder Class generic Male-Female comparison plot
"""
## MIT License
##
## Copyright (c) 2017, <NAME>
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__copyright__ = 'copyright '
__credits__ = ['<NAME>']
__license__ = "MIT"
__version__ = ''
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = ''
from .abcOverallPlotBuilder import abcOverallPlotBuilder
import numpy as np
class BuilderGenericOverallMFPlot(abcOverallPlotBuilder):
    """Concrete overall-plot builder that compares male and female curves
    for one or more models, with empirical error bands and dashed
    ground-truth (data) lines."""

    def draw_lines(self):
        """Draw one line (plus circle markers) per model and per sex."""
        coords = self.coordinates
        cfg = self.settings
        xs = coords['xval']
        for idx in range(coords['number_of_models']):
            # Male first, then female, so legend order matches the original.
            for sex in ('male', 'female'):
                ys = coords[sex + '_yval'][idx]
                label = cfg['mf_' + sex + '_label'][idx]
                color = cfg['mf_' + sex + '_color'][idx]
                self.plot.line(xs, ys,
                               line_width=cfg['line_width'],
                               legend=label,
                               line_color=color)
                self.plot.circle(xs, ys,
                                 size=3,
                                 line_color=color,
                                 legend=label)
        # Clicking a legend entry toggles visibility of its glyphs.
        self.plot.legend.click_policy = "hide"

    def draw_error_intervals(self):
        """Shade the empirical lower/upper bound band per model and per sex."""
        coords = self.coordinates
        cfg = self.settings
        xs = np.asarray(coords['xval'])
        # Closed polygon: x runs forward along the lower bound, then
        # backward along the upper bound.
        band_x = np.append(xs, xs[::-1])
        for idx in range(coords['number_of_models']):
            for sex in ('male', 'female'):
                band_y = np.append(coords[sex + '_empirical_lower_bound'][idx],
                                   coords[sex + '_empirical_upper_bound'][idx][::-1])
                self.plot.patch(band_x,
                                band_y,
                                color=cfg['mf_' + sex + '_color'][idx],
                                legend=cfg['mf_' + sex + '_label'][idx],
                                fill_alpha=cfg['transparency'])

    def draw_data_lines(self):
        """Draw the observed (ground-truth) curves as dashed lines."""
        coords = self.coordinates
        cfg = self.settings
        for sex in ('male', 'female'):
            self.plot.line(coords['xval'],
                           coords[sex + '_ground_truth'],
                           line_color=cfg['mf_' + sex + '_color'][0],
                           legend=cfg['mf_' + sex + '_data_label'][0],
                           line_width=cfg['line_width'],
                           line_dash=cfg['data_line_style'])

    def draw_target(self):
        """This plot type draws no target marker."""
        pass

    def draw_misc(self):
        """This plot type draws no additional decorations."""
        pass


if __name__ == "__main__":
    print('This is an abstract base class for building plots')
| [
"numpy.append",
"numpy.asarray"
] | [((3006, 3042), 'numpy.asarray', 'np.asarray', (["self.coordinates['xval']"], {}), "(self.coordinates['xval'])\n", (3016, 3042), True, 'import numpy as np\n'), ((3064, 3095), 'numpy.append', 'np.append', (['x_data', 'x_data[::-1]'], {}), '(x_data, x_data[::-1])\n', (3073, 3095), True, 'import numpy as np\n'), ((3119, 3241), 'numpy.append', 'np.append', (["self.coordinates['male_empirical_lower_bound'][k]", "self.coordinates['male_empirical_upper_bound'][k][::-1]"], {}), "(self.coordinates['male_empirical_lower_bound'][k], self.\n coordinates['male_empirical_upper_bound'][k][::-1])\n", (3128, 3241), True, 'import numpy as np\n'), ((3289, 3415), 'numpy.append', 'np.append', (["self.coordinates['female_empirical_lower_bound'][k]", "self.coordinates['female_empirical_upper_bound'][k][::-1]"], {}), "(self.coordinates['female_empirical_lower_bound'][k], self.\n coordinates['female_empirical_upper_bound'][k][::-1])\n", (3298, 3415), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
stars = np.genfromtxt("planetfits_revised.csv", delimiter=",", names=True, dtype=None)

# Each fit variant: (column/filename prefix, fit description for the title).
# genfromtxt with dtype=None stores string columns as bytes, hence the
# b"Yes"/b"No"/b"Marginal" comparisons below.
_DETECTORS = [
    ("nn", "Fitting: Period, K, Time of Conjunction; ecc = 0"),
    ("en", "Fitting: Period, K, Time of Conjunction"),
    ("5n", "Fitting: Period, K, Time of Conjunction, Ecc (max 0.5)"),
    ("fn", "Fitting: Period, K, Time of Conjunction, Ecc"),
]


def _select(column, status, minplanets, maxplanets):
    """Return (per, a, K, mass) lists for planets whose recovery flag in
    `column` equals `status` and whose system size lies within
    [minplanets, maxplanets]."""
    rows = [s for s in stars
            if s[column] == status and minplanets <= s["num"] <= maxplanets]
    return ([s["per"] for s in rows], [s["a"] for s in rows],
            [s["K"] for s in rows], [s["PlanetMass"] for s in rows])


def _plot_detector(prefix, title, minplanets, maxplanets):
    """Draw and save the mass-vs-semi-major-axis ("<prefix>am") and
    K-vs-period ("<prefix>pk") recovery plots for one fit variant and one
    system-size range.  Filenames and styling match the original script."""
    column = prefix + "_Favored"
    yes_per, yes_a, yes_k, yes_mass = _select(column, b"Yes", minplanets, maxplanets)
    no_per, no_a, no_k, no_mass = _select(column, b"No", minplanets, maxplanets)
    mar_per, mar_a, mar_k, mar_mass = _select(column, b"Marginal", minplanets, maxplanets)
    full_title = title + " (" + str(minplanets) + "-" + str(maxplanets) + " planet systems)"

    # Mass vs. semi-major axis: full sample in gray, classified planets on top.
    fig, am = plt.subplots()
    am.scatter(stars["a"], stars["PlanetMass"], color="gray", s=1)
    am.scatter(yes_a, yes_mass, label="Recovered", color="blue")
    am.scatter(mar_a, mar_mass, label="Marginal", color="red")
    am.scatter(no_a, no_mass, label="Excluded", color="black")
    am.set_xscale('log')
    am.set_yscale('log')
    am.set_xlabel("Semi-Major Axis (au)")
    am.set_xlim(6e-2, 3e1)
    am.set_ylabel("Mass (Earth-Masses)")
    am.set_ylim(8e-2, 1e4)
    am.set_title(full_title)
    am.legend(loc=2)
    plt.savefig(prefix + "am" + str(minplanets) + str(maxplanets) + ".png")

    # Semi-amplitude vs. orbital period, same classification scheme.
    fig, pk = plt.subplots()
    pk.scatter(stars["per"], stars["K"], color="gray", s=1)
    pk.scatter(yes_per, yes_k, label="Recovered", color="blue")
    pk.scatter(mar_per, mar_k, label="Marginal", color="red")
    pk.scatter(no_per, no_k, label="Excluded", color="black")
    pk.set_xscale('log')
    pk.set_yscale('log')
    pk.set_ylabel("Semi-Amplitude (m/s)")
    pk.set_ylim(8e-4, 2e2)
    pk.set_xlabel("Period (Days)")
    pk.set_xlim(6, 1e5)
    pk.set_title(full_title)
    pk.legend(loc=2)
    plt.savefig(prefix + "pk" + str(minplanets) + str(maxplanets) + ".png")


# System-size ranges to plot.
# nplanets = [(1, 3), (1, 4), (4, 4)]
nplanets = [(1, 6), (5, 6), (5, 5), (6, 6)]
for minplanets, maxplanets in nplanets:
    # Same figure order as the original: nn, en, 5n, fn; "am" before "pk".
    for prefix, title in _DETECTORS:
        _plot_detector(prefix, title, minplanets, maxplanets)
| [
"matplotlib.pyplot.subplots",
"numpy.genfromtxt",
"matplotlib.pyplot.savefig"
] | [((60, 138), 'numpy.genfromtxt', 'np.genfromtxt', (['"""planetfits_revised.csv"""'], {'delimiter': '""","""', 'names': '(True)', 'dtype': 'None'}), "('planetfits_revised.csv', delimiter=',', names=True, dtype=None)\n", (73, 138), True, 'import numpy as np\n'), ((2114, 2128), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2126, 2128), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2854), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (2844, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2884), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2882, 2884), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3591), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3581, 3591), True, 'import matplotlib.pyplot as plt\n'), ((5385, 5399), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5397, 5399), True, 'import matplotlib.pyplot as plt\n'), ((6095, 6116), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (6106, 6116), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6146), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6144, 6146), True, 'import matplotlib.pyplot as plt\n'), ((6823, 6844), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (6834, 6844), True, 'import matplotlib.pyplot as plt\n'), ((8651, 8665), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8663, 8665), True, 'import matplotlib.pyplot as plt\n'), ((9395, 9416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (9406, 9416), True, 'import matplotlib.pyplot as plt\n'), ((9433, 9447), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9445, 9447), True, 'import matplotlib.pyplot as plt\n'), ((10158, 10179), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (10169, 10179), True, 'import matplotlib.pyplot as 
plt\n'), ((12112, 12126), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12124, 12126), True, 'import matplotlib.pyplot as plt\n'), ((12827, 12848), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (12838, 12848), True, 'import matplotlib.pyplot as plt\n'), ((12864, 12878), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12876, 12878), True, 'import matplotlib.pyplot as plt\n'), ((13560, 13581), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (13571, 13581), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 28 10:53:05 2018
@author: <NAME>
"""
#
import glob
import cv2
from PIL import Image
import os
import sys
import dlib
from skimage import io
from imutils import face_utils
import pandas as pd
import numpy as np
# ---------------------------------------------------------------------------
# Eye-overlay demo setup: load the overlay PNG and its alpha mask, load the
# dlib detectors/predictor, and read the input still image.
# ---------------------------------------------------------------------------
# Load the overlay with its alpha channel (flag -1 = IMREAD_UNCHANGED, BGRA).
imgEye = cv2.imread("/home/senscript/Music/Face-Overlay-AR-master/eye3.png",-1)
cv2.imshow("eye",imgEye)
# Alpha plane is used as the blending mask for the overlay.
orig_mask = imgEye[:,:,3]
cv2.imshow("orig_mask", orig_mask)
# Inverted mask selects the background (non-overlay) pixels.
orig_mask_inv = cv2.bitwise_not(orig_mask)
# Keep only the BGR colour planes of the overlay.
imgEye = imgEye[:,:,0:3]
# Landmark predictor for the combined eye landmarks (used in the loop below).
predictor = dlib.shape_predictor("predictor_eye_combine_landmarks.dat")
# Custom object detector (eye pairs) and the stock frontal-face detector.
detector = dlib.simple_object_detector("detector_eye.svm1")
detector2 = dlib.get_frontal_face_detector()
# win_det = dlib.image_window()
# win_det.set_image(detector)
# Still-image input; the video-capture variant is left commented out.
# cap = cv2.VideoCapture("/home/ananthu/Downloads/outpy.avi")
frame = cv2.imread("/home/senscript/Desktop/FaceSwap_last/face_swapping/hh.jpg")
# Clean copy, used later to restore everything outside the blended eye region.
frame1=frame.copy()
# im_pil = Image.fromarray(cap)
i = 0  # NOTE(review): unused in the visible code
# ret, frame = cap.read()
# frame = cv2.imread("/home/ananthu/Desktop/Amar.jpg")
dets2 = detector2(frame)  # face detections — NOTE(review): unused below
dets = detector(frame)    # eye-pair detections; drive the processing loop
print("*******", dets)
# print("Number of pair of eyes detected: {}".format(len(dets)))
# print("Number of faces detected: {}".format(len(dets2)))
def place_eye(crop_img1, crop_img2):
    """Alpha-blend the eye overlay onto the global `frame` at both eye
    positions.

    crop_img1 / crop_img2 are the square crops around the first and second
    eye; only their shapes are used here, to size the resized overlay and
    masks.  Depends on module-level globals set by the caller: frame, shape
    (two landmark points), einter (inter-eye distance), the crop bounds
    x1/x2/y1/y2 and xx1/xx2/yy1/yy2, plus imgEye, orig_mask, orig_mask_inv.

    Returns the mutated frame (also modified in place).
    """
    # --- first eye ---
    eyeOverlayHeight, eyeOverlayWidth, channels = crop_img1.shape
    # Scale overlay and both masks to the crop size.
    eyeOverlay = cv2.resize(imgEye, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    mask = cv2.resize(orig_mask, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    mask_inv = cv2.resize(orig_mask_inv, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.INTER_AREA)
    # Expand single-channel masks to 3 channels so they multiply with BGR data.
    mask_inv = cv2.cvtColor(mask_inv, cv2.COLOR_GRAY2BGR)
    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    roi = frame[x1:x2, y1:y2]
    # Normalised blend: background weighted by the inverted mask, overlay by the mask.
    face_part = (roi * (1 / 255.0)) * (mask_inv * (1 / 255.0))
    overlay_part = (eyeOverlay * (1 / 255.0)) * (mask * (1 / 255.0))
    cv2.imshow("roi", mask)  # NOTE(review): window "roi" actually shows the mask — confirm intent
    height, width, channel = eyeOverlay.shape[:3]
    cv2.imwrite("mask.jpg",roi)
    cv2.imshow("mask", roi)  # NOTE(review): window "mask" actually shows the roi
    # roi_fg= cv2.bitwise_and(eyeOverlay, eyeOverlay, mask=mask)
    # Recombine the two weighted parts back into the 0-255 range.
    dst=cv2.addWeighted(face_part, 255.0, overlay_part, 255.0, 0.0)
    # Write the blended patch back into the frame at the first eye position.
    frame[(shape[0][1] + 3) - int(0.1 * einter):(shape[0][1] + 3) + int(0.1 * einter),
    shape[0][0] - int(0.1 * einter):shape[0][0] + int(0.1 * einter)] = dst
    # --- second eye (same procedure) ---
    eyeOverlayHeight2, eyeOverlayWidth2, channels2 = crop_img2.shape
    eyeOverlay2 = cv2.resize(imgEye, (eyeOverlayWidth2, eyeOverlayHeight2), interpolation=cv2.INTER_AREA)
    mask2 = cv2.resize(orig_mask, (eyeOverlayWidth2, eyeOverlayHeight2), interpolation=cv2.INTER_AREA)
    mask_inv2 = cv2.resize(orig_mask_inv, (eyeOverlayWidth2, eyeOverlayHeight2), interpolation=cv2.INTER_AREA)
    mask_inv2 = cv2.cvtColor(mask_inv2, cv2.COLOR_GRAY2BGR)
    mask2 = cv2.cvtColor(mask2, cv2.COLOR_GRAY2BGR)
    roi2 = frame[xx1:xx2, yy1:yy2]
    cv2.imshow("roi", roi)
    face_part2 = (roi2 * (1 / 255.0)) * (mask_inv2 * (1 / 255.0))
    overlay_part2 = (eyeOverlay2 * (1 / 255.0)) * (mask2 * (1 / 255.0))
    height, width, channel = eyeOverlay.shape[:3]
    cv2.imshow("mask", face_part2)
    # roi_fg = cv2.bitwise_and(eyeOverlay2, eyeOverlay2, mask=mask2)
    dst2 = cv2.addWeighted(face_part2, 255.0, overlay_part2, 255.0, 0.0)
    cv2.imshow("dst", overlay_part2)  # NOTE(review): shows overlay_part2, not dst2 — confirm intent
    # Write the blended patch back at the second eye position.
    frame[(shape[1][1] + 3) - int(0.1 * einter):(shape[1][1] + 3) + int(0.1 * einter),
    shape[1][0] - int(0.1 * einter):shape[1][0] + int(0.1 * einter)] = dst2
    return frame
# Main loop: for each detected eye pair, predict the two eye-centre
# landmarks, composite the overlay via place_eye(), then re-blend only the
# eye hull back onto a clean copy of the original frame.
if (dets):
    for k, d in enumerate(dets):
        print("kd", k, d)
        # d is the bounding rectangle of the detected eye pair.
        shape = predictor(frame, d)
        # Convert the dlib shape to a numpy array of (x, y) landmark points.
        shape = face_utils.shape_to_np(shape)
        # cv2.rectangle(frame,(d.left(),d.top()),(d.right(),d.bottom()),(255,0,0),1)
        # Euclidean distance between the two predicted eye points.
        einter = np.sqrt((shape[0][0] - shape[1][0]) ** 2 + (shape[0][1] - shape[1][1]) ** 2)
        # Square crop around each eye, sized to 10% of the inter-eye distance.
        crop_img1 = frame[(shape[0][1] + 3) - int(0.1 * einter):(shape[0][1] + 3) + int(0.1 * einter),
                    shape[0][0] - int(0.1 * einter):shape[0][0] + int(0.1 * einter)]
        # Bounds of the first eye region; consumed as globals by place_eye().
        x1 = (shape[0][1] + 3) - int(0.1 * einter)
        x2 = (shape[0][1] + 3) + int(0.1 * einter)
        y1 = shape[0][0] - int(0.1 * einter)
        y2 = shape[0][0] + int(0.1 * einter)
        crop_img2 = frame[(shape[1][1] + 3) - int(0.1 * einter):(shape[1][1] + 3) + int(0.1 * einter),
                    shape[1][0] - int(0.1 * einter):shape[1][0] + int(0.1 * einter)]
        # Bounds of the second eye region.
        xx1 = (shape[1][1] + 3) - int(0.1 * einter)
        xx2 = (shape[1][1] + 3) + int(0.1 * einter)
        yy1 = shape[1][0] - int(0.1 * einter)
        yy2 = shape[1][0] + int(0.1 * einter)
        # Composite the eye overlay at both positions (mutates frame).
        abc = place_eye(crop_img1, crop_img2)
        img_gray1 = cv2.cvtColor(abc, cv2.COLOR_BGR2GRAY)
        mask1 = np.zeros_like(img_gray1)
        # NOTE(review): predictor is re-bound here to the 81-landmark face
        # model, replacing the eye predictor used above on later iterations.
        predictor = dlib.shape_predictor(
            "/home/senscript/Desktop/FaceSwap_last/face_swapping/shape_predictor_81_face_landmarks.dat")
        faces1 = detector2(img_gray1)
        index_dst = []  # NOTE(review): never used below
        for face1 in faces1:
            landmarks1 = predictor(img_gray1, face1)
            landmarks_points1 = []
            # presumably landmarks 36-41 outline one eye in the 68/81-point
            # scheme — confirm against the predictor model
            for n1 in range(36, 42):
                x = landmarks1.part(n1).x
                y = landmarks1.part(n1).y
                landmarks_points1.append((x, y))
            np.array(landmarks_points1)  # NOTE(review): result discarded
        face_points1 = []  # NOTE(review): never used below
        # Uses the landmark points of the last detected face only.
        points1 = np.array(landmarks_points1, np.int32)
        convexhull1 = cv2.convexHull(points1)
        # cv2.polylines(img, [points], True, (255, 0, 0), 3)
        # Fill the eye hull to build the blending mask.
        cv2.fillConvexPoly(mask1, convexhull1, 255)
        # Foreground: composited frame inside the eye hull.
        fg = cv2.bitwise_or(abc, abc, mask=mask1)
        mask_inv = cv2.bitwise_not(mask1)
        # Background: untouched original frame outside the hull.
        bk = cv2.bitwise_or(frame1, frame1, mask=mask_inv)
        final = cv2.bitwise_or(fg, bk)
        cv2.imshow("out.jpg", final)
        cv2.waitKey(0)
# cv2.imwrite("out.jpg", abc)
# if cv2.waitKey(1) & 0xFF == ord('q'):
#     break
# cap.release()
# cv2.destroyAllWindows()
| [
"numpy.zeros_like",
"cv2.bitwise_not",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.imshow",
"cv2.addWeighted",
"cv2.imread",
"imutils.face_utils.shape_to_np",
"cv2.bitwise_or",
"dlib.simple_object_detector",
"dlib.get_frontal_face_detector",
"cv2.convexHull",
"numpy.array",
"cv2.f... | [((351, 422), 'cv2.imread', 'cv2.imread', (['"""/home/senscript/Music/Face-Overlay-AR-master/eye3.png"""', '(-1)'], {}), "('/home/senscript/Music/Face-Overlay-AR-master/eye3.png', -1)\n", (361, 422), False, 'import cv2\n'), ((422, 447), 'cv2.imshow', 'cv2.imshow', (['"""eye"""', 'imgEye'], {}), "('eye', imgEye)\n", (432, 447), False, 'import cv2\n'), ((474, 508), 'cv2.imshow', 'cv2.imshow', (['"""orig_mask"""', 'orig_mask'], {}), "('orig_mask', orig_mask)\n", (484, 508), False, 'import cv2\n'), ((526, 552), 'cv2.bitwise_not', 'cv2.bitwise_not', (['orig_mask'], {}), '(orig_mask)\n', (541, 552), False, 'import cv2\n'), ((593, 652), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""predictor_eye_combine_landmarks.dat"""'], {}), "('predictor_eye_combine_landmarks.dat')\n", (613, 652), False, 'import dlib\n'), ((664, 712), 'dlib.simple_object_detector', 'dlib.simple_object_detector', (['"""detector_eye.svm1"""'], {}), "('detector_eye.svm1')\n", (691, 712), False, 'import dlib\n'), ((725, 757), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (755, 757), False, 'import dlib\n'), ((1005, 1077), 'cv2.imread', 'cv2.imread', (['"""/home/senscript/Desktop/FaceSwap_last/face_swapping/hh.jpg"""'], {}), "('/home/senscript/Desktop/FaceSwap_last/face_swapping/hh.jpg')\n", (1015, 1077), False, 'import cv2\n'), ((1722, 1812), 'cv2.resize', 'cv2.resize', (['imgEye', '(eyeOverlayWidth, eyeOverlayHeight)'], {'interpolation': 'cv2.INTER_AREA'}), '(imgEye, (eyeOverlayWidth, eyeOverlayHeight), interpolation=cv2.\n INTER_AREA)\n', (1732, 1812), False, 'import cv2\n'), ((1903, 1996), 'cv2.resize', 'cv2.resize', (['orig_mask', '(eyeOverlayWidth, eyeOverlayHeight)'], {'interpolation': 'cv2.INTER_AREA'}), '(orig_mask, (eyeOverlayWidth, eyeOverlayHeight), interpolation=\n cv2.INTER_AREA)\n', (1913, 1996), False, 'import cv2\n'), ((2007, 2103), 'cv2.resize', 'cv2.resize', (['orig_mask_inv', '(eyeOverlayWidth, eyeOverlayHeight)'], 
{'interpolation': 'cv2.INTER_AREA'}), '(orig_mask_inv, (eyeOverlayWidth, eyeOverlayHeight),\n interpolation=cv2.INTER_AREA)\n', (2017, 2103), False, 'import cv2\n'), ((2115, 2157), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_inv', 'cv2.COLOR_GRAY2BGR'], {}), '(mask_inv, cv2.COLOR_GRAY2BGR)\n', (2127, 2157), False, 'import cv2\n'), ((2169, 2207), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_GRAY2BGR'], {}), '(mask, cv2.COLOR_GRAY2BGR)\n', (2181, 2207), False, 'import cv2\n'), ((2375, 2398), 'cv2.imshow', 'cv2.imshow', (['"""roi"""', 'mask'], {}), "('roi', mask)\n", (2385, 2398), False, 'import cv2\n'), ((2552, 2580), 'cv2.imwrite', 'cv2.imwrite', (['"""mask.jpg"""', 'roi'], {}), "('mask.jpg', roi)\n", (2563, 2580), False, 'import cv2\n'), ((2584, 2607), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'roi'], {}), "('mask', roi)\n", (2594, 2607), False, 'import cv2\n'), ((2842, 2901), 'cv2.addWeighted', 'cv2.addWeighted', (['face_part', '(255.0)', 'overlay_part', '(255.0)', '(0.0)'], {}), '(face_part, 255.0, overlay_part, 255.0, 0.0)\n', (2857, 2901), False, 'import cv2\n'), ((3424, 3516), 'cv2.resize', 'cv2.resize', (['imgEye', '(eyeOverlayWidth2, eyeOverlayHeight2)'], {'interpolation': 'cv2.INTER_AREA'}), '(imgEye, (eyeOverlayWidth2, eyeOverlayHeight2), interpolation=cv2\n .INTER_AREA)\n', (3434, 3516), False, 'import cv2\n'), ((3608, 3703), 'cv2.resize', 'cv2.resize', (['orig_mask', '(eyeOverlayWidth2, eyeOverlayHeight2)'], {'interpolation': 'cv2.INTER_AREA'}), '(orig_mask, (eyeOverlayWidth2, eyeOverlayHeight2), interpolation=\n cv2.INTER_AREA)\n', (3618, 3703), False, 'import cv2\n'), ((3715, 3813), 'cv2.resize', 'cv2.resize', (['orig_mask_inv', '(eyeOverlayWidth2, eyeOverlayHeight2)'], {'interpolation': 'cv2.INTER_AREA'}), '(orig_mask_inv, (eyeOverlayWidth2, eyeOverlayHeight2),\n interpolation=cv2.INTER_AREA)\n', (3725, 3813), False, 'import cv2\n'), ((3826, 3869), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_inv2', 'cv2.COLOR_GRAY2BGR'], {}), '(mask_inv2, 
cv2.COLOR_GRAY2BGR)\n', (3838, 3869), False, 'import cv2\n'), ((3882, 3921), 'cv2.cvtColor', 'cv2.cvtColor', (['mask2', 'cv2.COLOR_GRAY2BGR'], {}), '(mask2, cv2.COLOR_GRAY2BGR)\n', (3894, 3921), False, 'import cv2\n'), ((3961, 3983), 'cv2.imshow', 'cv2.imshow', (['"""roi"""', 'roi'], {}), "('roi', roi)\n", (3971, 3983), False, 'import cv2\n'), ((4278, 4308), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'face_part2'], {}), "('mask', face_part2)\n", (4288, 4308), False, 'import cv2\n'), ((4523, 4584), 'cv2.addWeighted', 'cv2.addWeighted', (['face_part2', '(255.0)', 'overlay_part2', '(255.0)', '(0.0)'], {}), '(face_part2, 255.0, overlay_part2, 255.0, 0.0)\n', (4538, 4584), False, 'import cv2\n'), ((4589, 4621), 'cv2.imshow', 'cv2.imshow', (['"""dst"""', 'overlay_part2'], {}), "('dst', overlay_part2)\n", (4599, 4621), False, 'import cv2\n'), ((5415, 5491), 'numpy.sqrt', 'np.sqrt', (['((shape[0][0] - shape[1][0]) ** 2 + (shape[0][1] - shape[1][1]) ** 2)'], {}), '((shape[0][0] - shape[1][0]) ** 2 + (shape[0][1] - shape[1][1]) ** 2)\n', (5422, 5491), True, 'import numpy as np\n'), ((6925, 6962), 'cv2.cvtColor', 'cv2.cvtColor', (['abc', 'cv2.COLOR_BGR2GRAY'], {}), '(abc, cv2.COLOR_BGR2GRAY)\n', (6937, 6962), False, 'import cv2\n'), ((6975, 6999), 'numpy.zeros_like', 'np.zeros_like', (['img_gray1'], {}), '(img_gray1)\n', (6988, 6999), True, 'import numpy as np\n'), ((7067, 7190), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""/home/senscript/Desktop/FaceSwap_last/face_swapping/shape_predictor_81_face_landmarks.dat"""'], {}), "(\n '/home/senscript/Desktop/FaceSwap_last/face_swapping/shape_predictor_81_face_landmarks.dat'\n )\n", (7087, 7190), False, 'import dlib\n'), ((5252, 5281), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (5274, 5281), False, 'from imutils import face_utils\n'), ((7666, 7689), 'cv2.convexHull', 'cv2.convexHull', (['points1'], {}), '(points1)\n', (7680, 7689), False, 'import cv2\n'), ((7759, 7802), 
'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['mask1', 'convexhull1', '(255)'], {}), '(mask1, convexhull1, 255)\n', (7777, 7802), False, 'import cv2\n'), ((7816, 7852), 'cv2.bitwise_or', 'cv2.bitwise_or', (['abc', 'abc'], {'mask': 'mask1'}), '(abc, abc, mask=mask1)\n', (7830, 7852), False, 'import cv2\n'), ((7872, 7894), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask1'], {}), '(mask1)\n', (7887, 7894), False, 'import cv2\n'), ((7908, 7953), 'cv2.bitwise_or', 'cv2.bitwise_or', (['frame1', 'frame1'], {'mask': 'mask_inv'}), '(frame1, frame1, mask=mask_inv)\n', (7922, 7953), False, 'import cv2\n'), ((7970, 7992), 'cv2.bitwise_or', 'cv2.bitwise_or', (['fg', 'bk'], {}), '(fg, bk)\n', (7984, 7992), False, 'import cv2\n'), ((8001, 8029), 'cv2.imshow', 'cv2.imshow', (['"""out.jpg"""', 'final'], {}), "('out.jpg', final)\n", (8011, 8029), False, 'import cv2\n'), ((8038, 8052), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8049, 8052), False, 'import cv2\n'), ((7515, 7542), 'numpy.array', 'np.array', (['landmarks_points1'], {}), '(landmarks_points1)\n', (7523, 7542), True, 'import numpy as np\n'), ((7595, 7632), 'numpy.array', 'np.array', (['landmarks_points1', 'np.int32'], {}), '(landmarks_points1, np.int32)\n', (7603, 7632), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Audio utilities
<NAME>, 2020-02-05
"""
# general modules
import numpy as np
import contextlib
import progressbar
# basic audio modules
import pyaudio
import wave
# additional module for wav editing
from pydub import AudioSegment
#from pydub.playback import play
#import librosa
import pyrubberband
#import soundfile as sf
def audioread(audiofile, starttime=0.0, duration=float('inf'), verbose=False):
    """Read a slice of a wav file into an int16 numpy array.

    Args:
        audiofile: path (or file object) of the wav file.
        starttime: offset in seconds at which reading starts (default 0.0).
        duration: number of seconds to read; float('inf') reads to the end.
        verbose: if True, print the time/frame range that was read.

    Returns:
        tuple (data, params) where data is a 1-d int16 sample array and
        params is the wave parameter list with nframes replaced by the
        number of frames actually read.
    """
    wav = wave.open(audiofile)
    params = list(wav.getparams())
    samplerate = params[2]  # sampling rate, e.g. 16000 or 8000
    total_frames = params[3]
    # position the reader at the requested start frame
    first_frame = round(samplerate * starttime)
    wav.setpos(first_frame)
    # clamp the requested duration to what is left in the file
    if duration == float('inf'):
        frames_wanted = int(total_frames - first_frame)
    else:
        frames_wanted = int(min(round(samplerate * duration),
                                total_frames - first_frame))
    params[3] = frames_wanted  # report only the frames actually read
    raw = wav.readframes(frames_wanted)
    wav.close()
    # assumes 16-bit samples (sampwidth == 2)
    samples = np.frombuffer(raw, 'int16')
    if verbose:
        seconds_read = frames_wanted / samplerate
        print('read ' + '%.2f' % seconds_read + ' sec.: ' + '%.2f' % starttime +
              ' ~ ' + '%.2f' % (starttime + seconds_read) + ' sec. (' +
              str(first_frame) + ' ~ ' + str(first_frame + frames_wanted) + ' frame)')
    return samples, params
def normalize_wav(audiofile, audiofile_norm, eps=0.01, verbose=False):
    """Peak-normalize a 16-bit wav file and write the result to a new file.

    The samples are rescaled so the loudest one sits at (1 - eps) of full
    scale for the file's sample width.

    Args:
        audiofile: path (or file object) of the input wav file.
        audiofile_norm: path (or file object) the normalized copy is written to.
        eps: headroom fraction kept below full scale (default 0.01).
        verbose: if True, report what was written.

    Raises:
        ValueError: if the input contains only zero samples (nothing to
            normalize; the original code divided by zero here).
    """
    # bugfix: the input file handle used to be rebound to the output file
    # and was never closed; use context managers for both files
    with wave.open(audiofile) as f:
        params = list(f.getparams())
        sampwidth = params[1]
        nframes = params[3]
        data = np.frombuffer(f.readframes(nframes), 'int16')
    # bugfix: widen before abs so a -32768 sample does not overflow int16
    # (abs(int16(-32768)) == -32768, which flipped the sign of the scale)
    peak = int(np.abs(data.astype(np.int32)).max())
    if peak == 0:
        raise ValueError('cannot normalize a silent file: {}'.format(audiofile))
    lim = 2 ** (sampwidth * 8 - 1)
    dmax = lim / peak
    scaled = np.array([int(i * (1 - eps)) for i in data * dmax], dtype='int16')
    with wave.open(audiofile_norm, 'w') as out:
        out.setparams(tuple(params))
        out.writeframes(scaled)
    if verbose:
        print('wrote normalized {} (eps: {}) to {}'.format(
            audiofile, eps, audiofile_norm))
def audiowrite(audiofile, data, params):
    """Write sample data to a wav file.

    Args:
        audiofile: output path (or writable file object).
        data: sequence of samples; int16 values are written as-is, any
            other numeric data is assumed to be scaled to [-1, 1] and is
            rescaled to the integer range implied by the sample width.
        params: wave parameter sequence (nchannels, sampwidth, framerate,
            nframes, comptype, compname). nframes is corrected to len(data)
            on an internal copy, so the caller's list is left untouched.
    """
    # bugfix: work on a copy instead of mutating the caller's params list
    params = list(params)
    # make sure the nframes matches the data length
    params[3] = len(data)
    # enable writing scaled (non-int16) data
    if not isinstance(data[0], np.int16):
        dmax = 2 ** (params[1] * 8 - 1)
        data = np.asarray([int(i) for i in data * dmax], dtype='int16')
    # context manager guarantees the header is finalized and the file closed
    with wave.open(audiofile, 'w') as f:
        f.setparams(tuple(params))
        f.writeframes(data)
def audioplay(audiofile, chunktime=0.05, starttime=0.0, duration=float('inf'),
              showprogress=True, verbose=False):
    """
    play audio with specified starting time and duration
    default settings:
        chunktime: 0.05, load audio 0.05 sec at a time
        startfime: 0, start 0.0 sec (at the beginning)
        duration: float(''inf), duration in time (sec.) covers the entire audio file
    """
    f = wave.open(audiofile, 'r')
    p = pyaudio.PyAudio()
    # get parameters
    sampwidth = f.getsampwidth()  # sample width in bytes, e.g. 2
    nchannels = f.getnchannels()  # 1 for mono, 2 for stereo
    framerate = f.getframerate()  # sampling rate, e.g. 16000, or 8000
    nframes = f.getnframes()
    # open stream
    stream = p.open(format=p.get_format_from_width(sampwidth),
                    channels=nchannels, rate=framerate,
                    output=True)
    chunksize = round(framerate * chunktime)
    # skip frames before starting frame
    startframe = round(framerate * starttime)
    f.setpos(startframe)
    # get the number of frames/samples actually to be played
    if duration < float('inf'):
        nframes_to_play = min(round(framerate * duration), nframes - startframe)
    else:
        nframes_to_play = nframes - startframe
    nchunks = int(nframes_to_play / chunksize)
    # bugfix: the remainder is relative to the frames being played, not the
    # whole file (the old code used nframes and could schedule a spurious
    # extra chunk when starttime > 0)
    lastchunk = nframes_to_play - nchunks * chunksize
    # bugfix: additional_chunk must exist even when there is no remainder
    # (it used to be assigned only inside `if lastchunk > 0`, raising
    # NameError for clips that divide evenly into chunks)
    additional_chunk = 1 if lastchunk > 0 else 0
    # initiate the progress bar
    if showprogress:
        bar = progressbar.ProgressBar(maxval=nchunks + additional_chunk, \
            widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
        bar.start()
    # loop over full chunks
    for i in range(nchunks):
        data = f.readframes(chunksize)
        stream.write(data)
        if showprogress: bar.update(i + 1)
    # the final partial chunk, if any
    if lastchunk > 0:
        data = f.readframes(chunksize)
        stream.write(data)
        # bugfix: `i` may be unbound here when nchunks == 0; also the bar
        # never reached its maxval because it re-used the last loop index
        if showprogress: bar.update(nchunks + 1)
    # close bar
    if showprogress: bar.finish()
    # stop stream
    stream.stop_stream()
    stream.close()
    # close PyAudio
    p.terminate()
    # close file
    f.close()
    if verbose:
        time_to_play = nframes_to_play / framerate
        endtime = starttime + time_to_play
        endframe = startframe + nframes_to_play
        print('played ' + '%.2f' % time_to_play + ' sec.: ' + '%.2f' % starttime +
              ' ~ ' + '%.2f' % endtime + ' sec. (' + str(startframe) + ' ~ '
              + str(endframe) + ' frame)')
def soundsc(data, para, dmax=0, nchunks=20, showprogress=True):
    """Play scaled sound samples through the default output device.

    Args:
        data: sequence of samples; each is multiplied by ``dmax`` and cast
            to int16 before playback, so values are presumably scaled to
            roughly [-1, 1] — TODO confirm with callers.
        para: wave parameter sequence; only nchannels (para[0]),
            sampwidth (para[1]) and framerate (para[2]) are used.
        dmax: scale factor applied to the samples; 0 (the default) means
            full scale for the given sample width.
        nchunks: number of playback chunks for long clips; short clips are
            played in one-second chunks instead.
        showprogress: if True, show a progress bar during playback.
    """
    p = pyaudio.PyAudio()
    # get parameters
    nchannels = para[0]
    sampwidth = para[1]
    framerate = para[2]
    nframes = len(data)
    # open stream
    stream = p.open(format=p.get_format_from_width(sampwidth),
                    channels=nchannels, rate=framerate,
                    output=True)
    # set the default dmax
    if dmax == 0:
        dmax = 2 ** (sampwidth * 8 - 1)
    # scale data to raw int16 samples
    data_raw = np.asarray([int(i) for i in data * dmax], dtype='int16')
    # cut into chunks: long clips get nchunks equal pieces, short clips
    # get one-second pieces
    nsecs = nframes / framerate
    if nsecs > nchunks:
        chunksize = int(nframes / nchunks)
    else:
        nchunks = int(np.ceil(nsecs))
        chunksize = framerate
    nframes_in_chunk = [chunksize] * nchunks
    nframes_in_chunk[-1] = nframes - (nchunks - 1) * chunksize
    # initiate the progress bar
    if showprogress:
        bar = progressbar.ProgressBar(maxval=nchunks, \
            widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
        bar.start()
    # play the sound chunk by chunk (the last chunk may be shorter)
    for i in range(nchunks):
        if i != nchunks - 1:
            stream.write(data_raw[i * chunksize:(i + 1) * chunksize], nframes_in_chunk[i])
        else:
            stream.write(data_raw[i * chunksize:], nframes_in_chunk[i])
        if showprogress: bar.update(i + 1)
    # close bar
    if showprogress: bar.finish()
    # bugfix: release the audio resources (the stream and the PyAudio
    # instance used to be leaked — neither was ever closed/terminated)
    stream.stop_stream()
    stream.close()
    p.terminate()
def wav_duration(filename):
    """Return the length of a wav file in seconds."""
    with contextlib.closing(wave.open(filename, 'r')) as wav:
        return wav.getnframes() / float(wav.getframerate())
def change_speed_with_pitch(sound, speed=1.0):
    """Change playback speed by resampling, shifting tempo AND pitch
    (like spinning a record faster or slower).

    Args:
        sound: pydub.AudioSegment to transform.
        speed: playback-speed multiplier (>1 faster/higher pitch,
            <1 slower/lower pitch).

    Returns:
        A new AudioSegment at the original frame rate containing the
        speed-shifted audio.
    """
    # reinterpret the same raw samples at a scaled frame rate; this tells
    # the computer to play more (or fewer) samples per second
    altered = sound._spawn(sound.raw_data, overrides={
        "frame_rate": int(sound.frame_rate * speed)})
    # bugfix: AudioSegment is immutable, so set_frame_rate returns a NEW
    # segment; the old code discarded that return value and handed back a
    # segment with a non-standard frame rate that many players reject
    return altered.set_frame_rate(sound.frame_rate)
def change_speed_only(sound, tempo_ratio):
    """Time-stretch a pydub segment with rubberband, preserving pitch.

    Args:
        sound: pydub.AudioSegment to stretch (mono or stereo).
        tempo_ratio: stretch factor passed to pyrubberband.time_stretch.

    Returns:
        A new 16-bit AudioSegment at the original frame rate.
    """
    samples = np.array(sound.get_array_of_samples())
    if sound.channels == 2:
        # stereo: reshape interleaved samples to (nframes, 2)
        samples = samples.reshape((-1, 2))
    rate = sound.frame_rate
    stretched = pyrubberband.time_stretch(samples, rate, tempo_ratio)
    out_channels = 2 if stretched.ndim == 2 and stretched.shape[1] == 2 else 1
    # rubberband returns floats in [-1, 1]; convert back to raw int16 PCM
    pcm = np.int16(stretched * 2 ** 15)
    return AudioSegment(pcm.tobytes(), frame_rate=rate, sample_width=2,
                        channels=out_channels)
def list_flat(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def extract_wav_channel(wav_in, wav_out, channel=0, verbose=False):
    """Copy a single channel of a wav file into a new mono wav file.

    Args:
        wav_in: path (or file object) of the multi-channel input wav.
        wav_out: path (or file object) the mono output is written to.
        channel: 0-based channel index; 'left'/'l' and 'right'/'r' are
            accepted aliases for 0 and 1.
        verbose: if True, print what was written.

    Raises:
        ValueError: if the requested channel does not exist in the input.
    """
    if channel == 'left' or channel == 'l': channel = 0
    if channel == 'right' or channel == 'r': channel = 1
    with wave.open(wav_in, 'r') as f:
        params = list(f.getparams())
        nchannels = params[0]
        sampwidth = params[1]
        nframes = params[3]
        data = f.readframes(nframes)  # range: [0,2^(4*sampwidth)-1]
    if channel + 1 > nchannels:
        raise ValueError('No channel {} since {} has {} channels!'.format(
            channel, wav_in, nchannels))
    # A frame holds one sample (sampwidth bytes) per channel; pick out the
    # sampwidth bytes belonging to the requested channel from every frame.
    # bugfix: the previous implementation sized its byte-plane list with
    # nchannels instead of sampwidth and crashed with IndexError whenever
    # sampwidth > nchannels (e.g. 16-bit mono input).
    frame_bytes = nchannels * sampwidth
    offset = channel * sampwidth
    mono = bytearray()
    for start in range(offset, len(data), frame_bytes):
        mono.extend(data[start:start + sampwidth])
    data_in_channel = bytes(mono)
    with wave.open(wav_out, 'w') as f:
        params[0] = 1  # output is mono
        # bugfix: nframes is a frame count, not a byte count
        params[3] = len(data_in_channel) // sampwidth
        f.setparams(tuple(params))
        f.writeframes(data_in_channel)
    if verbose:
        print('wrote {} (channel {}) to {}'.format(wav_in, channel, wav_out))
print('wrote {} (channel {}) to {}'.format(wav_in, channel, wav_out)) | [
"wave.open",
"numpy.ceil",
"numpy.frombuffer",
"pyrubberband.time_stretch",
"progressbar.Bar",
"progressbar.Percentage",
"pyaudio.PyAudio",
"numpy.int16"
] | [((686, 706), 'wave.open', 'wave.open', (['audiofile'], {}), '(audiofile)\n', (695, 706), False, 'import wave\n'), ((1367, 1395), 'numpy.frombuffer', 'np.frombuffer', (['data', '"""int16"""'], {}), "(data, 'int16')\n", (1380, 1395), True, 'import numpy as np\n'), ((1870, 1890), 'wave.open', 'wave.open', (['audiofile'], {}), '(audiofile)\n', (1879, 1890), False, 'import wave\n'), ((2084, 2112), 'numpy.frombuffer', 'np.frombuffer', (['data', '"""int16"""'], {}), "(data, 'int16')\n", (2097, 2112), True, 'import numpy as np\n'), ((2266, 2296), 'wave.open', 'wave.open', (['audiofile_norm', '"""w"""'], {}), "(audiofile_norm, 'w')\n", (2275, 2296), False, 'import wave\n'), ((2874, 2899), 'wave.open', 'wave.open', (['audiofile', '"""w"""'], {}), "(audiofile, 'w')\n", (2883, 2899), False, 'import wave\n'), ((3391, 3416), 'wave.open', 'wave.open', (['audiofile', '"""r"""'], {}), "(audiofile, 'r')\n", (3400, 3416), False, 'import wave\n'), ((3425, 3442), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3440, 3442), False, 'import pyaudio\n'), ((5970, 5987), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (5985, 5987), False, 'import pyaudio\n'), ((8497, 8551), 'pyrubberband.time_stretch', 'pyrubberband.time_stretch', (['y', 'sample_rate', 'tempo_ratio'], {}), '(y, sample_rate, tempo_ratio)\n', (8522, 8551), False, 'import pyrubberband\n'), ((8628, 8654), 'numpy.int16', 'np.int16', (['(y_fast * 2 ** 15)'], {}), '(y_fast * 2 ** 15)\n', (8636, 8654), True, 'import numpy as np\n'), ((9080, 9102), 'wave.open', 'wave.open', (['wav_in', '"""r"""'], {}), "(wav_in, 'r')\n", (9089, 9102), False, 'import wave\n'), ((9778, 9801), 'wave.open', 'wave.open', (['wav_out', '"""w"""'], {}), "(wav_out, 'w')\n", (9787, 9801), False, 'import wave\n'), ((6545, 6559), 'numpy.ceil', 'np.ceil', (['nsecs'], {}), '(nsecs)\n', (6552, 6559), True, 'import numpy as np\n'), ((7555, 7579), 'wave.open', 'wave.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (7564, 7579), False, 
'import wave\n'), ((4567, 4597), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (4582, 4597), False, 'import progressbar\n'), ((4604, 4628), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (4626, 4628), False, 'import progressbar\n'), ((6802, 6832), 'progressbar.Bar', 'progressbar.Bar', (['"""="""', '"""["""', '"""]"""'], {}), "('=', '[', ']')\n", (6817, 6832), False, 'import progressbar\n'), ((6839, 6863), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (6861, 6863), False, 'import progressbar\n')] |
from functools import partial
from itertools import product
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from estimagic.differentiation.derivatives import first_derivative
from estimagic.optimization.process_constraints import process_constraints
from estimagic.optimization.reparametrize import _multiply_from_left
from estimagic.optimization.reparametrize import _multiply_from_right
from estimagic.optimization.reparametrize import convert_external_derivative_to_internal
from estimagic.optimization.reparametrize import post_replace
from estimagic.optimization.reparametrize import post_replace_jacobian
from estimagic.optimization.reparametrize import pre_replace
from estimagic.optimization.reparametrize import pre_replace_jacobian
from estimagic.optimization.reparametrize import reparametrize_from_internal
from estimagic.optimization.reparametrize import reparametrize_to_internal
to_test = list(
product(
[
"basic_probability",
"uncorrelated_covariance",
"basic_covariance",
"basic_fixed",
"basic_increasing",
"basic_equality",
"query_equality",
"basic_sdcorr",
"normalized_covariance",
],
[0, 1, 2],
)
)
def reduce_params(params, constraints):
    """Restrict *params* to the index entries referenced by *constraints*."""
    locs = []
    for constraint in constraints:
        if "query" in constraint:
            # query constraints in the fixtures always target these labels
            locs = ["i", "j1", "j2"]
            continue
        loc = constraint["loc"]
        if isinstance(loc, tuple):
            locs.append(loc[0])
        elif isinstance(loc, list):
            locs.append(loc[0][0])
        else:
            locs.append(loc)
    return params.loc[sorted(set(locs))].copy()
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_to_internal(example_params, all_constraints, case, number):
    """External -> internal conversion reproduces the internal values and
    bounds pre-computed in the fixture columns for every constraint case."""
    constraints = all_constraints[case]
    params = reduce_params(example_params, constraints)
    params["value"] = params[f"value{number}"]
    # rows with a NaN internal value are fixed/replaced and drop out of the
    # internal parameter vector
    keep = params[f"internal_value{number}"].notnull()
    expected_internal_values = params[f"internal_value{number}"][keep]
    expected_internal_lower = params["internal_lower"]
    expected_internal_upper = params["internal_upper"]
    pc, pp = process_constraints(constraints, params)
    calculated_internal_values = reparametrize_to_internal(
        pp["value"].to_numpy(), pp["_internal_free"].to_numpy(dtype=bool), pc
    )
    calculated_internal_lower = pp["_internal_lower"]
    calculated_internal_upper = pp["_internal_upper"]
    aaae(calculated_internal_values, expected_internal_values)
    aaae(calculated_internal_lower, expected_internal_lower)
    aaae(calculated_internal_upper, expected_internal_upper)
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_from_internal(example_params, all_constraints, case, number):
    """Internal -> external conversion recovers the original external
    parameter values for every constraint case in the fixtures."""
    constraints = all_constraints[case]
    params = reduce_params(example_params, constraints)
    params["value"] = params[f"value{number}"]
    # only rows with a non-NaN internal value belong to the internal vector
    keep = params[f"internal_value{number}"].notnull()
    pc, pp = process_constraints(constraints, params)
    internal_p = params[f"internal_value{number}"][keep].to_numpy()
    fixed_val = pp["_internal_fixed_value"].to_numpy()
    pre_repl = pp["_pre_replacements"].to_numpy()
    post_repl = pp["_post_replacements"].to_numpy()
    calculated_external_value = reparametrize_from_internal(
        internal=internal_p,
        fixed_values=fixed_val,
        pre_replacements=pre_repl,
        processed_constraints=pc,
        post_replacements=post_repl,
    )
    expected_external_value = params["value"].to_numpy()
    aaae(calculated_external_value, expected_external_value)
@pytest.mark.parametrize("case, number", to_test)
def test_reparametrize_from_internal_jacobian(
    example_params, all_constraints, case, number
):
    """The analytical jacobian of reparametrize_from_internal matches a
    numerical (finite-difference) jacobian for every constraint case."""
    constraints = all_constraints[case]
    params = reduce_params(example_params, constraints)
    params["value"] = params[f"value{number}"]
    keep = params[f"internal_value{number}"].notnull()
    pc, pp = process_constraints(constraints, params)
    internal_p = params[f"internal_value{number}"][keep].to_numpy()
    fixed_val = pp["_internal_fixed_value"].to_numpy()
    pre_repl = pp["_pre_replacements"].to_numpy()
    post_repl = pp["_post_replacements"].to_numpy()
    # fix everything except the internal vector so first_derivative can
    # differentiate with respect to it
    func = partial(
        reparametrize_from_internal,
        **{
            "fixed_values": fixed_val,
            "pre_replacements": pre_repl,
            "processed_constraints": pc,
            "post_replacements": post_repl,
        },
    )
    numerical_jacobian = first_derivative(func, internal_p)
    # calling convert_external_derivative with identity matrix as external derivative
    # is just a trick to get out the jacobian of reparametrize_from_internal
    jacobian = convert_external_derivative_to_internal(
        external_derivative=np.eye(len(fixed_val)),
        internal_values=internal_p,
        fixed_values=fixed_val,
        pre_replacements=pre_repl,
        processed_constraints=pc,
        post_replacements=post_repl,
    )
    aaae(jacobian, numerical_jacobian)
@pytest.mark.parametrize("case, number", to_test)
def test_pre_replace_jacobian(example_params, all_constraints, case, number):
    """The analytical jacobian of pre_replace matches a numerical one."""
    constraints = all_constraints[case]
    params = reduce_params(example_params, constraints)
    params["value"] = params[f"value{number}"]
    keep = params[f"internal_value{number}"].notnull()
    pc, pp = process_constraints(constraints, params)
    internal_p = params[f"internal_value{number}"][keep].to_numpy()
    fixed_val = pp["_internal_fixed_value"].to_numpy()
    pre_repl = pp["_pre_replacements"].to_numpy()
    func = partial(
        pre_replace, **{"fixed_values": fixed_val, "pre_replacements": pre_repl}
    )
    numerical_deriv = first_derivative(func, internal_p)
    # positions that are filled in later (post replacements) come out as NaN
    # from the numerical differentiation; zero them to match the analytical
    # jacobian
    numerical_deriv[np.isnan(numerical_deriv)] = 0
    deriv = pre_replace_jacobian(pre_repl, len(internal_p))
    aaae(deriv, numerical_deriv)
@pytest.mark.parametrize("case, number", to_test)
def test_post_replace_jacobian(example_params, all_constraints, case, number):
    """The analytical jacobian of post_replace matches a numerical one."""
    constraints = all_constraints[case]
    params = reduce_params(example_params, constraints)
    params["value"] = params[f"value{number}"]
    keep = params[f"internal_value{number}"].notnull()
    pc, pp = process_constraints(constraints, params)
    internal_p = params[f"internal_value{number}"][keep].to_numpy()
    fixed_val = pp["_internal_fixed_value"].to_numpy()
    pre_repl = pp["_pre_replacements"].to_numpy()
    post_repl = pp["_post_replacements"].to_numpy()
    external = pre_replace(internal_p, fixed_val, pre_repl)
    external[np.isnan(external)] = 0  # if not set to zero the numerical differentiation
    # fails due to potential np.nan.
    func = partial(post_replace, **{"post_replacements": post_repl})
    numerical_deriv = first_derivative(func, external)
    deriv = post_replace_jacobian(post_repl)
    aaae(deriv, numerical_deriv)
def test_linear_constraint():
    """Linear constraints (plus one equality) survive a round trip through
    the internal parameter space and leave 5 free internal parameters."""
    index = pd.MultiIndex.from_product([["a", "b", "c"], [0, 1, 2]])
    params = pd.DataFrame(
        {"value": [2, 1, 0, 1, 3, 4, 1, 1, 1.0]}, index=index
    )
    params["lower_bound"] = [-1] + [-np.inf] * 8
    params["upper_bound"] = [1] + [np.inf] * 8
    constraints = [
        {"loc": "a", "type": "linear", "weights": [1, -2, 0], "value": 0},
        {"loc": "b", "type": "linear", "weights": 1 / 3, "upper_bound": 3},
        {
            "loc": "c",
            "type": "linear",
            "weights": 1,
            "lower_bound": 0,
            "upper_bound": 5,
        },
        {"loc": params.index, "type": "linear", "weights": 1, "value": 14},
        {"loc": "c", "type": "equality"},
    ]
    internal, _ = back_and_forth_transformation_and_assert(params, constraints)
    assert len(internal) == 5
def test_covariance_is_inherited_from_pairwise_equality(example_params):
    """A covariance constraint on one block also constrains its
    pairwise-equal partner, leaving 10 free internal parameters."""
    params = example_params.loc[["f", "l"]].copy()
    params["value"] = params["value0"]
    cov_constraint = {"loc": "l", "type": "covariance"}
    pairwise_constraint = {"locs": ["l", "f"], "type": "pairwise_equality"}
    internal, _ = back_and_forth_transformation_and_assert(
        params, [cov_constraint, pairwise_constraint]
    )
    assert len(internal) == 10
def back_and_forth_transformation_and_assert(params, constraints):
    """Convert params to the internal space and back, assert that the round
    trip reproduces the external values, and return both representations."""
    pc, pp = process_constraints(constraints, params)
    free_mask = pp["_internal_free"].to_numpy()
    internal = reparametrize_to_internal(pp["value"].to_numpy(), free_mask, pc)
    external = reparametrize_from_internal(
        internal=internal,
        fixed_values=pp["_internal_fixed_value"].to_numpy(),
        pre_replacements=pp["_pre_replacements"].to_numpy(),
        processed_constraints=pc,
        post_replacements=pp["_post_replacements"].to_numpy(),
    )
    aaae(external, params["value"].to_numpy())
    return internal, external
@pytest.mark.parametrize("seed", range(5))
def test_multiply_from_left_and_right(seed):
    """Both chain-multiplication helpers match a plain left-to-right
    @-chain on random matrices."""
    np.random.seed(seed)
    matrices = [np.random.uniform(size=(10, 10)) for _ in range(5)]
    # reference: fold left-to-right, exactly like a @ b @ c @ d @ e
    expected = matrices[0]
    for mat in matrices[1:]:
        expected = expected @ mat
    aaae(_multiply_from_left(matrices), expected)
    aaae(_multiply_from_right(matrices), expected)
| [
"functools.partial",
"numpy.random.uniform",
"numpy.random.seed",
"estimagic.optimization.process_constraints.process_constraints",
"numpy.isnan",
"pandas.MultiIndex.from_product",
"estimagic.optimization.reparametrize.post_replace_jacobian",
"estimagic.optimization.reparametrize._multiply_from_right"... | [((1814, 1862), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case, number"""', 'to_test'], {}), "('case, number', to_test)\n", (1837, 1862), False, 'import pytest\n'), ((2824, 2872), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case, number"""', 'to_test'], {}), "('case, number', to_test)\n", (2847, 2872), False, 'import pytest\n'), ((3796, 3844), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case, number"""', 'to_test'], {}), "('case, number', to_test)\n", (3819, 3844), False, 'import pytest\n'), ((5233, 5281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case, number"""', 'to_test'], {}), "('case, number', to_test)\n", (5256, 5281), False, 'import pytest\n'), ((6102, 6150), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case, number"""', 'to_test'], {}), "('case, number', to_test)\n", (6125, 6150), False, 'import pytest\n'), ((983, 1191), 'itertools.product', 'product', (["['basic_probability', 'uncorrelated_covariance', 'basic_covariance',\n 'basic_fixed', 'basic_increasing', 'basic_equality', 'query_equality',\n 'basic_sdcorr', 'normalized_covariance']", '[0, 1, 2]'], {}), "(['basic_probability', 'uncorrelated_covariance', 'basic_covariance',\n 'basic_fixed', 'basic_increasing', 'basic_equality', 'query_equality',\n 'basic_sdcorr', 'normalized_covariance'], [0, 1, 2])\n", (990, 1191), False, 'from itertools import product\n'), ((2340, 2380), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (2359, 2380), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((2640, 2698), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calculated_internal_values', 'expected_internal_values'], {}), '(calculated_internal_values, expected_internal_values)\n', (2644, 2698), True, 'from numpy.testing import 
assert_array_almost_equal as aaae\n'), ((2703, 2759), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calculated_internal_lower', 'expected_internal_lower'], {}), '(calculated_internal_lower, expected_internal_lower)\n', (2707, 2759), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((2764, 2820), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calculated_internal_upper', 'expected_internal_upper'], {}), '(calculated_internal_upper, expected_internal_upper)\n', (2768, 2820), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((3171, 3211), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (3190, 3211), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((3471, 3634), 'estimagic.optimization.reparametrize.reparametrize_from_internal', 'reparametrize_from_internal', ([], {'internal': 'internal_p', 'fixed_values': 'fixed_val', 'pre_replacements': 'pre_repl', 'processed_constraints': 'pc', 'post_replacements': 'post_repl'}), '(internal=internal_p, fixed_values=fixed_val,\n pre_replacements=pre_repl, processed_constraints=pc, post_replacements=\n post_repl)\n', (3498, 3634), False, 'from estimagic.optimization.reparametrize import reparametrize_from_internal\n'), ((3736, 3792), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calculated_external_value', 'expected_external_value'], {}), '(calculated_external_value, expected_external_value)\n', (3740, 3792), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((4158, 4198), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (4177, 4198), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((4437, 4603), 'functools.partial', 'partial', (['reparametrize_from_internal'], {}), 
"(reparametrize_from_internal, **{'fixed_values': fixed_val,\n 'pre_replacements': pre_repl, 'processed_constraints': pc,\n 'post_replacements': post_repl})\n", (4444, 4603), False, 'from functools import partial\n'), ((4703, 4737), 'estimagic.differentiation.derivatives.first_derivative', 'first_derivative', (['func', 'internal_p'], {}), '(func, internal_p)\n', (4719, 4737), False, 'from estimagic.differentiation.derivatives import first_derivative\n'), ((5195, 5229), 'numpy.testing.assert_array_almost_equal', 'aaae', (['jacobian', 'numerical_jacobian'], {}), '(jacobian, numerical_jacobian)\n', (5199, 5229), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((5573, 5613), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (5592, 5613), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((5800, 5885), 'functools.partial', 'partial', (['pre_replace'], {}), "(pre_replace, **{'fixed_values': fixed_val, 'pre_replacements':\n pre_repl})\n", (5807, 5885), False, 'from functools import partial\n'), ((5918, 5952), 'estimagic.differentiation.derivatives.first_derivative', 'first_derivative', (['func', 'internal_p'], {}), '(func, internal_p)\n', (5934, 5952), False, 'from estimagic.differentiation.derivatives import first_derivative\n'), ((6070, 6098), 'numpy.testing.assert_array_almost_equal', 'aaae', (['deriv', 'numerical_deriv'], {}), '(deriv, numerical_deriv)\n', (6074, 6098), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((6443, 6483), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (6462, 6483), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((6726, 6770), 'estimagic.optimization.reparametrize.pre_replace', 'pre_replace', (['internal_p', 'fixed_val', 
'pre_repl'], {}), '(internal_p, fixed_val, pre_repl)\n', (6737, 6770), False, 'from estimagic.optimization.reparametrize import pre_replace\n'), ((6909, 6966), 'functools.partial', 'partial', (['post_replace'], {}), "(post_replace, **{'post_replacements': post_repl})\n", (6916, 6966), False, 'from functools import partial\n'), ((6989, 7021), 'estimagic.differentiation.derivatives.first_derivative', 'first_derivative', (['func', 'external'], {}), '(func, external)\n', (7005, 7021), False, 'from estimagic.differentiation.derivatives import first_derivative\n'), ((7035, 7067), 'estimagic.optimization.reparametrize.post_replace_jacobian', 'post_replace_jacobian', (['post_repl'], {}), '(post_repl)\n', (7056, 7067), False, 'from estimagic.optimization.reparametrize import post_replace_jacobian\n'), ((7073, 7101), 'numpy.testing.assert_array_almost_equal', 'aaae', (['deriv', 'numerical_deriv'], {}), '(deriv, numerical_deriv)\n', (7077, 7101), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((8494, 8534), 'estimagic.optimization.process_constraints.process_constraints', 'process_constraints', (['constraints', 'params'], {}), '(constraints, params)\n', (8513, 8534), False, 'from estimagic.optimization.process_constraints import process_constraints\n'), ((9121, 9141), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9135, 9141), True, 'import numpy as np\n'), ((9295, 9324), 'estimagic.optimization.reparametrize._multiply_from_left', '_multiply_from_left', (['mat_list'], {}), '(mat_list)\n', (9314, 9324), False, 'from estimagic.optimization.reparametrize import _multiply_from_left\n'), ((9347, 9377), 'estimagic.optimization.reparametrize._multiply_from_right', '_multiply_from_right', (['mat_list'], {}), '(mat_list)\n', (9367, 9377), False, 'from estimagic.optimization.reparametrize import _multiply_from_right\n'), ((9383, 9413), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calc_from_left', 'expected'], {}), '(calc_from_left, 
expected)\n', (9387, 9413), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((9418, 9449), 'numpy.testing.assert_array_almost_equal', 'aaae', (['calc_from_right', 'expected'], {}), '(calc_from_right, expected)\n', (9422, 9449), True, 'from numpy.testing import assert_array_almost_equal as aaae\n'), ((5973, 5998), 'numpy.isnan', 'np.isnan', (['numerical_deriv'], {}), '(numerical_deriv)\n', (5981, 5998), True, 'import numpy as np\n'), ((6784, 6802), 'numpy.isnan', 'np.isnan', (['external'], {}), '(external)\n', (6792, 6802), True, 'import numpy as np\n'), ((9158, 9190), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 10)'}), '(size=(10, 10))\n', (9175, 9190), True, 'import numpy as np\n'), ((7175, 7231), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['a', 'b', 'c'], [0, 1, 2]]"], {}), "([['a', 'b', 'c'], [0, 1, 2]])\n", (7201, 7231), True, 'import pandas as pd\n')] |
import tensorflow as tf
import os
import numpy as np
from tf_metric_learning.utils.index import AnnoyDataIndex
class TripletAnnoyMiner(AnnoyDataIndex):
    """Annoy-backed miner that finds hard/easy positive and negative
    triplet candidates for the embeddings produced by ``base_model``."""

    def __init__(
        self,
        base_model,
        eb_size,
        labels,
        metric="euclidean",
        save_dir=None,
        progress=False,
        normalize_eb=True,
        normalize_fn=None,
        **kwargs
    ):
        """
        Args:
            base_model: model whose ``predict`` produces the embeddings.
            eb_size: embedding dimensionality, forwarded to the index.
            labels: labels of the indexed items, forwarded to the index.
            metric: annoy distance metric (default "euclidean").
            save_dir: optional directory for persisting the index.
            progress: whether the index should report progress.
            normalize_eb: if True, L2-normalize the predicted embeddings.
            normalize_fn: optional preprocessing applied to the raw data
                before prediction.
        """
        super().__init__(eb_size, labels, metric=metric, save_dir=save_dir, progress=progress)
        self.base_model = base_model
        self.normalize_eb = normalize_eb
        self.normalize_fn = normalize_fn

    def extract_embeddings(self, data):
        """Run the base model on (optionally preprocessed) data and
        optionally L2-normalize the resulting embeddings."""
        data = self.normalize_fn(data) if self.normalize_fn is not None else data
        embeddings = self.base_model.predict(data)
        if self.normalize_eb:
            embeddings = tf.nn.l2_normalize(embeddings, axis=1).numpy()
        return embeddings

    def mine_item(self, ids, distances, label, operator, index):
        """Pick one id from *ids* (assumed ordered by ascending distance).

        ``operator`` True keeps ids whose label equals *label* (positives),
        False keeps the rest (negatives). ``index`` selects the position
        among the matches (0 = closest, -1 = farthest). Returns None when
        nothing matches. ``distances`` is currently unused but kept for
        interface stability.
        """
        labels = np.asarray([self.get_label(result) for result in ids])
        indexes = np.where(labels == label)[0] if operator else np.where(labels != label)[0]
        item_id = ids[indexes[index]] if len(indexes) else None
        return item_id

    def search_hardest_negative(self, embedding, label, n=20):
        """Closest item with a different label, or None."""
        ids, distances = self.search(embedding, include_distances=True, n=n)
        return self.mine_item(ids, distances, label, False, 0)

    def search_easiest_negative(self, embedding, label, n=20):
        """Farthest (within top n) item with a different label, or None."""
        ids, distances = self.search(embedding, include_distances=True, n=n)
        return self.mine_item(ids, distances, label, False, -1)

    def search_hardest_positive(self, embedding, label, n=20):
        """Farthest (within top n) item with the same label, or None."""
        # bugfix: the search result used to be bound to ``results`` while the
        # undefined names ``ids``/``distances`` were used, raising NameError
        ids, distances = self.search(embedding, include_distances=True, n=n)
        return self.mine_item(ids, distances, label, True, -1)

    def search_easiest_positive(self, embedding, label, n=20):
        """Closest item with the same label, or None."""
        # bugfix: same NameError as in search_hardest_positive
        ids, distances = self.search(embedding, include_distances=True, n=n)
        return self.mine_item(ids, distances, label, True, 0)

    def search_hardest_negative_image(self, image, label, n=20):
        embedding = self.extract_embeddings(np.asarray([image]))[0]
        return self.search_hardest_negative(embedding, label, n=n)

    def search_easiest_negative_image(self, image, label, n=20):
        embedding = self.extract_embeddings(np.asarray([image]))[0]
        return self.search_easiest_negative(embedding, label, n=n)

    def search_hardest_positive_image(self, image, label, n=20):
        embedding = self.extract_embeddings(np.asarray([image]))[0]
        return self.search_hardest_positive(embedding, label, n=n)

    def search_easiest_positive_image(self, image, label, n=20):
        embedding = self.extract_embeddings(np.asarray([image]))[0]
        return self.search_easiest_positive(embedding, label, n=n)
| [
"numpy.asarray",
"numpy.where",
"tensorflow.nn.l2_normalize"
] | [((1067, 1092), 'numpy.where', 'np.where', (['(labels == label)'], {}), '(labels == label)\n', (1075, 1092), True, 'import numpy as np\n'), ((1113, 1138), 'numpy.where', 'np.where', (['(labels != label)'], {}), '(labels != label)\n', (1121, 1138), True, 'import numpy as np\n'), ((2141, 2160), 'numpy.asarray', 'np.asarray', (['[image]'], {}), '([image])\n', (2151, 2160), True, 'import numpy as np\n'), ((2342, 2361), 'numpy.asarray', 'np.asarray', (['[image]'], {}), '([image])\n', (2352, 2361), True, 'import numpy as np\n'), ((2543, 2562), 'numpy.asarray', 'np.asarray', (['[image]'], {}), '([image])\n', (2553, 2562), True, 'import numpy as np\n'), ((2744, 2763), 'numpy.asarray', 'np.asarray', (['[image]'], {}), '([image])\n', (2754, 2763), True, 'import numpy as np\n'), ((838, 876), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['embeddings'], {'axis': '(1)'}), '(embeddings, axis=1)\n', (856, 876), True, 'import tensorflow as tf\n')] |
import copy
import json
from typing import Tuple
import numpy as np
from PIL import ImageColor
from rastervision.core.box import Box
from rastervision.data import (ChipClassificationLabels, ObjectDetectionLabels)
from rastervision.utils.files import file_to_str
def add_classes_to_geojson(geojson, class_map):
    """Return a deep copy of ``geojson`` whose features all carry class info.

    Features that already have a ``class_id`` property are left untouched.
    Otherwise the id is looked up in ``class_map`` from ``class_name`` or
    from ``label`` (treated as a synonym for class_name); features with no
    class information at all are assigned class_id 1.
    """
    result = copy.deepcopy(geojson)
    for feature in result['features']:
        properties = feature.get('properties', {})
        if 'class_id' not in properties:
            if 'class_name' in properties:
                name = properties['class_name']
                properties['class_id'] = class_map.get_by_name(name).id
            elif 'label' in properties:
                # 'label' is treated as a synonym of class_name so that
                # Raster Foundry output can be consumed directly.
                name = properties['label']
                properties['class_id'] = class_map.get_by_name(name).id
                properties['class_name'] = name
            else:
                # No class_id, class_name, or label: assume everything
                # corresponds to class_id = 1.
                properties['class_id'] = 1
                properties['class_name'] = class_map.get_by_id(1).name
        feature['properties'] = properties
    return result
def load_label_store_json(uri):
    """Load JSON for LabelStore.

    Reads the file at ``uri`` into a string and returns the parsed JSON.
    """
    raw = file_to_str(uri)
    return json.loads(raw)
def geojson_to_object_detection_labels(geojson_dict,
                                       crs_transformer,
                                       extent=None):
    """Convert GeoJSON to ObjectDetectionLabels object.
    If extent is provided, filter out the boxes that lie "more than a little
    bit" outside the extent.
    Args:
        geojson_dict: dict in GeoJSON format
        crs_transformer: used to convert map coords in geojson to pixel coords
            in labels object
        extent: Box in pixel coords
    Returns:
        ObjectDetectionLabels
    """
    features = geojson_dict['features']
    # Accumulators filled in by polygon_to_label via closure.
    boxes = []
    class_ids = []
    scores = []
    def polygon_to_label(polygon, crs_transformer):
        # Convert each map-space vertex to pixel space, then take the
        # axis-aligned bounding box of the polygon as the detection box.
        polygon = [crs_transformer.map_to_pixel(p) for p in polygon]
        xmin, ymin = np.min(polygon, axis=0)
        xmax, ymax = np.max(polygon, axis=0)
        boxes.append(Box(ymin, xmin, ymax, xmax))
        # NOTE: `feature` is not a parameter -- it is the loop variable of
        # the `for feature in features` loop below, captured by closure, so
        # this helper is only valid when called from inside that loop.
        properties = feature['properties']
        class_ids.append(properties['class_id'])
        # A missing 'score' property defaults to full confidence (1.0).
        scores.append(properties.get('score', 1.0))
    for feature in features:
        geom_type = feature['geometry']['type']
        coordinates = feature['geometry']['coordinates']
        if geom_type == 'MultiPolygon':
            # Only each polygon's exterior ring (index 0) is used; interior
            # rings (holes) are ignored.
            for polygon in coordinates:
                polygon_to_label(polygon[0], crs_transformer)
        elif geom_type == 'Polygon':
            polygon_to_label(coordinates[0], crs_transformer)
        else:
            raise Exception(
                'Geometries of type {} are not supported in object detection \
labels.'.format(geom_type))
    if len(boxes):
        # Stack the per-feature results into arrays for ObjectDetectionLabels.
        boxes = np.array([box.npbox_format() for box in boxes], dtype=float)
        class_ids = np.array(class_ids)
        scores = np.array(scores)
        labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
    else:
        labels = ObjectDetectionLabels.make_empty()
    if extent is not None:
        # Keep only boxes with intersection-over-area >= 0.8 with the
        # extent, clipping them to it.
        labels = ObjectDetectionLabels.get_overlapping(
            labels, extent, ioa_thresh=0.8, clip=True)
    return labels
def geojson_to_chip_classification_labels(geojson_dict,
                                          crs_transformer,
                                          extent=None):
    """Convert GeoJSON to ChipClassificationLabels.
    If extent is given, only labels that intersect with the extent are returned.
    Args:
        geojson_dict: dict in GeoJSON format
        crs_transformer: used to convert map coords in geojson to pixel coords
            in labels object
        extent: Box in pixel coords
    Returns:
        ChipClassificationLabels
    """
    features = geojson_dict['features']
    labels = ChipClassificationLabels()
    # Precompute the extent polygon once; None disables the filtering below.
    extent_shape = None
    if extent:
        extent_shape = extent.to_shapely()
    def polygon_to_label(polygon, crs_transformer):
        # The cell is the pixel-space axis-aligned bounding box of the polygon.
        polygon = [crs_transformer.map_to_pixel(p) for p in polygon]
        xmin, ymin = np.min(polygon, axis=0)
        xmax, ymax = np.max(polygon, axis=0)
        cell = Box(ymin, xmin, ymax, xmax)
        # Skip cells that do not intersect the (optional) extent.
        if extent_shape and not cell.to_shapely().intersects(extent_shape):
            return
        # NOTE: `feature` is the loop variable of the `for feature in
        # features` loop below, captured by closure; this helper is only
        # valid when called from inside that loop.
        properties = feature['properties']
        class_id = properties['class_id']
        scores = properties.get('scores')  # optional; may be absent (None)
        labels.set_cell(cell, class_id, scores)
    for feature in features:
        geom_type = feature['geometry']['type']
        coordinates = feature['geometry']['coordinates']
        if geom_type == 'Polygon':
            # Only the exterior ring (index 0) of the polygon is used.
            polygon_to_label(coordinates[0], crs_transformer)
        else:
            raise Exception(
                'Geometries of type {} are not supported in chip classification \
labels.'.format(geom_type))
    return labels
def color_to_triple(color: str) -> Tuple[int, int, int]:
    """Translate a PIL ImageColor string into an (r, g, b) triple.

    Args:
        color: A PIL ImageColor string, or None.

    Returns:
        A triple of integers. When *color* is None, each channel is drawn
        independently at random from [0, 256).
    """
    if color is not None:
        return ImageColor.getrgb(color)
    r = np.random.randint(0, 0x100)
    g = np.random.randint(0, 0x100)
    b = np.random.randint(0, 0x100)
    return (r, g, b)
def color_to_integer(color: str) -> int:
    """Pack a PIL ImageColor string into a single RGB integer.

    The red channel occupies bits 16-23, green bits 8-15 and blue bits 0-7.

    Args:
        color: A PIL ImageColor string

    Returns:
        An integer containing the packed RGB values.
    """
    r, g, b = color_to_triple(color)
    return (r << 16) + (g << 8) + b
def rgb_to_int_array(rgb_array):
    """Pack an (..., 3)-channelled RGB image array into one array of packed
    color integers (red in bits 16-23, green in bits 8-15, blue in 0-7)."""
    channels = [np.array(rgb_array[:, :, i], dtype=np.uint32) for i in range(3)]
    shifts = (16, 8, 0)
    return sum(chan << shift for chan, shift in zip(channels, shifts))
| [
"copy.deepcopy",
"rastervision.utils.files.file_to_str",
"PIL.ImageColor.getrgb",
"rastervision.data.ObjectDetectionLabels.make_empty",
"rastervision.core.box.Box",
"numpy.max",
"numpy.min",
"rastervision.data.ChipClassificationLabels",
"numpy.array",
"rastervision.data.ObjectDetectionLabels",
"... | [((396, 418), 'copy.deepcopy', 'copy.deepcopy', (['geojson'], {}), '(geojson)\n', (409, 418), False, 'import copy\n'), ((4322, 4348), 'rastervision.data.ChipClassificationLabels', 'ChipClassificationLabels', ([], {}), '()\n', (4346, 4348), False, 'from rastervision.data import ChipClassificationLabels, ObjectDetectionLabels\n'), ((1609, 1625), 'rastervision.utils.files.file_to_str', 'file_to_str', (['uri'], {}), '(uri)\n', (1620, 1625), False, 'from rastervision.utils.files import file_to_str\n'), ((2439, 2462), 'numpy.min', 'np.min', (['polygon'], {'axis': '(0)'}), '(polygon, axis=0)\n', (2445, 2462), True, 'import numpy as np\n'), ((2484, 2507), 'numpy.max', 'np.max', (['polygon'], {'axis': '(0)'}), '(polygon, axis=0)\n', (2490, 2507), True, 'import numpy as np\n'), ((3362, 3381), 'numpy.array', 'np.array', (['class_ids'], {}), '(class_ids)\n', (3370, 3381), True, 'import numpy as np\n'), ((3399, 3415), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3407, 3415), True, 'import numpy as np\n'), ((3433, 3487), 'rastervision.data.ObjectDetectionLabels', 'ObjectDetectionLabels', (['boxes', 'class_ids'], {'scores': 'scores'}), '(boxes, class_ids, scores=scores)\n', (3454, 3487), False, 'from rastervision.data import ChipClassificationLabels, ObjectDetectionLabels\n'), ((3515, 3549), 'rastervision.data.ObjectDetectionLabels.make_empty', 'ObjectDetectionLabels.make_empty', ([], {}), '()\n', (3547, 3549), False, 'from rastervision.data import ChipClassificationLabels, ObjectDetectionLabels\n'), ((3595, 3680), 'rastervision.data.ObjectDetectionLabels.get_overlapping', 'ObjectDetectionLabels.get_overlapping', (['labels', 'extent'], {'ioa_thresh': '(0.8)', 'clip': '(True)'}), '(labels, extent, ioa_thresh=0.8, clip=True\n )\n', (3632, 3680), False, 'from rastervision.data import ChipClassificationLabels, ObjectDetectionLabels\n'), ((4575, 4598), 'numpy.min', 'np.min', (['polygon'], {'axis': '(0)'}), '(polygon, axis=0)\n', (4581, 4598), True, 'import numpy 
as np\n'), ((4620, 4643), 'numpy.max', 'np.max', (['polygon'], {'axis': '(0)'}), '(polygon, axis=0)\n', (4626, 4643), True, 'import numpy as np\n'), ((4659, 4686), 'rastervision.core.box.Box', 'Box', (['ymin', 'xmin', 'ymax', 'xmax'], {}), '(ymin, xmin, ymax, xmax)\n', (4662, 4686), False, 'from rastervision.core.box import Box\n'), ((5693, 5718), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (5710, 5718), True, 'import numpy as np\n'), ((5733, 5758), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (5750, 5758), True, 'import numpy as np\n'), ((5773, 5798), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (5790, 5798), True, 'import numpy as np\n'), ((5851, 5875), 'PIL.ImageColor.getrgb', 'ImageColor.getrgb', (['color'], {}), '(color)\n', (5868, 5875), False, 'from PIL import ImageColor\n'), ((6320, 6365), 'numpy.array', 'np.array', (['rgb_array[:, :, 0]'], {'dtype': 'np.uint32'}), '(rgb_array[:, :, 0], dtype=np.uint32)\n', (6328, 6365), True, 'import numpy as np\n'), ((6386, 6431), 'numpy.array', 'np.array', (['rgb_array[:, :, 1]'], {'dtype': 'np.uint32'}), '(rgb_array[:, :, 1], dtype=np.uint32)\n', (6394, 6431), True, 'import numpy as np\n'), ((6451, 6496), 'numpy.array', 'np.array', (['rgb_array[:, :, 2]'], {'dtype': 'np.uint32'}), '(rgb_array[:, :, 2], dtype=np.uint32)\n', (6459, 6496), True, 'import numpy as np\n'), ((2529, 2556), 'rastervision.core.box.Box', 'Box', (['ymin', 'xmin', 'ymax', 'xmax'], {}), '(ymin, xmin, ymax, xmax)\n', (2532, 2556), False, 'from rastervision.core.box import Box\n')] |
import collections
import warnings
import numpy as np
import decimal as dc
import nengo.utils.numpy as npext
from nengo.builder.builder import Builder
from nengo.builder.operator import Copy, DotInc, Reset, SimNoise
from nengo.dists import Distribution
from nengo.ensemble import Ensemble
from nengo.neurons import Direct
from nengo.rc import rc
from nengo.utils.builder import default_n_eval_points
# Immutable record of the parameters sampled/derived while building an
# Ensemble; build_ensemble stores one of these in model.params[ens] so the
# build results can be inspected afterwards.
BuiltEnsemble = collections.namedtuple(
    'BuiltEnsemble', ['eval_points', 'encoders', 'intercepts', 'max_rates',
                      'scaled_encoders', 'gain', 'bias'])
def sample(dist, n_samples, rng):
    """Draw ``n_samples`` values from *dist* when it is a nengo
    ``Distribution``; otherwise treat *dist* as a fixed array-like and
    return it as an ndarray."""
    if not isinstance(dist, Distribution):
        return np.array(dist)
    return dist.sample(n_samples, rng=rng)
def gen_eval_points(ens, eval_points, rng, scale_eval_points=True):
    """Return concrete evaluation points for the ensemble *ens*.

    A ``Distribution`` is sampled (using ``ens.n_eval_points``, or a
    heuristic default when that is None); any other value is treated as an
    array-like of points and converted. When ``scale_eval_points`` is True
    the points are scaled by the ensemble radius.
    """
    if isinstance(eval_points, Distribution):
        n_points = (ens.n_eval_points
                    if ens.n_eval_points is not None
                    else default_n_eval_points(ens.n_neurons, ens.dimensions))
        eval_points = eval_points.sample(n_points, ens.dimensions, rng)
    else:
        mismatched = (ens.n_eval_points is not None
                      and eval_points.shape[0] != ens.n_eval_points)
        if mismatched:
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(eval_points, dtype=rc.get('precision', 'dtype'))
    if scale_eval_points:
        eval_points *= ens.radius  # scale by ensemble radius
    return eval_points
@Builder.register(Ensemble) # noqa: C901
def build_ensemble(model, ens):
    """Build an `Ensemble` into `model`.

    Samples encoders, max_rates, intercepts and (if needed) gain/bias,
    creates the ensemble's input/output signals and the operators that
    implement its encoding, and records the sampled parameters in
    ``model.params[ens]`` as a `BuiltEnsemble`.

    NOTE(review): this variant converts encoders, gain and bias to
    `decimal.Decimal` via `dc.Decimal` / `npext.castDecimal` -- presumably
    for exact arithmetic; confirm against the project's `castDecimal`
    helper before relying on this.
    """
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])
    dtype = rc.get('precision', 'dtype')
    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)
    # Set up signal
    model.sig[ens]['in'] = model.Signal(
        npext.castDecimal(np.zeros(ens.dimensions)), name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))
    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        # Direct mode has no neurons; the encoders are just the identity.
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=dtype)
    # Convert to Decimal, then normalize each encoder row to unit length.
    encoders = np.array([[dc.Decimal(p) for p in row] for row in encoders])
    encoders /= npext.norm(encoders, axis=1, keepdims=True)
    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)
    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        # Neither given: derive gain/bias from the sampled tuning curves.
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
        gain = np.array([dc.Decimal(p) for p in gain])
        bias = np.array([dc.Decimal(p) for p in bias])
    if isinstance(ens.neuron_type, Direct):
        # Direct mode: neuron input and output are the same signal; no bias.
        model.sig[ens.neurons]['in'] = model.Signal(
            npext.castDecimal(np.zeros(ens.dimensions)), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = model.Signal(
            npext.castDecimal(np.zeros(ens.n_neurons)), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = model.Signal(
            npext.castDecimal(np.zeros(ens.n_neurons)), name="%s.neuron_out" % ens)
        # Bias is applied by copying it into the neuron input signal.
        model.add_op(Copy(src=model.Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)
    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        # Fold the gain and the radius normalization into the encoders.
        scaled_encoders = encoders * (gain / dc.Decimal(ens.radius))[:, np.newaxis]
    model.sig[ens]['encoders'] = model.Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)
    # Inject noise if specified
    if ens.noise is not None:
        model.add_op(SimNoise(model.sig[ens.neurons]['in'], ens.noise))
    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))
    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
| [
"nengo.rc.rc.get",
"decimal.Decimal",
"nengo.builder.operator.Reset",
"numpy.asarray",
"numpy.zeros",
"numpy.identity",
"numpy.random.RandomState",
"nengo.builder.operator.DotInc",
"nengo.utils.numpy.norm",
"numpy.array",
"nengo.builder.builder.Builder.register",
"collections.namedtuple",
"n... | [((420, 554), 'collections.namedtuple', 'collections.namedtuple', (['"""BuiltEnsemble"""', "['eval_points', 'encoders', 'intercepts', 'max_rates', 'scaled_encoders',\n 'gain', 'bias']"], {}), "('BuiltEnsemble', ['eval_points', 'encoders',\n 'intercepts', 'max_rates', 'scaled_encoders', 'gain', 'bias'])\n", (442, 554), False, 'import collections\n'), ((1500, 1526), 'nengo.builder.builder.Builder.register', 'Builder.register', (['Ensemble'], {}), '(Ensemble)\n', (1516, 1526), False, 'from nengo.builder.builder import Builder\n'), ((711, 725), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (719, 725), True, 'import numpy as np\n'), ((1620, 1659), 'numpy.random.RandomState', 'np.random.RandomState', (['model.seeds[ens]'], {}), '(model.seeds[ens])\n', (1641, 1659), True, 'import numpy as np\n'), ((1672, 1700), 'nengo.rc.rc.get', 'rc.get', (['"""precision"""', '"""dtype"""'], {}), "('precision', 'dtype')\n", (1678, 1700), False, 'from nengo.rc import rc\n'), ((2419, 2462), 'nengo.utils.numpy.norm', 'npext.norm', (['encoders'], {'axis': '(1)', 'keepdims': '(True)'}), '(encoders, axis=1, keepdims=True)\n', (2429, 2462), True, 'import nengo.utils.numpy as npext\n'), ((1923, 1950), 'nengo.builder.operator.Reset', 'Reset', (["model.sig[ens]['in']"], {}), "(model.sig[ens]['in'])\n", (1928, 1950), False, 'from nengo.builder.operator import Copy, DotInc, Reset, SimNoise\n'), ((2038, 2065), 'numpy.identity', 'np.identity', (['ens.dimensions'], {}), '(ens.dimensions)\n', (2049, 2065), True, 'import numpy as np\n'), ((4663, 4779), 'nengo.builder.operator.DotInc', 'DotInc', (["model.sig[ens]['encoders']", "model.sig[ens]['in']", "model.sig[ens.neurons]['in']"], {'tag': "('%s encoding' % ens)"}), "(model.sig[ens]['encoders'], model.sig[ens]['in'], model.sig[ens.\n neurons]['in'], tag='%s encoding' % ens)\n", (4669, 4779), False, 'from nengo.builder.operator import Copy, DotInc, Reset, SimNoise\n'), ((931, 983), 'nengo.utils.builder.default_n_eval_points', 
'default_n_eval_points', (['ens.n_neurons', 'ens.dimensions'], {}), '(ens.n_neurons, ens.dimensions)\n', (952, 983), False, 'from nengo.utils.builder import default_n_eval_points\n'), ((1184, 1285), 'warnings.warn', 'warnings.warn', (['"""Number of eval_points doesn\'t match n_eval_points. Ignoring n_eval_points."""'], {}), '(\n "Number of eval_points doesn\'t match n_eval_points. Ignoring n_eval_points."\n )\n', (1197, 1285), False, 'import warnings\n'), ((1855, 1879), 'numpy.zeros', 'np.zeros', (['ens.dimensions'], {}), '(ens.dimensions)\n', (1863, 1879), True, 'import numpy as np\n'), ((2213, 2246), 'numpy.asarray', 'np.asarray', (['encoders'], {'dtype': 'dtype'}), '(encoders, dtype=dtype)\n', (2223, 2246), True, 'import numpy as np\n'), ((2276, 2326), 'nengo.utils.numpy.array', 'npext.array', (['ens.encoders'], {'min_dims': '(2)', 'dtype': 'dtype'}), '(ens.encoders, min_dims=2, dtype=dtype)\n', (2287, 2326), True, 'import nengo.utils.numpy as npext\n'), ((3236, 3249), 'decimal.Decimal', 'dc.Decimal', (['p'], {}), '(p)\n', (3246, 3249), True, 'import decimal as dc\n'), ((3287, 3300), 'decimal.Decimal', 'dc.Decimal', (['p'], {}), '(p)\n', (3297, 3300), True, 'import decimal as dc\n'), ((3588, 3623), 'nengo.builder.operator.Reset', 'Reset', (["model.sig[ens.neurons]['in']"], {}), "(model.sig[ens.neurons]['in'])\n", (3593, 3623), False, 'from nengo.builder.operator import Copy, DotInc, Reset, SimNoise\n'), ((4546, 4595), 'nengo.builder.operator.SimNoise', 'SimNoise', (["model.sig[ens.neurons]['in']", 'ens.noise'], {}), "(model.sig[ens.neurons]['in'], ens.noise)\n", (4554, 4595), False, 'from nengo.builder.operator import Copy, DotInc, Reset, SimNoise\n'), ((1355, 1383), 'nengo.rc.rc.get', 'rc.get', (['"""precision"""', '"""dtype"""'], {}), "('precision', 'dtype')\n", (1361, 1383), False, 'from nengo.rc import rc\n'), ((2353, 2366), 'decimal.Decimal', 'dc.Decimal', (['p'], {}), '(p)\n', (2363, 2366), True, 'import decimal as dc\n'), ((3444, 3468), 'numpy.zeros', 
'np.zeros', (['ens.dimensions'], {}), '(ens.dimensions)\n', (3452, 3468), True, 'import numpy as np\n'), ((3718, 3741), 'numpy.zeros', 'np.zeros', (['ens.n_neurons'], {}), '(ens.n_neurons)\n', (3726, 3741), True, 'import numpy as np\n'), ((3855, 3878), 'numpy.zeros', 'np.zeros', (['ens.n_neurons'], {}), '(ens.n_neurons)\n', (3863, 3878), True, 'import numpy as np\n'), ((4317, 4339), 'decimal.Decimal', 'dc.Decimal', (['ens.radius'], {}), '(ens.radius)\n', (4327, 4339), True, 'import decimal as dc\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# IIR testing
# model for multiplier-less IIR filters with feedback close to 1
# second order is just two first-order sections concatenated
# you can multiply by (1-2**-l) easily by bit-shifting and subtracting in hardware
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from dsp_fpga_lib import zplane
f_s = 900E3  # sample frequency [Hz]
# Number of bit-shift exponents 2**-l to sweep. Renamed from `max`, which
# shadowed the builtin of the same name.
N_SHIFTS = 24
# Cutoff frequencies found below via argmin(|Hf + 3|), i.e. the -3 dB point
# (the original comment said -6 dB, which the code does not compute).
fc = np.empty(N_SHIFTS)  # -3 dB cutoff freqs [Hz]
TC = np.empty(N_SHIFTS)  # time constants [s]
# Evaluation frequencies are loop-invariant, so compute them once.
flog = np.logspace(-8, 1, 1000)
# go through all the powers of two
for l in range(N_SHIFTS):
    # H(z)=B/A=2**-l/(z-(1-2**-l))  # first order
    b = [2**-l, 0]  # first-order numerator
    a = [1, -(1 - (2**-l))]  # first-order denominator
    # b=[2**-(2*l),0,0] # second order
    # a=[1,-2*(1-2**-l),(1-2**-l)**2] # second order
    plt.figure(0)  # reselect the magnitude plot (zplane switches to fig 1)
    [f, Hf] = signal.freqz(b, a, flog)
    Hf = 20*np.log10(abs(Hf))  # magnitude in dB
    fHz = (f/(2*np.pi))*f_s  # convert rad/sample to Hz
    plt.plot(fHz, Hf)
    plt.figure(1)
    zplane(b, a)
    fc[l] = fHz[np.argmin(abs(Hf+3))]  # frequency nearest the -3 dB point
    TC[l] = 1/(2*np.pi*fc[l])
    print(f"2^{l}: fc= {fc[l]}Hz TC={TC[l]} sec ")
plt.figure(0)
plt.minorticks_on()
# The `b=` keyword of pyplot.grid was deprecated in Matplotlib 3.5 in favor
# of `visible=` and later removed.
plt.grid(visible=True, which='both', axis='both')
plt.xscale('log')
plt.xlim(10E-4, 450E3)
plt.show() | [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.xlim",
"dsp_fpga_lib.zplane",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.logspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.minorticks_on",
"scipy.signal.freqz",
"matplotlib.pyplot.grid"
] | [((422, 435), 'numpy.empty', 'np.empty', (['max'], {}), '(max)\n', (430, 435), True, 'import numpy as np\n'), ((463, 476), 'numpy.empty', 'np.empty', (['max'], {}), '(max)\n', (471, 476), True, 'import numpy as np\n'), ((1132, 1145), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1142, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1165), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (1163, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1210), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""both"""', 'axis': '"""both"""'}), "(b=True, which='both', axis='both')\n", (1175, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1226), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1219, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1252), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.001)', '(450000.0)'], {}), '(0.001, 450000.0)\n', (1235, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1261, 1263), True, 'import matplotlib.pyplot as plt\n'), ((817, 830), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (827, 830), True, 'import matplotlib.pyplot as plt\n'), ((840, 864), 'numpy.logspace', 'np.logspace', (['(-8)', '(1)', '(1000)'], {}), '(-8, 1, 1000)\n', (851, 864), True, 'import numpy as np\n'), ((874, 898), 'scipy.signal.freqz', 'signal.freqz', (['b', 'a', 'flog'], {}), '(b, a, flog)\n', (886, 898), False, 'from scipy import signal\n'), ((955, 972), 'matplotlib.pyplot.plot', 'plt.plot', (['fHz', 'Hf'], {}), '(fHz, Hf)\n', (963, 972), True, 'import matplotlib.pyplot as plt\n'), ((976, 989), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (986, 989), True, 'import matplotlib.pyplot as plt\n'), ((994, 1006), 'dsp_fpga_lib.zplane', 'zplane', (['b', 'a'], {}), '(b, a)\n', (1000, 1006), False, 'from dsp_fpga_lib import 
zplane\n')] |
import json
import numpy as np
import os
from os import path
from typing import List, Tuple, Dict, Optional, Union
import autoarray as aa
class PointDataset:
    """A single point-source dataset: named image-plane positions (and,
    optionally, fluxes) together with their noise maps.

    The ``name`` pairs this dataset with the ``Point`` model component of
    the same name during a model-fit; a dataset that cannot be paired
    raises an error at fit time.
    """

    def __init__(
        self,
        name: str,
        positions: Union[aa.Grid2DIrregular, List[List], List[Tuple]],
        positions_noise_map: Union[aa.ValuesIrregular, List[float]],
        fluxes: Optional[Union[aa.ValuesIrregular, List[float]]] = None,
        fluxes_noise_map: Optional[Union[aa.ValuesIrregular, List[float]]] = None,
    ):
        """
        Collect the data components used for point-source model-fitting
        (e.g. the observed positions of a lensed quasar or supernova, or
        the many point sources of a cluster-scale lens model).

        Parameters
        ----------
        name
            The name of the point source dataset, which pairs it to a
            `Point` in the `Model`.
        positions
            The image-plane (y,x) positions of the point-source.
        positions_noise_map
            The noise-value of every (y,x) position, typically the
            pixel-scale of the data.
        fluxes
            The image-plane flux of each observed point-source of light.
        fluxes_noise_map
            The noise-value of every observed flux.
        """
        self.name = name
        # Coerce plain list inputs into the autoarray container types;
        # inputs that are already containers pass through unchanged.
        self.positions = (
            positions
            if isinstance(positions, aa.Grid2DIrregular)
            else aa.Grid2DIrregular(grid=positions)
        )
        self.positions_noise_map = (
            positions_noise_map
            if isinstance(positions_noise_map, aa.ValuesIrregular)
            else aa.ValuesIrregular(values=positions_noise_map)
        )
        # Fluxes are optional; None is stored as-is.
        self.fluxes = (
            aa.ValuesIrregular(values=fluxes)
            if fluxes is not None and not isinstance(fluxes, aa.ValuesIrregular)
            else fluxes
        )
        self.fluxes_noise_map = (
            aa.ValuesIrregular(values=fluxes_noise_map)
            if fluxes_noise_map is not None
            and not isinstance(fluxes_noise_map, aa.ValuesIrregular)
            else fluxes_noise_map
        )

    @property
    def dict(self) -> dict:
        """
        A dictionary representation of this instance, with arrays rendered
        as lists (or lists of lists) and numeric values rounded to 4 d.p.
        """
        fluxes = None if self.fluxes is None else list(np.round(self.fluxes, 4))
        fluxes_noise_map = (
            None if self.fluxes_noise_map is None else list(self.fluxes_noise_map)
        )
        return {
            "name": self.name,
            "positions": [list(pos) for pos in np.round(self.positions, 4)],
            "positions_noise_map": list(self.positions_noise_map),
            "fluxes": fluxes,
            "fluxes_noise_map": fluxes_noise_map,
        }

    @classmethod
    def from_dict(cls, dict_: dict) -> "PointDataset":
        """
        Create a point source dataset from its dictionary representation
        (the inverse of the ``dict`` property).

        Parameters
        ----------
        dict_
            A dictionary. Arrays are represented as lists or lists of lists.

        Returns
        -------
        An instance
        """
        fluxes = dict_["fluxes"]
        fluxes_noise_map = dict_["fluxes_noise_map"]
        return cls(
            name=dict_["name"],
            positions=aa.Grid2DIrregular(dict_["positions"]),
            positions_noise_map=aa.ValuesIrregular(dict_["positions_noise_map"]),
            fluxes=None if fluxes is None else aa.ValuesIrregular(fluxes),
            fluxes_noise_map=(
                None
                if fluxes_noise_map is None
                else aa.ValuesIrregular(fluxes_noise_map)
            ),
        )
class PointDict(dict):
    def __init__(self, point_dataset_list: List["PointDataset"]):
        """
        A dictionary containing the entire point-source dataset, which could
        be many instances of the `PointDataset` object.

        Each `PointDataset` is keyed by its `name`, making it straightforward
        to look up a dataset's attributes from the name of the model
        component it is paired with.

        Parameters
        ----------
        point_dataset_list
            A list of all point-source datasets that are to be added to the
            point-source dictionary.

        Returns
        -------
        Dict[PointDataset]
            A dictionary where the keys are the `name` entries of each
            `PointDataset` and the values are the corresponding instance of
            the `PointDataset` class.
        """
        super().__init__()
        for point_dataset in point_dataset_list:
            self[point_dataset.name] = point_dataset

    @property
    def positions_list(self):
        """The image-plane positions of every dataset, in insertion order."""
        # Iterate values directly; the keys are not needed here.
        return [point_dataset.positions for point_dataset in self.values()]

    @property
    def dicts(self) -> List[dict]:
        """
        A list of dictionaries representing this collection of point source
        datasets.
        """
        return [dataset.dict for dataset in self.values()]

    @classmethod
    def from_dicts(cls, dicts: List[dict]) -> "PointDict":
        """
        Create an instance from a list of dictionaries.

        Parameters
        ----------
        dicts
            Dictionaries, each representing one point source dataset.

        Returns
        -------
        A collection of point source datasets. (The previous return
        annotation of ``List[PointDataset]`` was incorrect: this has always
        returned a ``PointDict``.)
        """
        return cls(map(PointDataset.from_dict, dicts))

    @classmethod
    def from_json(cls, file_path):
        """Load a `PointDict` from a JSON file written by `output_to_json`."""
        with open(file_path) as infile:
            dicts = json.load(infile)
        return cls.from_dicts(dicts=dicts)

    def output_to_json(self, file_path, overwrite=False):
        """
        Serialize this collection as JSON to ``file_path``.

        Parameters
        ----------
        file_path
            Destination path; missing parent directories are created.
        overwrite
            If False (default) and the file exists, raise FileExistsError;
            if True, replace the existing file.
        """
        file_dir = os.path.split(file_path)[0]
        # Guard against a bare filename (empty dirname): os.makedirs("")
        # would raise FileNotFoundError.
        if file_dir and not path.exists(file_dir):
            os.makedirs(file_dir)
        if overwrite and path.exists(file_path):
            os.remove(file_path)
        elif not overwrite and path.exists(file_path):
            # Bug fix: previously raised with three positional arguments
            # (rendering as a tuple) and a missing space between the
            # adjacent literals "this" "file" ("thisfile").
            raise FileExistsError(
                f"The file {file_path} already exists. "
                "Set overwrite=True to overwrite this file."
            )
        with open(file_path, "w+") as f:
            json.dump(self.dicts, f, indent=4)
| [
"autoarray.ValuesIrregular",
"os.remove",
"json.load",
"json.dump",
"os.makedirs",
"autoarray.Grid2DIrregular",
"os.path.exists",
"os.path.split",
"numpy.round"
] | [((1980, 2014), 'autoarray.Grid2DIrregular', 'aa.Grid2DIrregular', ([], {'grid': 'positions'}), '(grid=positions)\n', (1998, 2014), True, 'import autoarray as aa\n'), ((2159, 2205), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', ([], {'values': 'positions_noise_map'}), '(values=positions_noise_map)\n', (2177, 2205), True, 'import autoarray as aa\n'), ((6126, 6143), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (6135, 6143), False, 'import json\n'), ((6273, 6297), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (6286, 6297), False, 'import os\n'), ((6319, 6340), 'os.path.exists', 'path.exists', (['file_dir'], {}), '(file_dir)\n', (6330, 6340), False, 'from os import path\n'), ((6355, 6376), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (6366, 6376), False, 'import os\n'), ((6405, 6427), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (6416, 6427), False, 'from os import path\n'), ((6442, 6462), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (6451, 6462), False, 'import os\n'), ((6766, 6800), 'json.dump', 'json.dump', (['self.dicts', 'f'], {'indent': '(4)'}), '(self.dicts, f, indent=4)\n', (6775, 6800), False, 'import json\n'), ((2384, 2417), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', ([], {'values': 'fluxes'}), '(values=fluxes)\n', (2402, 2417), True, 'import autoarray as aa\n'), ((2600, 2643), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', ([], {'values': 'fluxes_noise_map'}), '(values=fluxes_noise_map)\n', (2618, 2643), True, 'import autoarray as aa\n'), ((3783, 3821), 'autoarray.Grid2DIrregular', 'aa.Grid2DIrregular', (["dict_['positions']"], {}), "(dict_['positions'])\n", (3801, 3821), True, 'import autoarray as aa\n'), ((3856, 3904), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', (["dict_['positions_noise_map']"], {}), "(dict_['positions_noise_map'])\n", (3874, 3904), True, 'import autoarray as aa\n'), ((6495, 6517), 'os.path.exists', 
'path.exists', (['file_path'], {}), '(file_path)\n', (6506, 6517), False, 'from os import path\n'), ((2976, 3003), 'numpy.round', 'np.round', (['self.positions', '(4)'], {}), '(self.positions, 4)\n', (2984, 3003), True, 'import numpy as np\n'), ((3103, 3127), 'numpy.round', 'np.round', (['self.fluxes', '(4)'], {}), '(self.fluxes, 4)\n', (3111, 3127), True, 'import numpy as np\n'), ((3926, 3961), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', (["dict_['fluxes']"], {}), "(dict_['fluxes'])\n", (3944, 3961), True, 'import autoarray as aa\n'), ((4060, 4105), 'autoarray.ValuesIrregular', 'aa.ValuesIrregular', (["dict_['fluxes_noise_map']"], {}), "(dict_['fluxes_noise_map'])\n", (4078, 4105), True, 'import autoarray as aa\n')] |
# -*- coding: utf-8 -*-
"""
@date Created on Tue Mar 24 2020
@author martin_g for Eomys
"""
# Standard library imports
# Third party imports
import numpy as np
from scipy.io import wavfile
# Local application imports
from mosqito.generic.oct3spec import oct3spec
from mosqito.generic.calc_third_octave_levels import calc_third_octave_levels
def wav_to_oct3(file, calib=1, out_type='overall'):
    """Load a .wav signal and return its third-octave band spectrum.

    Parameters
    ----------
    file : str
        Full path to the signal file.
    calib : float
        Calibration factor for the signal to be in [Pa].
    out_type : str
        Format of the output:
        - 'overall': overall rms value per third-octave band
        - 'time': rms value per third-octave band versus time
          (temporal resolution = 0.5 ms)
        - 'time_iso': squared and smoothed value per third-octave band
          versus time, ISO 532-1 implementation (temporal
          resolution = 0.5 ms)

    Returns
    -------
    spec : numpy.ndarray
        Third-octave band spectrum of the signal [dB re. 2e-5 Pa]
    freq : numpy.ndarray
        Corresponding preferred third-octave band center frequencies [Hz]

    Raises
    ------
    ValueError
        If ``out_type`` is not one of 'overall', 'time' or 'time_iso'.
    """
    # TODO: Manage float32 wav file format
    # TODO: Manage fs != 48000 Hz
    fs, sig = wavfile.read(file)
    # Normalize integer PCM samples to [-1, 1] and apply the calibration factor
    if isinstance(sig[0], np.int16):
        sig = calib * sig / (2 ** 15 - 1)
    elif isinstance(sig[0], np.int32):
        sig = calib * sig / (2 ** 31 - 1)
    if out_type == 'overall':
        spec, freq = oct3spec(sig, fs, 25, 12500, sig_type='stationary')
    elif out_type == 'time':
        # Decimation factor targeting a 2 kHz output rate
        # (evaluates to 24 for fs = 48 kHz, matching the previous
        # hard-coded value; now consistent for other sampling rates)
        dec_factor = int(fs / 2000)
        spec, freq = oct3spec(
            sig, fs, 25, 12500, sig_type='time_varying', dec_factor=dec_factor
        )
    elif out_type == 'time_iso':
        spec = calc_third_octave_levels(sig, fs)
        # Preferred third-octave band center frequencies from 25 Hz to 12.5 kHz
        freq = np.array(
            [
                25,
                31.5,
                40,
                50,
                63,
                80,
                100,
                125,
                160,
                200,
                250,
                315,
                400,
                500,
                630,
                800,
                1000,
                1250,
                1600,
                2000,
                2500,
                3150,
                4000,
                5000,
                6300,
                8000,
                10000,
                12500,
            ]
        )
    else:
        # Fail loudly instead of the implicit NameError the fall-through caused
        raise ValueError(
            "out_type must be 'overall', 'time' or 'time_iso', got '%s'"
            % out_type
        )
    return np.squeeze(spec), freq
| [
"scipy.io.wavfile.read",
"mosqito.generic.calc_third_octave_levels.calc_third_octave_levels",
"numpy.array",
"numpy.squeeze",
"mosqito.generic.oct3spec.oct3spec"
] | [((1310, 1328), 'scipy.io.wavfile.read', 'wavfile.read', (['file'], {}), '(file)\n', (1322, 1328), False, 'from scipy.io import wavfile\n'), ((1541, 1592), 'mosqito.generic.oct3spec.oct3spec', 'oct3spec', (['sig', 'fs', '(25)', '(12500)'], {'sig_type': '"""stationary"""'}), "(sig, fs, 25, 12500, sig_type='stationary')\n", (1549, 1592), False, 'from mosqito.generic.oct3spec import oct3spec\n'), ((2377, 2393), 'numpy.squeeze', 'np.squeeze', (['spec'], {}), '(spec)\n', (2387, 2393), True, 'import numpy as np\n'), ((1679, 1747), 'mosqito.generic.oct3spec.oct3spec', 'oct3spec', (['sig', 'fs', '(25)', '(12500)'], {'sig_type': '"""time_varying"""', 'dec_factor': '(24)'}), "(sig, fs, 25, 12500, sig_type='time_varying', dec_factor=24)\n", (1687, 1747), False, 'from mosqito.generic.oct3spec import oct3spec\n'), ((1796, 1829), 'mosqito.generic.calc_third_octave_levels.calc_third_octave_levels', 'calc_third_octave_levels', (['sig', 'fs'], {}), '(sig, fs)\n', (1820, 1829), False, 'from mosqito.generic.calc_third_octave_levels import calc_third_octave_levels\n'), ((1844, 2013), 'numpy.array', 'np.array', (['[25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800,\n 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]'], {}), '([25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500,\n 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, \n 10000, 12500])\n', (1852, 2013), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.