code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""The tournament module decides which pmems to
pick from the ring in order to apply updates
to the population."""
import numpy as np
from kaplan.ring import RingEmptyError
from kaplan.mutations import generate_children
def run_tournament(t_size, num_muts, num_swaps, ring,
current_mev):
"""Run the tournament (i.e. a mating event).
Parameters
----------
t_size : int
Number of pmems to choose for the
tournament.
num_muts : int
Maximum number of mutations to apply
to each newly generated pmem.
num_swaps : int
Maximum number of swaps to do between
newly generated pmems.
ring : object
Ring object.
current_mev : int
The current mating event number. Used
to give pmems birthdays.
Returns
-------
None
"""
# check ring has enough pmems for a tournament
if t_size > ring.num_filled:
raise RingEmptyError("Not enough pmems to run a tournament.")
# choose random slots for a tournament
selected_pmems = select_pmems(t_size, ring)
print("chosen pmems:", selected_pmems)
# select parents by fitness
parents = select_parents(selected_pmems, ring)
parent1 = ring[parents[0]].dihedrals
parent2 = ring[parents[1]].dihedrals
# generate children
children = generate_children(parent1, parent2, num_muts, num_swaps)
# put children in ring
ring.update(parents[0], children[1], current_mev)
ring.update(parents[1], children[0], current_mev)
def select_pmems(number, ring):
"""Randomly selected pmems.
Parameters
----------
number : int
How many pmems to pick.
ring : object
Note
----
Maybe move this to the ring as a method. Or turn
into a generator (calling next on the ring returns
a random pmem).
"""
selection = []
while len(selection) < number:
# choose random slot
choice = np.random.randint(0, ring.num_slots)
# add slot to selection if its non-empty
if ring[choice]:
selection.append(choice)
return selection
def select_parents(selected_pmems, ring):
"""Sort pmems by fitness values, choose best 2.
Parameters
----------
selected_pmems : list
The indices of the ring that are in the tournament.
ring : object
Instance of Ring class.
Returns
-------
tuple : int,int
Indices of two best pmems to be used as parents.
"""
fit_vals = np.array([ring[i].fitness for i in selected_pmems])
print(fit_vals)
# from here:
# https://stackoverflow.com/questions/6910641/how-do-i-get-indices-of-n-maximum-values-in-a-numpy-array
# use numpy to get the two best fitness value indices
# from the list and link it to ring index
# note: this makes generator object
parents_gen = (selected_pmems[parent] for parent in np.argpartition(fit_vals, -2)[-2:])
parents = [next(parents_gen)]
parents.append(next(parents_gen))
print(parents)
return parents
| [
"kaplan.ring.RingEmptyError",
"kaplan.mutations.generate_children",
"numpy.argpartition",
"numpy.random.randint",
"numpy.array"
] | [((1348, 1404), 'kaplan.mutations.generate_children', 'generate_children', (['parent1', 'parent2', 'num_muts', 'num_swaps'], {}), '(parent1, parent2, num_muts, num_swaps)\n', (1365, 1404), False, 'from kaplan.mutations import generate_children\n'), ((2520, 2571), 'numpy.array', 'np.array', (['[ring[i].fitness for i in selected_pmems]'], {}), '([ring[i].fitness for i in selected_pmems])\n', (2528, 2571), True, 'import numpy as np\n'), ((949, 1004), 'kaplan.ring.RingEmptyError', 'RingEmptyError', (['"""Not enough pmems to run a tournament."""'], {}), "('Not enough pmems to run a tournament.')\n", (963, 1004), False, 'from kaplan.ring import RingEmptyError\n'), ((1961, 1997), 'numpy.random.randint', 'np.random.randint', (['(0)', 'ring.num_slots'], {}), '(0, ring.num_slots)\n', (1978, 1997), True, 'import numpy as np\n'), ((2917, 2946), 'numpy.argpartition', 'np.argpartition', (['fit_vals', '(-2)'], {}), '(fit_vals, -2)\n', (2932, 2946), True, 'import numpy as np\n')] |
import chainer
import numpy as np
import models
def main():
model = models.load_resnet50()
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
x = np.zeros((1, 3, model.insize, model.insize), dtype=np.float32)
t = np.zeros((1,), dtype=np.int32)
optimizer.update(model, x, t)
if __name__ == '__main__':
main()
| [
"models.load_resnet50",
"chainer.optimizers.Adam",
"numpy.zeros"
] | [((75, 97), 'models.load_resnet50', 'models.load_resnet50', ([], {}), '()\n', (95, 97), False, 'import models\n'), ((114, 139), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {}), '()\n', (137, 139), False, 'import chainer\n'), ((176, 238), 'numpy.zeros', 'np.zeros', (['(1, 3, model.insize, model.insize)'], {'dtype': 'np.float32'}), '((1, 3, model.insize, model.insize), dtype=np.float32)\n', (184, 238), True, 'import numpy as np\n'), ((247, 277), 'numpy.zeros', 'np.zeros', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (255, 277), True, 'import numpy as np\n')] |
import os
import math
import argparse
import numpy as np
from black_box import FourierBlackBox
from bayes_opt import BayesianOptimization
from QuantumAnnealing.Three_SAT import get_3sat_problem
from QuantumAnnealing.GroverSearch import get_gs_problem
from tqdm import tqdm
def get_split(param_list):
value_list = sorted(param['target'] for param in param_list)
return value_list[len(value_list) // 2]
def save_results(param_list, path):
param_array = []
for param in param_list:
param_array.append([param['target']] + list(param['params'].values()))
param_array = np.array(param_array)
np.save(path, param_array)
parser = argparse.ArgumentParser()
parser.add_argument('--n_qubit', type=int, default=8)
parser.add_argument('--cutoff', type=int, default=6)
parser.add_argument('--time_final', type=float, default=62.2)
parser.add_argument('--time_step', type=float, default=1)
parser.add_argument('--pround', type=float, default=0.1)
parser.add_argument('--n_sample', type=int, default=10)
parser.add_argument('--n_point', type=int, default=1024)
parser.add_argument('--output_dir', type=str, default='output')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
black_box_class = FourierBlackBox(get_3sat_problem,
n_qubit=args.n_qubit,
cutoff=args.cutoff,
time_final=args.time_final,
time_step=args.time_step,
pround=(-args.pround, args.pround),
num_sample=args.n_sample)
print('Gaussian process started.')
optimizer = BayesianOptimization(black_box_class.black_box_reward,
black_box_class.prounds,
verbose=2)
optimizer.probe([0] * black_box_class.cutoff)
optimizer.maximize(init_points=0, n_iter=args.n_point-1)
print('Gaussian process completed.')
param_list = optimizer.res.copy()
save_results(param_list, os.path.join(args.output_dir, 'Gaussian_Process.npy'))
for i in range(len(param_list)):
param_list[i]['id'] = i
num_iter = int(math.ceil(math.log2(len(param_list))))
per_round_budget = int(math.ceil(len(param_list) / num_iter))
for i in range(1, num_iter + 1):
split_num = get_split(param_list)
n_hyperband_sample = int(math.ceil(2 * per_round_budget / len(param_list)))
new_param_list = []
pbar = tqdm(total=n_hyperband_sample*len(param_list)//2, dynamic_ncols=True)
pbar.set_description("Step {:02d} | Threshold = {:.4f}".format(i, split_num))
for param in param_list:
if param['target'] >= split_num:
reward = 0
for _ in range(n_hyperband_sample):
reward += black_box_class.black_box_reward(**param['params'])
pbar.update()
reward /= n_hyperband_sample
param['target'] = reward
new_param_list.append(param)
pbar.close()
param_list = new_param_list
save_results(param_list, os.path.join(args.output_dir, 'Multi-arm_Bandit_%d') % i)
print('Multi-arm bandit completed.')
print(param_list[0])
| [
"os.mkdir",
"numpy.save",
"argparse.ArgumentParser",
"bayes_opt.BayesianOptimization",
"black_box.FourierBlackBox",
"os.path.exists",
"numpy.array",
"os.path.join"
] | [((657, 682), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (680, 682), False, 'import argparse\n'), ((1261, 1461), 'black_box.FourierBlackBox', 'FourierBlackBox', (['get_3sat_problem'], {'n_qubit': 'args.n_qubit', 'cutoff': 'args.cutoff', 'time_final': 'args.time_final', 'time_step': 'args.time_step', 'pround': '(-args.pround, args.pround)', 'num_sample': 'args.n_sample'}), '(get_3sat_problem, n_qubit=args.n_qubit, cutoff=args.cutoff,\n time_final=args.time_final, time_step=args.time_step, pround=(-args.\n pround, args.pround), num_sample=args.n_sample)\n', (1276, 1461), False, 'from black_box import FourierBlackBox\n'), ((1706, 1801), 'bayes_opt.BayesianOptimization', 'BayesianOptimization', (['black_box_class.black_box_reward', 'black_box_class.prounds'], {'verbose': '(2)'}), '(black_box_class.black_box_reward, black_box_class.\n prounds, verbose=2)\n', (1726, 1801), False, 'from bayes_opt import BayesianOptimization\n'), ((594, 615), 'numpy.array', 'np.array', (['param_array'], {}), '(param_array)\n', (602, 615), True, 'import numpy as np\n'), ((620, 646), 'numpy.save', 'np.save', (['path', 'param_array'], {}), '(path, param_array)\n', (627, 646), True, 'import numpy as np\n'), ((1179, 1210), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1193, 1210), False, 'import os\n'), ((1216, 1241), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (1224, 1241), False, 'import os\n'), ((2065, 2118), 'os.path.join', 'os.path.join', (['args.output_dir', '"""Gaussian_Process.npy"""'], {}), "(args.output_dir, 'Gaussian_Process.npy')\n", (2077, 2118), False, 'import os\n'), ((3084, 3136), 'os.path.join', 'os.path.join', (['args.output_dir', '"""Multi-arm_Bandit_%d"""'], {}), "(args.output_dir, 'Multi-arm_Bandit_%d')\n", (3096, 3136), False, 'import os\n')] |
import os
from PIL import Image
import numpy as np
import lodgepole.image_tools as lit
# The examples are pulled from images taken by the Mars Curiosity Rover.
# https://mars.nasa.gov/msl/multimedia/
training_path = os.path.join("data", "training")
tuning_path = os.path.join("data", "tuning")
evaluation_path = os.path.join("data", "evaluation")
switch_probability = 1 / 100
def load_image(path, imagename, patch_size):
img = np.asarray(Image.open(os.path.join(path, imagename))) / 255
# Convert color images to grayscale
if len(img.shape) == 3:
img = lit.rgb2gray_approx(img)
n_rows, n_cols = img.shape
assert len(img.shape) == 2
assert n_rows > patch_size
assert n_cols > patch_size
# Pad out to a multiple of patch_size
n_rows_pad = int(np.ceil(n_rows / patch_size)) * patch_size
n_cols_pad = int(np.ceil(n_cols / patch_size)) * patch_size
padded = np.pad(img, ((0, n_rows_pad - n_rows), (0, n_cols_pad - n_cols)))
assert np.sum(np.isnan(padded)) == 0
return padded
def pre_load(patch_size):
training_images = []
tuning_images = []
evaluation_images = []
for path, imagelist in zip(
(training_path, tuning_path, evaluation_path),
(training_images, tuning_images, evaluation_images)
):
filenames = os.listdir(path)
imagenames = [f for f in filenames if f[-4:] == ".jpg"]
assert len(imagenames) > 0
for imagename in imagenames:
imagelist.append(load_image(path, imagename, patch_size))
return (training_images, tuning_images, evaluation_images)
def get_data_sets(patch_size=10):
"""
This function creates three other functions that generate data.
One generates a training data set,
one a tuning data set, and the other, an evaluation set.
"""
# Pre-load all the images into memory
training_images, tuning_images, evaluation_images = pre_load(patch_size)
def data_generator(imagelist):
img = None
while True:
# Occasionally switch to a new image
if img is None or np.random.sample() < switch_probability:
img = np.random.choice(imagelist)
n_rows, n_cols = img.shape
i_row = np.random.randint(n_rows - patch_size)
i_col = np.random.randint(n_cols - patch_size)
yield img[i_row: i_row + patch_size, i_col: i_col + patch_size]
return (
data_generator(training_images),
data_generator(tuning_images),
data_generator(evaluation_images)
)
if __name__ == "__main__":
training_set, tuning_set, evaluation_set = get_data_sets()
for _ in range(1000):
print(next(training_set))
| [
"numpy.pad",
"numpy.ceil",
"numpy.isnan",
"numpy.random.randint",
"numpy.random.choice",
"os.path.join",
"os.listdir",
"lodgepole.image_tools.rgb2gray_approx",
"numpy.random.sample"
] | [((218, 250), 'os.path.join', 'os.path.join', (['"""data"""', '"""training"""'], {}), "('data', 'training')\n", (230, 250), False, 'import os\n'), ((265, 295), 'os.path.join', 'os.path.join', (['"""data"""', '"""tuning"""'], {}), "('data', 'tuning')\n", (277, 295), False, 'import os\n'), ((314, 348), 'os.path.join', 'os.path.join', (['"""data"""', '"""evaluation"""'], {}), "('data', 'evaluation')\n", (326, 348), False, 'import os\n'), ((913, 978), 'numpy.pad', 'np.pad', (['img', '((0, n_rows_pad - n_rows), (0, n_cols_pad - n_cols))'], {}), '(img, ((0, n_rows_pad - n_rows), (0, n_cols_pad - n_cols)))\n', (919, 978), True, 'import numpy as np\n'), ((578, 602), 'lodgepole.image_tools.rgb2gray_approx', 'lit.rgb2gray_approx', (['img'], {}), '(img)\n', (597, 602), True, 'import lodgepole.image_tools as lit\n'), ((1317, 1333), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1327, 1333), False, 'import os\n'), ((792, 820), 'numpy.ceil', 'np.ceil', (['(n_rows / patch_size)'], {}), '(n_rows / patch_size)\n', (799, 820), True, 'import numpy as np\n'), ((856, 884), 'numpy.ceil', 'np.ceil', (['(n_cols / patch_size)'], {}), '(n_cols / patch_size)\n', (863, 884), True, 'import numpy as np\n'), ((998, 1014), 'numpy.isnan', 'np.isnan', (['padded'], {}), '(padded)\n', (1006, 1014), True, 'import numpy as np\n'), ((2254, 2292), 'numpy.random.randint', 'np.random.randint', (['(n_rows - patch_size)'], {}), '(n_rows - patch_size)\n', (2271, 2292), True, 'import numpy as np\n'), ((2313, 2351), 'numpy.random.randint', 'np.random.randint', (['(n_cols - patch_size)'], {}), '(n_cols - patch_size)\n', (2330, 2351), True, 'import numpy as np\n'), ((457, 486), 'os.path.join', 'os.path.join', (['path', 'imagename'], {}), '(path, imagename)\n', (469, 486), False, 'import os\n'), ((2162, 2189), 'numpy.random.choice', 'np.random.choice', (['imagelist'], {}), '(imagelist)\n', (2178, 2189), True, 'import numpy as np\n'), ((2099, 2117), 'numpy.random.sample', 'np.random.sample', ([], {}), 
'()\n', (2115, 2117), True, 'import numpy as np\n')] |
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Plotting')
parser.add_argument('--file_name1',default='FINAL_script_local1-20200414_105220.log', type=str,help='path-name of the log file to be read')
parser.add_argument('--file_name2',default='FINAL_script_local5-20200414_105613.log', type=str,help='path-name of the log file to be read')
parser.add_argument('--file_name3',default='FINAL_script_local10-20200414_105643.log', type=str,help='path-name of the log file to be read')
parser.add_argument('--fig_name',default='local-epochs.pdf', type=str, help='output figure path-name')
parser.add_argument('--num_epochs', default=300, type=int, help='number of epochs we want to select our metrics from, we chose 300 in the paper')
args = parser.parse_args()
files = []
for name in [args.file_name1, args.file_name2, args.file_name3]:
with open(name, 'r') as f:
f = f.readlines()
files.append(f)
datasets = ['Market','DukeMTMC-reID','cuhk03-np-detected','cuhk01','MSMT17','viper','prid','3dpes','ilids']
acc_list = []
for f in files:
#Due to convergence issue, We select the best 3 federated models based on Big Dataset Performance and average the Rank-1 Acc&mAP as metrics
max_metrics = [-3,-2,-1]
dic = {item:[] for item in max_metrics}
epoch_count = 0
current_local_count = 0
sum_4_datasets = 0
temp_Rank1 = []
for line in f:
#test frequency=10 and there are 9 datasets to test
if epoch_count==int(args.num_epochs//10)*9:
break
if 'Rank' in line:
for p in [0,1,2,4]:
if datasets[p] in line:
lindex = line.index(':')
sum_4_datasets+=float(line[lindex+1:lindex+9])
lindex = line.index(':')
temp_Rank1.append(float(line[lindex+1:lindex+9]))
epoch_count+=1
current_local_count+=1
if current_local_count==9:
if sum_4_datasets/4>max_metrics[0]:
del dic[max_metrics[0]]
max_metrics[0] = sum_4_datasets/4
max_metrics.sort()
dic[sum_4_datasets/4] = temp_Rank1
sum_4_datasets=0
current_local_count=0
temp_Rank1=[]
arr = np.zeros(9)
for key in dic.keys():
arr+=np.array(dic[key])
arr = arr/3*100
acc_list.append(list(arr))
print(acc_list)
plt.figure(figsize=(20,10))
name_list = ['Market\n-1501','DukeMTMC\n-reID','CUHK03\n-NP','CUHK01','MSMT\n17','VIPeR','PRID\n2011','3DPeS','iLIDS\n-VID']
index = [4,1,0,2,6,3,5,7,8]
for i in range(len(acc_list)):
acc_list[i] = [acc_list[i][j] for j in index]
name_list = [name_list[i] for i in index]
x =list(range(len(index)))
total_width, n = 0.8, 3
width = total_width / n
plt.ylabel('Rank-1 Accuracy (%)', fontsize = 35)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.bar(x, acc_list[0], width=width, label='E=1',fc = '#7b8b6f')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, acc_list[1], width=width, label='E=5',fc = '#faead3')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, acc_list[2], width=width, label='E=10',tick_label = name_list,fc = '#965454')
plt.legend(loc=3,fontsize = 35, bbox_to_anchor = (0.13,-0.3), ncol = 3)
ax = plt.gca()
ax.set_ylim([15,85])
plt.savefig(args.fig_name, bbox_inches = 'tight', dpi = 300, format='pdf')
plt.close() | [
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.s... | [((87, 134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plotting"""'}), "(description='Plotting')\n", (110, 134), False, 'import argparse\n'), ((2469, 2497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2479, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2895), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rank-1 Accuracy (%)"""'], {'fontsize': '(35)'}), "('Rank-1 Accuracy (%)', fontsize=35)\n", (2859, 2895), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2921), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (2908, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2945), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(25)'}), '(fontsize=25)\n', (2932, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2947, 3010), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'acc_list[0]'], {'width': 'width', 'label': '"""E=1"""', 'fc': '"""#7b8b6f"""'}), "(x, acc_list[0], width=width, label='E=1', fc='#7b8b6f')\n", (2954, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3060, 3123), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'acc_list[1]'], {'width': 'width', 'label': '"""E=5"""', 'fc': '"""#faead3"""'}), "(x, acc_list[1], width=width, label='E=5', fc='#faead3')\n", (3067, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3264), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'acc_list[2]'], {'width': 'width', 'label': '"""E=10"""', 'tick_label': 'name_list', 'fc': '"""#965454"""'}), "(x, acc_list[2], width=width, label='E=10', tick_label=name_list, fc\n ='#965454')\n", (3180, 3264), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3330), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)', 'fontsize': '(35)', 'bbox_to_anchor': '(0.13, -0.3)', 'ncol': '(3)'}), '(loc=3, fontsize=35, bbox_to_anchor=(0.13, -0.3), ncol=3)\n', (3273, 3330), True, 'import 
matplotlib.pyplot as plt\n'), ((3341, 3350), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3348, 3350), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3443), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.fig_name'], {'bbox_inches': '"""tight"""', 'dpi': '(300)', 'format': '"""pdf"""'}), "(args.fig_name, bbox_inches='tight', dpi=300, format='pdf')\n", (3384, 3443), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3459), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3457, 3459), True, 'import matplotlib.pyplot as plt\n'), ((2329, 2340), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (2337, 2340), True, 'import numpy as np\n'), ((2381, 2399), 'numpy.array', 'np.array', (['dic[key]'], {}), '(dic[key])\n', (2389, 2399), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm, lstsq
def regression(dict_list,data):
d_data = data.shape[1] #dimension of data
weights_list = [0]*d_data
for i_d,dict_cur in enumerate(dict_list):
data_cur = data[:,i_d] #select one dimension of data
print('dictionary shape:',dict_cur.shape)
weights_cur,residual = lstsq(dict_cur,data_cur,rcond=None)[:2]
print(f'cond(dictionary): {np.linalg.cond(dict_cur)}')
print(f'norm of displacement: {norm(data_cur)}')
print(f'representation error: {residual[0]}')
weights_list[i_d] = weights_cur
return weights_cur
| [
"numpy.linalg.norm",
"numpy.linalg.lstsq",
"numpy.linalg.cond"
] | [((355, 392), 'numpy.linalg.lstsq', 'lstsq', (['dict_cur', 'data_cur'], {'rcond': 'None'}), '(dict_cur, data_cur, rcond=None)\n', (360, 392), False, 'from numpy.linalg import norm, lstsq\n'), ((430, 454), 'numpy.linalg.cond', 'np.linalg.cond', (['dict_cur'], {}), '(dict_cur)\n', (444, 454), True, 'import numpy as np\n'), ((497, 511), 'numpy.linalg.norm', 'norm', (['data_cur'], {}), '(data_cur)\n', (501, 511), False, 'from numpy.linalg import norm, lstsq\n')] |
"""
Some functions that deal with the geometry of clusters.
Author:
<NAME>
Date:
7/18/2015
"""
__all__ = ['sample_coordinates', 'sample_many_coordinates',
'get_distance_matrices', 'usample', 'floyd', 'mme',
'usample_many']
import numpy as np
import scipy.spatial as spt
import sys
def _sample_one_more(X, box, r):
"""
Sample one more atom.
"""
if X.shape[0] == 0:
return box[:, 0] + (box[:, 1] - box[:, 0]) * np.random.rand(1, 3)
while True:
x = box[:, 0] + (box[:, 1] - box[:, 0]) * np.random.rand(1, 3)
d = spt.distance_matrix(X, x)
if (d > 2. * r).all():
return x
def sample_coordinates(n, box=[(0, 1), (0, 1), (0, 1)], r=0.1):
"""
Sample the coordinates of a cluster.
:param n: The number of atoms in the molecule.
:param box: A box in which the molecule is confined.
:param r: The radius of each atom.
"""
box = np.array(box)
X = np.ndarray((n, 3))
for i in xrange(n):
X[i, :] = _sample_one_more(X[:i, :], box, r).flatten()
return X
def sample_many_coordinates(s, n, box=None, r=.3):
if box is None:
#box = np.array([(0, 2 * n * 2 * r) * 3])
box = np.array([(0, 2) * 3])
X = np.ndarray((s, n, 3))
for i in xrange(s):
X[i, :, :] = sample_coordinates(n, box, r)
return X
def get_distance_matrices(X):
"""
Write
"""
return np.array([spt.distance.pdist(x) for x in X])
def usample(L, U):
"""
Samples a distance matrix from the configuration space C(L, U).
"""
n = L.shape[0]
D = np.zeros(L.shape)
while True:
L_in = L.copy()
U_in = U.copy()
for i in xrange(n - 1):
for j in xrange(i + 1, n):
D[i, j] = L_in[i, j] + np.random.rand() * (U_in[i, j] - L_in[i, j])
D[j, i] = D[i, j]
U_in[i, j] = D[i, j]
U_in[j, i] = D[i, j]
L_in[i, j] = D[i, j]
L_in[j, i] = L_in[i, j]
floyd(L_in, U_in)
D, X = mme(D)
if (L <= D).all() and (D <= U).all():
break
return D, X
def usample_many(L, U, num_samples):
"""
Sample many distance matrices and the corresponding coordinates.
"""
n = L.shape[0]
X = np.ndarray((num_samples, n, 3))
D = np.ndarray((num_samples, n * (n - 1) / 2))
t = len(str(num_samples))
for i in xrange(num_samples):
sys.stdout.write('> sampling {0:s} of {1:s}\r'.format(str(i + 1).zfill(t),
str(num_samples)))
sys.stdout.flush()
d, x = usample(L, U)
d = spt.distance.squareform(d)
D[i, :] = d
X[i, :, :] = x
sys.stdout.write('\n')
return D, X
def floyd(L, U):
"""
Update lower and upper bounds of the distance matrix by enforcing the triangle
inequality.
"""
n = L.shape[0]
for k in xrange(n):
for i in xrange(n - 1):
for j in xrange(i + 1, n):
if U[i, j] > U[i, k] + U[k, j]:
U[i, j] = U[i, k] + U[k, j]
U[j, i] = U[i, j]
if L[i, j] < L[i, k] - U[k, j]:
L[i, j] = L[i, k] - U[k, j]
L[j, i] = L[i, j]
if L[i, j] < L[j, k] - U[k, i]:
L[i, j] = L[j, k] - U[k, i]
L[j, i] = L[i, j]
if L[i, j] > U[i, j]:
raise ValueError('Bad Bounds')
def mme(D):
"""
Projects D to the closest distance matrix.
"""
n = D.shape[0]
W = np.ndarray((n, n))
d_cm = np.ndarray((n,))
for i in xrange(n):
d_cm[i] = np.sum(D[i, :] ** 2) / n
for j in xrange(n):
for k in xrange(j + 1, n):
d_cm[i] -= D[j, k] ** 2 / n ** 2
for i in xrange(n):
for j in xrange(n):
W[i, j] = .5 * (d_cm[i] + d_cm[j] - D[i, j] ** 2)
lam, w = np.linalg.eig(W)
lam = lam[::-1][:3]
w = w[:, ::-1][:, :3]
X = np.ndarray((n, 3))
for i in xrange(min(3, n)):
X[:, i] = np.sqrt(lam[i]) * w[:, i]
return spt.distance.squareform(spt.distance.pdist(X)), X | [
"sys.stdout.write",
"numpy.sum",
"scipy.spatial.distance.squareform",
"numpy.zeros",
"numpy.linalg.eig",
"scipy.spatial.distance_matrix",
"numpy.array",
"sys.stdout.flush",
"scipy.spatial.distance.pdist",
"numpy.random.rand",
"numpy.ndarray",
"numpy.sqrt"
] | [((956, 969), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (964, 969), True, 'import numpy as np\n'), ((978, 996), 'numpy.ndarray', 'np.ndarray', (['(n, 3)'], {}), '((n, 3))\n', (988, 996), True, 'import numpy as np\n'), ((1265, 1286), 'numpy.ndarray', 'np.ndarray', (['(s, n, 3)'], {}), '((s, n, 3))\n', (1275, 1286), True, 'import numpy as np\n'), ((1621, 1638), 'numpy.zeros', 'np.zeros', (['L.shape'], {}), '(L.shape)\n', (1629, 1638), True, 'import numpy as np\n'), ((2330, 2361), 'numpy.ndarray', 'np.ndarray', (['(num_samples, n, 3)'], {}), '((num_samples, n, 3))\n', (2340, 2361), True, 'import numpy as np\n'), ((2370, 2412), 'numpy.ndarray', 'np.ndarray', (['(num_samples, n * (n - 1) / 2)'], {}), '((num_samples, n * (n - 1) / 2))\n', (2380, 2412), True, 'import numpy as np\n'), ((2784, 2806), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2800, 2806), False, 'import sys\n'), ((3666, 3684), 'numpy.ndarray', 'np.ndarray', (['(n, n)'], {}), '((n, n))\n', (3676, 3684), True, 'import numpy as np\n'), ((3696, 3712), 'numpy.ndarray', 'np.ndarray', (['(n,)'], {}), '((n,))\n', (3706, 3712), True, 'import numpy as np\n'), ((4024, 4040), 'numpy.linalg.eig', 'np.linalg.eig', (['W'], {}), '(W)\n', (4037, 4040), True, 'import numpy as np\n'), ((4099, 4117), 'numpy.ndarray', 'np.ndarray', (['(n, 3)'], {}), '((n, 3))\n', (4109, 4117), True, 'import numpy as np\n'), ((593, 618), 'scipy.spatial.distance_matrix', 'spt.distance_matrix', (['X', 'x'], {}), '(X, x)\n', (612, 618), True, 'import scipy.spatial as spt\n'), ((1234, 1256), 'numpy.array', 'np.array', (['[(0, 2) * 3]'], {}), '([(0, 2) * 3])\n', (1242, 1256), True, 'import numpy as np\n'), ((2649, 2667), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2665, 2667), False, 'import sys\n'), ((2709, 2735), 'scipy.spatial.distance.squareform', 'spt.distance.squareform', (['d'], {}), '(d)\n', (2732, 2735), True, 'import scipy.spatial as spt\n'), ((1454, 1475), 
'scipy.spatial.distance.pdist', 'spt.distance.pdist', (['x'], {}), '(x)\n', (1472, 1475), True, 'import scipy.spatial as spt\n'), ((3755, 3775), 'numpy.sum', 'np.sum', (['(D[i, :] ** 2)'], {}), '(D[i, :] ** 2)\n', (3761, 3775), True, 'import numpy as np\n'), ((4168, 4183), 'numpy.sqrt', 'np.sqrt', (['lam[i]'], {}), '(lam[i])\n', (4175, 4183), True, 'import numpy as np\n'), ((4229, 4250), 'scipy.spatial.distance.pdist', 'spt.distance.pdist', (['X'], {}), '(X)\n', (4247, 4250), True, 'import scipy.spatial as spt\n'), ((473, 493), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (487, 493), True, 'import numpy as np\n'), ((560, 580), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)'], {}), '(1, 3)\n', (574, 580), True, 'import numpy as np\n'), ((1813, 1829), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1827, 1829), True, 'import numpy as np\n')] |
import re
import os
from json import JSONEncoder
from .lc_material import LCMaterial
import bpm_backend as bpm
import dtmm
dtmm.conf.set_verbose(2)
from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter
from vtk.util import numpy_support as vn
import numpy as np
import multiprocessing
import pyfftw
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
class LightPropagator:
"""The LightPropagator class allows to propagate optical fields through a LC sample as
in a real microscope: a set of plane waves with different wavevectors and wavelengths
are sent on the LC sample, and the associated transmitted optical fields (which can now
longer be represented as plane waves due to diffraction) are calculated using one of the
backend.
The actual set of wavelengths for the plane waves (choosen at construction) approximate
the relevant part of the spectrum of the illumination light, whereas the set of
wavevectors (also calculated at construction) are determined from the numerical aperture
of the input condenser. The more open the condenser aperture is, the smoother the
micrograph will look, since an open condenser aperture is associated with a wide range
of angle for the wavectors of the incident plane waves. Conversely, an almost closed
condenser aperture is associated with a single plane wave incident normally on the
sample. For more details, see `[Koehler illumination setup]
<https://nemaktis.readthedocs.io/en/latest/intro/microscopy_model.html#koehler-illumination-setup>`_.
Note that with the FieldViewer class, the transmitted optical fields calculated with
this class can be projected on a visualisation screen through an objective of given
numerical aperture. The numerical apertures of both the objective and condenser aperture
can be set interactively in the FieldViewer class, whereas in this class we only
specify the maximum value allowed for both quantities.
The simulation and choice of backend is done by calling the method ``propagate_field``.
For each wavelength and wavevector of the incident plane wave, two simulations are done:
one with a light source polarised along x, and one with a light source polarised along
y. This allows us to fully caracterize the transmission of the LC sample and reconstruct
any kind of optical micrograph.
Parameters
----------
material : :class:`~nemaktis.lc_material.LCMaterial` object
wavelengths : array-like object
An array containing all the wavelengths of the spectrum for the light source.
max_NA_objective : float
Sets the maximal numerical aperture for the microscope objective (you can
dynamically adjust this quantity later on with a FieldViewer).
max_NA_condenser : float
Sets the maximal numerical aperture for the microscope condenser (you can
dynamically adjust this quantity later on with a FieldViewer).
N_radial_wavevectors : int
Sets the number of wavevectors in the radial direction for the illumination plane
waves. The total number of plane waves for each wavelength is 1+3*Nr*(Nr-1), where
Nr correspond to the value of this parameter.
"""
def __init__(self, *, material, wavelengths, max_NA_objective,
max_NA_condenser = 0, N_radial_wavevectors = 1):
if not isinstance(material, LCMaterial):
raise TypeError("material should be a LCMaterial object")
self._material = material
self._wavelengths = list(wavelengths)
self._max_NA_objective = max_NA_objective
self._max_NA_condenser = max_NA_condenser
self._N_radial_wavevectors = N_radial_wavevectors
self._wavevectors = np.zeros(
(1+3*N_radial_wavevectors*(N_radial_wavevectors-1),2))
for ir in range(1,N_radial_wavevectors):
beta = ir*self._max_NA_condenser/(N_radial_wavevectors-1)
for iphi in range(0,6*ir):
phi = iphi*np.pi/(3*ir)
self._wavevectors[1+3*ir*(ir-1)+iphi,0] = beta*np.cos(phi)
self._wavevectors[1+3*ir*(ir-1)+iphi,1] = beta*np.sin(phi)
@property
def material(self):
"""Returns the current LC material"""
return self._material
def propagate_fields(self, *, method, bulk_filename=None):
"""Propagate optical fields through the LC sample using the specified backend.
Parameters
----------
method : "bpm" | "dtmm(D)"
If equal to "bpm", the beam propagation backend will be used (see
`[The beam-propagation backend (bpm-solver)]
<https://nemaktis.readthedocs.io/en/latest/intro/microscopy_model.html#the-beam-propagation-backend-bpm-solver>`_
for details). Should be used if accuracy is privileged over speed.
If equal to "dtmm(D)" (with D a positive integer), the diffractive transfer
matrix backend will be used with the "diffraction" parameter set to D (see
`[The diffraction transfer matrix backend (dtmm)]
<https://nemaktis.readthedocs.io/en/latest/intro/microscopy_model.html#the-diffraction-transfer-matrix-backend-dtmm>`_
for details). Should be used with small values of D if speed is privileged over
accuracy (D=0 correspond to the Jones method).
bulk_filename : None or string
If none, the backend will not export the bulk value of the optical fields in the
LC layer. Else, the bulk fields values will be exported to a vti file whose
basename is set by this parameter.
"""
if method=="bpm":
return self._bpm_propagation(bulk_filename)
elif method=="dtmm":
return self._dtmm_propagation(bulk_filename)
else:
match = re.compile("dtmm\((\d+)\)").match(method)
if match:
return self._dtmm_propagation(
bulk_filename, diffraction=int(match.group(1)))
else:
raise Exception("Unrecognised method, should be 'bpm' or 'dtmm'")
    def _bpm_propagation(self, bulk_filename):
        """Run the beam-propagation (bpm-solver) backend.

        Builds a JSON settings string describing the mesh, material constants and
        illumination, hands it to the compiled bpm backend together with the raw
        LC field values, and wraps the returned transverse fields in an
        OpticalFields object.

        Parameters
        ----------
        bulk_filename : None or string
            Basename for the backend's bulk field export; None disables it.
        """
        print("{ Running beam propagation backend }\n")
        lc_field = self._material.lc_field
        dims = lc_field.get_mesh_dimensions()
        spacings = lc_field.get_mesh_spacings()
        # The backend expects the wavevectors as a flat [qx0,qy0,qx1,qy1,...] list.
        wavevectors = self._wavevectors.flatten().tolist()
        json_str = JSONEncoder().encode({
            "Algorithm settings": {
                "General": {
                    "LC field type": "Director" if lc_field._Nv==3 else "Q-tensor",
                    "Results folder name": "" },
                "Beam propagation": {
                    "N Woodbury steps": 2,
                    "Number of substeps per slab": 1 }},
            "Physics settings": {
                "Initial conditions": {
                    "Beam profile": "UniformBeam",
                    "LC field file": "",
                    "Mesh dimensions": dims,
                    "Mesh spacings": spacings,
                    "Basis convention": "XYZ" },
                "Coefficients": {
                    "no": str(self._material.no),
                    "ne": str(self._material.ne),
                    "nhost": str(self._material.nhost),
                    "nin": str(self._material.nin),
                    "Wavelengths": self._wavelengths,
                    "Wavevectors": wavevectors}},
            "Postprocessor settings": {
                "Bulk output": {
                    "Activate": bulk_filename is not None,
                    "Base name": bulk_filename if bulk_filename is not None else ""},
                "Screen output": {
                    "Activate": True,
                    "Base name": "",
                    "Isotropic layer thicknesses": self._material.iso_layer_thicknesses,
                    "Isotropic layer refractive indices": self._material.iso_layer_indices,
                    "Focalisation z-shift": 0,
                    "Numerical aperture": self._max_NA_objective }}})
        lc_vals = lc_field.vals.ravel()
        if lc_field.mask_type is not None:
            mask_vals = lc_field.mask_vals.ravel()
            mask_formula = lc_field.mask_formula if lc_field.mask_formula is not None else ""
            # The backend parses "^" powers and bare function names, so strip
            # numpy-specific syntax from the formula string.
            mask_formula = mask_formula.replace("np.","").replace("**","^")
        # 4 field components per transverse point (Ex,Ey for the two input
        # polarisations), for each (wavelength, wavevector) pair.
        N_E_vals = \
            self._wavevectors.shape[0]*len(self._wavelengths)*4*dims[0]*dims[1]
        if lc_field.mask_type is None:
            data_out = bpm.run_backend_without_mask(
                json_str, lc_vals, N_E_vals)
        else:
            data_out = bpm.run_backend_with_mask(
                json_str, mask_formula, lc_vals, mask_vals, N_E_vals)
        print("")
        output_fields = OpticalFields(
            wavelengths = self._wavelengths,
            max_NA_objective = self._max_NA_objective,
            max_NA_condenser = self._max_NA_condenser,
            N_radial_wavevectors = self._N_radial_wavevectors,
            mesh_lengths = (spacings[0]*(dims[0]-1), spacings[1]*(dims[1]-1)),
            mesh_dimensions = (dims[0], dims[1]))
        Nl = len(self._wavelengths)
        Nq = len(self._wavevectors)
        # NOTE(review): the 1/sqrt(2) factor presumably converts the backend's
        # intensity normalisation to the one used by OpticalFields — confirm
        # against the bpm backend documentation.
        output_fields.vals = data_out.reshape((Nl, Nq, 4, dims[1], dims[0]))/np.sqrt(2)
        return output_fields
def _dtmm_propagation(self, bulk_filename, diffraction=1):
print("{ Running diffraction transfer matrix backend }\n")
lc_field = self._material.lc_field
dims = lc_field.get_mesh_dimensions()
spacings = lc_field.get_mesh_spacings()
if np.abs(spacings[0]-spacings[1])>1e-6:
# 2D simulation with an artificial spacings along the normal
if spacings[0]==0:
spacings[0] = spacings[1]
elif spacings[1]==0:
spacings[1] = spacings[0]
else:
raise Exception("dtmm supports only uniform spacings in the XY plane.")
if isinstance(self._material.ne, str):
if "lambda" in self._material.ne:
print("Warning: dtmm does not support dispersive index; " +
"Using ne(0.6µm) instead")
l = 0.6
ne = eval(self._material.ne.replace("lambda","l").replace("^","**"))
else:
ne = self._material.ne
if isinstance(self._material.no, str):
if "lambda" in self._material.no:
print("Warning: dtmm does not support dispersive index; " +
"Using no(0.6µm) instead")
l = 0.6
no = eval(self._material.no.replace("lambda","l").replace("^","**"))
else:
no = self._material.no
if isinstance(self._material.nhost, str):
if "lambda" in self._material.nhost:
print("Warning: dtmm does not support dispersive index; " +
"Using nhost(0.6µm) instead")
l = 0.6
nhost = eval(self._material.nhost.replace("lambda","l").replace("^","**"))
else:
nhost = self._material.nhost
if isinstance(self._material.nhost, str):
if "lambda" in self._material.nin:
print("Warning: dtmm does not support dispersive index; " +
"Using nin(0.6µm) instead")
l = 0.6
nin = eval(self._material.nin.replace("lambda","l").replace("^","**"))
else:
nin = self._material.nin
if len(self._material.iso_layer_indices)!=0:
print("Warning: specified isotropic layers will be ignored since this feature is "+
"not yet supported in dtmm.")
print("")
if lc_field.mask_vals is not None:
mask_vals = lc_field.mask_vals>=0
else:
mask_vals = None
if lc_field._Nv==3:
optical_data = dtmm.director2data(
lc_field.vals, mask = mask_vals,
no = no, ne = ne, nhost = nhost,
thickness = spacings[2]/spacings[0]*np.ones(dims[2]))
elif lc_field._Nv==6:
ea_eff = 2*(ne**2-no**2)/3
e_iso = no**2+(ne**2-no**2)/3
if mask_vals is not None:
lc_vals = lc_field.vals.reshape((dims[2]*dims[1]*dims[0],6))
eps_vals = np.zeros((dims[2]*dims[1]*dims[0],6))
eps_vals[mask_vals.flatten(),0:3] = e_iso+ea_eff*lc_vals[mask_vals.flatten(),0:3]
eps_vals[mask_vals.flatten(),3:6] = ea_eff*lc_vals[mask_vals.flatten(),3:6]
eps_vals[~mask_vals.flatten(),0:3] = nhost**2
eps_vals = eps_vals.reshape((dims[2],dims[1],dims[0],6))
else:
eps_vals = np.zeros((dims[2],dims[1],dims[0],6))
eps_vals[:,:,:,0:3] = e_iso+ea_eff*lc_field.vals[:,:,:,0:3]
eps_vals[:,:,:,3:6] = ea_eff*lc_field.vals[:,:,:,3:6]
epsv,epsa = dtmm.data.eps2epsva(eps_vals)
optical_data = (spacings[2]/spacings[0]*np.ones(dims[2]),epsv,epsa)
wavelengths = 1000*np.array(self._wavelengths)
beta = np.zeros((len(self._wavevectors),))
phi = np.zeros((len(self._wavevectors),))
intensity = np.ones((len(self._wavevectors),))
for ir in range(1,self._N_radial_wavevectors):
for iphi in range(0,6*ir):
beta[1+3*ir*(ir-1)+iphi] = ir*self._max_NA_condenser/(self._N_radial_wavevectors-1)
phi[1+3*ir*(ir-1)+iphi] = iphi*np.pi/3
field_data_in = dtmm.illumination_data(
(dims[1],dims[0]), wavelengths, pixelsize=1000*spacings[0], n=nin,
beta=beta, phi=phi, intensity=intensity)
field_data_out = dtmm.transfer_field(
field_data_in, optical_data, nin=nin,
betamax=self._max_NA_objective, diffraction=diffraction,
ret_bulk=bulk_filename is not None)[0]
print("")
if bulk_filename is not None:
print("{ Saving optical fields to "+bulk_filename+".vti }")
lengths = lc_field.get_mesh_lengths()
vti_data = vtkImageData()
vti_data.SetDimensions(dims[0], dims[1], dims[2]+1)
vti_data.SetOrigin(-lengths[0]/2, -lengths[1]/2, -lengths[2]/2)
vti_data.SetSpacing(spacings[0], spacings[1], spacings[2]*(dims[2]-1)/dims[2])
wavelengths_data = vn.numpy_to_vtk(self._wavelengths)
wavelengths_data.SetName("lambda")
vti_data.GetFieldData().AddArray(wavelengths_data)
qx_data = vn.numpy_to_vtk(self._wavevectors[:,0])
qx_data.SetName("qx")
vti_data.GetFieldData().AddArray(qx_data)
qy_data = vn.numpy_to_vtk(self._wavevectors[:,1])
qy_data.SetName("qy")
vti_data.GetFieldData().AddArray(qy_data)
Np = dims[0]*dims[1]*(dims[2]+1)
for wave_idx in range(0,len(self._wavelengths)):
for q_idx in range(0,len(self._wavevectors)):
E_inputX = field_data_out[:-1,q_idx,0,wave_idx,[0,2],:,:].transpose(
(1,0,2,3)).reshape((2,Np)).transpose()
E_inputY = field_data_out[:-1,q_idx,1,wave_idx,[0,2],:,:].transpose(
(1,0,2,3)).reshape((2,Np)).transpose()
E_real_inputX = vn.numpy_to_vtk(np.real(E_inputX))
E_real_inputX.SetName("E_real_inputX_%d_%d" % (wave_idx,q_idx))
vti_data.GetPointData().AddArray(E_real_inputX)
E_imag_inputX = vn.numpy_to_vtk(np.imag(E_inputX))
E_imag_inputX.SetName("E_imag_inputX_%d_%d" % (wave_idx,q_idx))
vti_data.GetPointData().AddArray(E_imag_inputX)
E_real_inputY = vn.numpy_to_vtk(np.real(E_inputY))
E_real_inputY.SetName("E_real_inputY_%d_%d" % (wave_idx,q_idx))
vti_data.GetPointData().AddArray(E_real_inputY)
E_imag_inputY = vn.numpy_to_vtk(np.imag(E_inputY))
E_imag_inputY.SetName("E_imag_inputY_%d_%d" % (wave_idx,q_idx))
vti_data.GetPointData().AddArray(E_imag_inputY)
writer = vtkXMLImageDataWriter()
writer.SetFileName(bulk_filename+".vti")
writer.SetInputData(vti_data)
writer.Write()
# We only keep the last slice to compute micrographs
field_data_out = field_data_out[-1,:,:,:,:,:,:]
output_fields = OpticalFields(
wavelengths = self._wavelengths,
max_NA_objective = self._max_NA_objective,
max_NA_condenser = self._max_NA_condenser,
N_radial_wavevectors = self._N_radial_wavevectors,
mesh_lengths = (spacings[0]*(dims[0]-1), spacings[1]*(dims[1]-1)),
mesh_dimensions = (dims[0], dims[1]))
Nl = len(self._wavelengths)
Nq = len(self._wavevectors)
output_fields.vals = field_data_out[:,:,:,[0,2],:,:].transpose(
(2,0,1,3,4,5)).reshape((Nl,Nq,4,dims[1],dims[0]))
return output_fields
class OpticalFields:
    """The OpticalFields object stores the mesh information of the transverse mesh (plane mesh
    orthogonal to the z-direction, default altitude of 0) and the optical fields values on
    this mesh. Since this python package is mainly used to reconstruct micrographs, we only
    store internally the complex horizontal electric field for two simulations: one with a
    light source polarised along ``x``, and the other with a light source polarised along
    ``y``. In case multiple wavelengths/wavevectors were used in the simulation, we store
    these quantities separately for each wavelength/wavevector.

    This class is initialised either manually or with a path to a vti file containing
    previously calculated optical fields and mesh details.

    In the first version of this constructor:

    .. code-block:: python

        optical_fields = OpticalFields(
            wavelengths=[l0,l1,...,lN], max_NA_objective=NA_o,
            max_NA_condenser=NA_c, N_radial_wavevectors=Nr,
            mesh_lengths=(Lx,Ly), mesh_dimensions=(Nx,Ny))

    the actual values of the transverse fields need to be provided later using the
    ``vals`` setter property (shape (N_wavelengths,N_wavevectors,4,Ny,Nx), with
    N_wavevectors=3*Nr*(Nr-1)+1).

    In the second version of this constructor:

    .. code-block:: python

        optical_fields = OpticalFields(vti_file="path to vti file")

    the values of the wavelengths and transverse fields are automatically assigned from the
    vti file.
    """
    def __init__(self, **kwargs):
        if len(kwargs)==1:
            # Initialisation from a vti file previously written by save_to_vti.
            if not os.path.isfile(kwargs["vti_file"]):
                raise Exception("VTI file does not exists")
            print("{ Initializing optical fields from "+kwargs["vti_file"]+" }")
            reader = vtkXMLImageDataReader()
            reader.SetFileName(kwargs["vti_file"])
            reader.Update()
            point_data = reader.GetOutput().GetPointData()
            field_data = reader.GetOutput().GetFieldData()
            dims = np.array(reader.GetOutput().GetDimensions())
            spacings = np.array(reader.GetOutput().GetSpacing())
            if dims[2]!=1:
                raise Exception("The specified vti file should include 2D data")
            if not field_data.HasArray("lambda"):
                raise Exception(
                    "VTI file is missing the field array \"lambda\" for the wavelengths")
            self._wavelengths = vn.vtk_to_numpy(field_data.GetArray("lambda"))
            if not field_data.HasArray("qx"):
                raise Exception(
                    "VTI file is missing the field array \"qx\" for the wavevectors")
            if not field_data.HasArray("qy"):
                raise Exception(
                    "VTI file is missing the field array \"qy\" for the wavevectors")
            self._wavevectors = np.stack(
                (vn.vtk_to_numpy(field_data.GetArray("qx")),
                 vn.vtk_to_numpy(field_data.GetArray("qy"))), axis=-1)
            # Invert Nq = 1+3*Nr*(Nr-1) to recover the number of radial wavevectors.
            Nq = len(self._wavevectors)
            Nr = int(np.round((1+np.sqrt((4*Nq-1)/3))/2))
            if Nq!=1+3*Nr*(Nr-1):
                raise Exception(
                    "VTI file contain the wrong number of wavevectors")
            else:
                self._N_radial_wavevectors = Nr
            # The outermost wavevector norm gives back the condenser NA.
            self._max_NA_condenser = np.sqrt(np.sum(self._wavevectors**2,axis=1))[-1]
            # Sanity check: each ring of wavevectors must sit at the expected radius.
            for qr_idx in range(0,Nr):
                q_idx_start = 1+3*qr_idx*(qr_idx-1) if qr_idx>0 else 0
                q_idx_end = 1+3*qr_idx*(qr_idx+1)
                q_norms = np.sqrt(np.sum(self._wavevectors[q_idx_start:q_idx_end,:]**2,axis=1))
                if np.max(np.abs(q_norms-qr_idx*self._max_NA_condenser/(Nr-1)))>1e-8:
                    raise Exception(
                        "Incompatible wavevector mesh inside the VTI file")
            if not field_data.HasArray("max_NA_objective"):
                raise Exception(
                    "VTI file is missing the field scalar \"max_NA_objective\"")
            self._max_NA_objective = \
                vn.vtk_to_numpy(field_data.GetArray("max_NA_objective"))[0]
            Nl = len(self._wavelengths)
            # Byte-aligned buffers and precomputed plans for fast transverse FFTs.
            self._vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._fft_vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._focused_vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._fft_plan = pyfftw.FFTW(
                self._vals, self._fft_vals, axes=(3,4),
                threads=multiprocessing.cpu_count())
            self._ifft_plan = pyfftw.FFTW(
                self._fft_vals, self._focused_vals, axes=(3,4),
                threads=multiprocessing.cpu_count(), direction="FFTW_BACKWARD")
            # Reassemble the complex fields from the real/imag point-data arrays.
            for wave_idx in range(0,Nl):
                for q_idx in range(0,Nq):
                    suffixes = ["real_inputX", "imag_inputX", "real_inputY", "imag_inputY"]
                    for suffix in suffixes:
                        array_name = "E_"+suffix+"_%d_%d" % (wave_idx,q_idx)
                        if not point_data.HasArray(array_name):
                            raise Exception(
                                "Missing array \""+array_name+"\" in VTI file")
                    E_inputX = \
                        vn.vtk_to_numpy(point_data.GetArray(
                            "E_real_inputX_%d_%d" % (wave_idx,q_idx))) + \
                        1j*vn.vtk_to_numpy(point_data.GetArray(
                            "E_imag_inputX_%d_%d" % (wave_idx,q_idx)))
                    E_inputY = \
                        vn.vtk_to_numpy(point_data.GetArray(
                            "E_real_inputY_%d_%d" % (wave_idx,q_idx))) + \
                        1j*vn.vtk_to_numpy(point_data.GetArray(
                            "E_imag_inputY_%d_%d" % (wave_idx,q_idx)))
                    self._vals[wave_idx,q_idx,[0,1],:,:] = \
                        E_inputX[:,[0,1]].transpose().reshape(2,dims[1],dims[0])
                    self._vals[wave_idx,q_idx,[2,3],:,:] = \
                        E_inputY[:,[0,1]].transpose().reshape(2,dims[1],dims[0])
            (self._Nx, self._Ny) = (dims[0], dims[1])
            (self._dx, self._dy) = (spacings[0], spacings[1])
            (self._Lx, self._Ly) = (spacings[0]*(dims[0]-1), spacings[1]*(dims[1]-1))
        else:
            # Manual initialisation: mesh and illumination settings are given as
            # kwargs; field values are assigned later through the "vals" setter.
            if len(kwargs["mesh_dimensions"])!=2:
                raise Exception("mesh_dimensions should be an array-like object of size 2")
            if len(kwargs["mesh_lengths"])!=2:
                raise Exception("mesh_lengths should be an array-like object of size 2")
            self._wavelengths = np.array(kwargs["wavelengths"])
            self._max_NA_objective = kwargs["max_NA_objective"]
            self._max_NA_condenser = kwargs["max_NA_condenser"]
            self._N_radial_wavevectors = kwargs["N_radial_wavevectors"]
            Nr = self._N_radial_wavevectors
            self._wavevectors = np.zeros((1+3*Nr*(Nr-1),2))
            for ir in range(1,Nr):
                beta = ir*self._max_NA_condenser/(Nr-1)
                for iphi in range(0,6*ir):
                    phi = iphi*np.pi/(3*ir)
                    self._wavevectors[1+3*ir*(ir-1)+iphi,0] = beta*np.cos(phi)
                    self._wavevectors[1+3*ir*(ir-1)+iphi,1] = beta*np.sin(phi)
            dims = np.array(kwargs["mesh_dimensions"])
            lengths = np.array(kwargs["mesh_lengths"])
            (self._Nx, self._Ny) = tuple(int(dim) for dim in dims)
            (self._Lx, self._Ly) = tuple(lengths)
            # np.maximum guards against division by zero for degenerate 1-point axes.
            (self._dx, self._dy) = tuple(lengths/np.maximum(np.ones(2),dims-1))
            Nl = len(self._wavelengths)
            Nq = len(self._wavevectors)
            self._vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._fft_vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._focused_vals = pyfftw.empty_aligned(
                (Nl,Nq,4,dims[1],dims[0]), dtype="complex128")
            self._fft_plan = pyfftw.FFTW(
                self._vals, self._fft_vals, axes=(3,4),
                threads=multiprocessing.cpu_count())
            self._ifft_plan = pyfftw.FFTW(
                self._fft_vals, self._focused_vals, axes=(3,4),
                threads=multiprocessing.cpu_count(), direction="FFTW_BACKWARD")
        # Spacing of the radial wavevector mesh (used when integrating over the
        # condenser aperture).
        if self._N_radial_wavevectors>1 and self._max_NA_condenser>0:
            self._delta_qr = self._max_NA_condenser/(self._N_radial_wavevectors-1)
        else:
            self._delta_qr = 1
        # Precompute the Fourier-space propagation filter used by focus_fields:
        # exp(i*kz) with kz=sqrt(k0^2-|k_t|^2) inside the light cone (zero outside),
        # later raised to the power |z_focus| to propagate by that distance.
        IY, IX = np.meshgrid(range(0,self._Ny), range(0,self._Nx), indexing="ij")
        kx = -np.abs(2*np.pi/self._Lx*(IX-0.5*self._Nx)) + np.pi*self._Nx/self._Lx
        ky = -np.abs(2*np.pi/self._Ly*(IY-0.5*self._Ny)) + np.pi*self._Ny/self._Ly
        k0 = np.tile(2*np.pi/self._wavelengths, (self._Nx,self._Ny,1,Nq,1)).transpose()
        px = k0*self._wavevectors[:,0][np.newaxis,:,np.newaxis,np.newaxis,np.newaxis]
        py = k0*self._wavevectors[:,1][np.newaxis,:,np.newaxis,np.newaxis,np.newaxis]
        kSqr = (kx[np.newaxis,np.newaxis,:,:]+px)**2+(ky[np.newaxis,np.newaxis,:,:]+py)**2
        mask = kSqr.flatten()<k0.flatten()**2
        filt = 1j*np.zeros(Nl*Nq*self._Nx*self._Ny)
        filt[mask] = np.exp(1j*np.sqrt(k0.flatten()[mask]**2-kSqr.flatten()[mask]))
        self._objective_filter = np.reshape(filt, (Nl,Nq,1,self._Ny,self._Nx))
        # Binary mask cutting off wavevectors beyond the objective's NA.
        self._objective_mask = (kSqr<(k0*self._max_NA_objective)**2).astype(float)
        self._kSqr = kSqr
        self._k0 = k0
        self._z = 0
    def copy(self):
        """Returns a hard copy of this OpticalFields object.

        The focus altitude is reset to 0 by construction of the new object.
        """
        # Bug fix: max_NA_objective must be forwarded, otherwise the
        # manual-initialisation branch of the constructor raises
        # KeyError("max_NA_objective").
        new_fields = OpticalFields(
            wavelengths = self._wavelengths,
            max_NA_objective = self._max_NA_objective,
            max_NA_condenser = self._max_NA_condenser,
            N_radial_wavevectors = self._N_radial_wavevectors,
            mesh_dimensions = (self._Nx, self._Ny),
            mesh_lengths = (self._Lx, self._Ly))
        new_fields.vals = self.vals # need to use the setter method for byte-aligned hard copy
        return new_fields
    @property
    def focused_vals(self):
        """Numpy array for the optical fields values after focalisation by the microscope
        objective, of shape (N_wavelengths,N_wavevectors,4,Ny,Nx).
        """
        return self._focused_vals
    @property
    def vals(self):
        """Numpy array for the optical fields values, of shape
        (N_wavelengths,N_wavevectors,4,Ny,Nx).

        If you want to initialize by hand the optical fields, the four components in the
        third dimension correspond to:

        * complex Ex field for an input polarisation//x
        * complex Ey field for an input polarisation//x
        * complex Ex field for an input polarisation//y
        * complex Ey field for an input polarisation//y
        """
        return self._vals
    @vals.setter
    def vals(self, fields_ndarray):
        if self._vals.shape==fields_ndarray.shape:
            # Copy into the byte-aligned buffer instead of rebinding, so that
            # the precomputed FFTW plans stay valid.
            self._vals[:] = fields_ndarray
        else:
            raise Exception("Wrong shape for the optical field ndarray")
    @property
    def z_focus(self):
        """Current focus altitude used by focus_fields."""
        return self._z
    def update_NA_objective(self, new_NA):
        """Clip new_NA to [0, max_NA_objective] and rebuild the objective mask."""
        NA = max(0,min(self._max_NA_objective,new_NA))
        self._objective_mask = (self._kSqr<(self._k0*NA)**2).astype(float)
    def focus_fields(self, z_focus=None):
        """Propagate the optical fields through the objective lens to the screen conjugate
        to the focusing plane (whose altitude inside the sample is set with the parameter
        z_focus)."""
        if z_focus is not None:
            filt = self._objective_mask*self._objective_filter**np.abs(z_focus)
            self._z = z_focus
        else:
            z_focus = self._z
            filt = self._objective_mask*self._objective_filter**np.abs(z_focus)
        self._fft_plan()
        # Conjugating the filter propagates backwards for negative focus shifts.
        self._fft_vals *= np.conj(filt) if z_focus<0 else filt
        self._ifft_plan()
    def save_to_vti(self, filename):
        """Save the optical fields into a vti file.

        The ".vti" extension is automatically appended, no need to include it in the filename
        parameter (but in case you do only one extension will be added)
        """
        if filename[-4:]==".vti":
            path = filename
        else:
            path = filename+".vti"
        print("{ Saving optical fields to "+path+" }")
        vti_data = vtkImageData()
        vti_data.SetDimensions(self._Nx, self._Ny, 1)
        vti_data.SetOrigin(-self._Lx/2, -self._Ly/2, 0)
        vti_data.SetSpacing(self._dx, self._dy, 0)
        Np = self.get_n_vertices()
        # Fields are exported as separate real/imag arrays per wavelength and
        # wavevector, mirroring the layout read back by the constructor.
        for wave_idx in range(0,len(self._wavelengths)):
            for q_idx in range(0,len(self._wavevectors)):
                E_inputX = self._vals[wave_idx,q_idx,[0,1],:,:].reshape((2,Np)).transpose()
                E_inputY = self._vals[wave_idx,q_idx,[2,3],:,:].reshape((2,Np)).transpose()
                E_real_inputX = vn.numpy_to_vtk(np.real(E_inputX))
                E_real_inputX.SetName("E_real_inputX_%d_%d" % (wave_idx,q_idx))
                vti_data.GetPointData().AddArray(E_real_inputX)
                E_imag_inputX = vn.numpy_to_vtk(np.imag(E_inputX))
                E_imag_inputX.SetName("E_imag_inputX_%d_%d" % (wave_idx,q_idx))
                vti_data.GetPointData().AddArray(E_imag_inputX)
                E_real_inputY = vn.numpy_to_vtk(np.real(E_inputY))
                E_real_inputY.SetName("E_real_inputY_%d_%d" % (wave_idx,q_idx))
                vti_data.GetPointData().AddArray(E_real_inputY)
                E_imag_inputY = vn.numpy_to_vtk(np.imag(E_inputY))
                E_imag_inputY.SetName("E_imag_inputY_%d_%d" % (wave_idx,q_idx))
                vti_data.GetPointData().AddArray(E_imag_inputY)
        wavelengths_data = vn.numpy_to_vtk(self._wavelengths)
        wavelengths_data.SetName("lambda")
        vti_data.GetFieldData().AddArray(wavelengths_data)
        qx_data = vn.numpy_to_vtk(self._wavevectors[:,0])
        qx_data.SetName("qx")
        vti_data.GetFieldData().AddArray(qx_data)
        qy_data = vn.numpy_to_vtk(self._wavevectors[:,1])
        qy_data.SetName("qy")
        vti_data.GetFieldData().AddArray(qy_data)
        NA_data = vn.numpy_to_vtk(np.array([self._max_NA_objective]))
        NA_data.SetName("max_NA_objective")
        vti_data.GetFieldData().AddArray(NA_data)
        writer = vtkXMLImageDataWriter()
        writer.SetFileName(path)
        writer.SetInputData(vti_data)
        writer.Write()
    def get_pos(self, ix, iy):
        """Returns the position associated with the mesh indices (ix,iy)

        It is assumed that the mesh is centered on the origin (0,0).
        """
        return (ix*self._dx-self._Lx/2, iy*self._dy-self._Ly/2)
    def get_wavelengths(self):
        """Returns the wavelength array"""
        return self._wavelengths
    def get_wavevectors(self):
        """Returns the wavevectors array"""
        return self._wavevectors
    def get_qr_index(self, NA_condenser):
        """For internal use.

        Allows to build sub-range of wavevector index for a given numerical aperture of the
        condenser, which must be smaller than the internal maximal numerical aperture set at
        construction.
        """
        return int(np.ceil(NA_condenser/self._delta_qr))
    def get_delta_qr(self):
        """For internal use.

        Allows to build integration rule with respect to the wavectors.
        """
        return self._delta_qr
    def get_mesh_dimensions(self):
        """Returns the dimensions (Nx,Ny) of the transverse mesh"""
        return (self._Nx, self._Ny)
    def get_mesh_lengths(self):
        """Returns the lengths (Lx,Ly) of the transverse mesh"""
        return (self._Lx, self._Ly)
    def get_mesh_spacings(self):
        """Returns the spacings (dx,dy) of the transverse mesh"""
        # Doc fix: only two spacings exist for the transverse (2D) mesh.
        return (self._dx, self._dy)
    def get_n_vertices(self):
        """Returns the number of vertices in the transverse mesh"""
        return self._Nx*self._Ny
| [
"numpy.abs",
"dtmm.transfer_field",
"vtk.util.numpy_support.numpy_to_vtk",
"numpy.sum",
"numpy.ones",
"pyfftw.empty_aligned",
"os.path.isfile",
"numpy.sin",
"numpy.imag",
"numpy.tile",
"vtk.vtkImageData",
"multiprocessing.cpu_count",
"warnings.simplefilter",
"bpm_backend.run_backend_with_m... | [((125, 149), 'dtmm.conf.set_verbose', 'dtmm.conf.set_verbose', (['(2)'], {}), '(2)\n', (146, 149), False, 'import dtmm\n'), ((361, 414), 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (373, 414), False, 'from warnings import simplefilter\n'), ((3806, 3878), 'numpy.zeros', 'np.zeros', (['(1 + 3 * N_radial_wavevectors * (N_radial_wavevectors - 1), 2)'], {}), '((1 + 3 * N_radial_wavevectors * (N_radial_wavevectors - 1), 2))\n', (3814, 3878), True, 'import numpy as np\n'), ((13850, 13987), 'dtmm.illumination_data', 'dtmm.illumination_data', (['(dims[1], dims[0])', 'wavelengths'], {'pixelsize': '(1000 * spacings[0])', 'n': 'nin', 'beta': 'beta', 'phi': 'phi', 'intensity': 'intensity'}), '((dims[1], dims[0]), wavelengths, pixelsize=1000 *\n spacings[0], n=nin, beta=beta, phi=phi, intensity=intensity)\n', (13872, 13987), False, 'import dtmm\n'), ((27014, 27063), 'numpy.reshape', 'np.reshape', (['filt', '(Nl, Nq, 1, self._Ny, self._Nx)'], {}), '(filt, (Nl, Nq, 1, self._Ny, self._Nx))\n', (27024, 27063), True, 'import numpy as np\n'), ((30068, 30082), 'vtk.vtkImageData', 'vtkImageData', ([], {}), '()\n', (30080, 30082), False, 'from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter\n'), ((31453, 31487), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavelengths'], {}), '(self._wavelengths)\n', (31468, 31487), True, 'from vtk.util import numpy_support as vn\n'), ((31609, 31649), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavevectors[:, 0]'], {}), '(self._wavevectors[:, 0])\n', (31624, 31649), True, 'from vtk.util import numpy_support as vn\n'), ((31748, 31788), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavevectors[:, 1]'], {}), '(self._wavevectors[:, 1])\n', (31763, 31788), True, 'from vtk.util import numpy_support as vn\n'), ((32051, 32074), 
'vtk.vtkXMLImageDataWriter', 'vtkXMLImageDataWriter', ([], {}), '()\n', (32072, 32074), False, 'from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter\n'), ((8828, 8885), 'bpm_backend.run_backend_without_mask', 'bpm.run_backend_without_mask', (['json_str', 'lc_vals', 'N_E_vals'], {}), '(json_str, lc_vals, N_E_vals)\n', (8856, 8885), True, 'import bpm_backend as bpm\n'), ((8940, 9019), 'bpm_backend.run_backend_with_mask', 'bpm.run_backend_with_mask', (['json_str', 'mask_formula', 'lc_vals', 'mask_vals', 'N_E_vals'], {}), '(json_str, mask_formula, lc_vals, mask_vals, N_E_vals)\n', (8965, 9019), True, 'import bpm_backend as bpm\n'), ((9592, 9602), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9599, 9602), True, 'import numpy as np\n'), ((9914, 9947), 'numpy.abs', 'np.abs', (['(spacings[0] - spacings[1])'], {}), '(spacings[0] - spacings[1])\n', (9920, 9947), True, 'import numpy as np\n'), ((13392, 13419), 'numpy.array', 'np.array', (['self._wavelengths'], {}), '(self._wavelengths)\n', (13400, 13419), True, 'import numpy as np\n'), ((14031, 14190), 'dtmm.transfer_field', 'dtmm.transfer_field', (['field_data_in', 'optical_data'], {'nin': 'nin', 'betamax': 'self._max_NA_objective', 'diffraction': 'diffraction', 'ret_bulk': '(bulk_filename is not None)'}), '(field_data_in, optical_data, nin=nin, betamax=self.\n _max_NA_objective, diffraction=diffraction, ret_bulk=bulk_filename is not\n None)\n', (14050, 14190), False, 'import dtmm\n'), ((14425, 14439), 'vtk.vtkImageData', 'vtkImageData', ([], {}), '()\n', (14437, 14439), False, 'from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter\n'), ((14703, 14737), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavelengths'], {}), '(self._wavelengths)\n', (14718, 14737), True, 'from vtk.util import numpy_support as vn\n'), ((14871, 14911), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavevectors[:, 0]'], {}), '(self._wavevectors[:, 0])\n', (14886, 
14911), True, 'from vtk.util import numpy_support as vn\n'), ((15022, 15062), 'vtk.util.numpy_support.numpy_to_vtk', 'vn.numpy_to_vtk', (['self._wavevectors[:, 1]'], {}), '(self._wavevectors[:, 1])\n', (15037, 15062), True, 'from vtk.util import numpy_support as vn\n'), ((16539, 16562), 'vtk.vtkXMLImageDataWriter', 'vtkXMLImageDataWriter', ([], {}), '()\n', (16560, 16562), False, 'from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter\n'), ((19270, 19293), 'vtk.vtkXMLImageDataReader', 'vtkXMLImageDataReader', ([], {}), '()\n', (19291, 19293), False, 'from vtk import vtkImageData, vtkXMLImageDataReader, vtkXMLImageDataWriter\n'), ((21687, 21758), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (21707, 21758), False, 'import pyfftw\n'), ((21801, 21872), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (21821, 21872), False, 'import pyfftw\n'), ((21919, 21990), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (21939, 21990), False, 'import pyfftw\n'), ((24253, 24284), 'numpy.array', 'np.array', (["kwargs['wavelengths']"], {}), "(kwargs['wavelengths'])\n", (24261, 24284), True, 'import numpy as np\n'), ((24562, 24598), 'numpy.zeros', 'np.zeros', (['(1 + 3 * Nr * (Nr - 1), 2)'], {}), '((1 + 3 * Nr * (Nr - 1), 2))\n', (24570, 24598), True, 'import numpy as np\n'), ((24946, 24981), 'numpy.array', 'np.array', (["kwargs['mesh_dimensions']"], {}), "(kwargs['mesh_dimensions'])\n", (24954, 24981), True, 'import numpy as np\n'), ((25004, 25036), 'numpy.array', 'np.array', (["kwargs['mesh_lengths']"], {}), "(kwargs['mesh_lengths'])\n", (25012, 25036), True, 'import numpy as np\n'), ((25341, 
25412), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (25361, 25412), False, 'import pyfftw\n'), ((25455, 25526), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (25475, 25526), False, 'import pyfftw\n'), ((25573, 25644), 'pyfftw.empty_aligned', 'pyfftw.empty_aligned', (['(Nl, Nq, 4, dims[1], dims[0])'], {'dtype': '"""complex128"""'}), "((Nl, Nq, 4, dims[1], dims[0]), dtype='complex128')\n", (25593, 25644), False, 'import pyfftw\n'), ((26863, 26902), 'numpy.zeros', 'np.zeros', (['(Nl * Nq * self._Nx * self._Ny)'], {}), '(Nl * Nq * self._Nx * self._Ny)\n', (26871, 26902), True, 'import numpy as np\n'), ((29549, 29562), 'numpy.conj', 'np.conj', (['filt'], {}), '(filt)\n', (29556, 29562), True, 'import numpy as np\n'), ((31903, 31937), 'numpy.array', 'np.array', (['[self._max_NA_objective]'], {}), '([self._max_NA_objective])\n', (31911, 31937), True, 'import numpy as np\n'), ((32952, 32990), 'numpy.ceil', 'np.ceil', (['(NA_condenser / self._delta_qr)'], {}), '(NA_condenser / self._delta_qr)\n', (32959, 32990), True, 'import numpy as np\n'), ((6512, 6525), 'json.JSONEncoder', 'JSONEncoder', ([], {}), '()\n', (6523, 6525), False, 'from json import JSONEncoder\n'), ((13254, 13283), 'dtmm.data.eps2epsva', 'dtmm.data.eps2epsva', (['eps_vals'], {}), '(eps_vals)\n', (13273, 13283), False, 'import dtmm\n'), ((19070, 19104), 'os.path.isfile', 'os.path.isfile', (["kwargs['vti_file']"], {}), "(kwargs['vti_file'])\n", (19084, 19104), False, 'import os\n'), ((26293, 26345), 'numpy.abs', 'np.abs', (['(2 * np.pi / self._Lx * (IX - 0.5 * self._Nx))'], {}), '(2 * np.pi / self._Lx * (IX - 0.5 * self._Nx))\n', (26299, 26345), True, 'import numpy as np\n'), ((26376, 26428), 'numpy.abs', 'np.abs', (['(2 * np.pi / self._Ly * (IY - 0.5 * 
self._Ny))'], {}), '(2 * np.pi / self._Ly * (IY - 0.5 * self._Ny))\n', (26382, 26428), True, 'import numpy as np\n'), ((26459, 26529), 'numpy.tile', 'np.tile', (['(2 * np.pi / self._wavelengths)', '(self._Nx, self._Ny, 1, Nq, 1)'], {}), '(2 * np.pi / self._wavelengths, (self._Nx, self._Ny, 1, Nq, 1))\n', (26466, 26529), True, 'import numpy as np\n'), ((4144, 4155), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4150, 4155), True, 'import numpy as np\n'), ((4219, 4230), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4225, 4230), True, 'import numpy as np\n'), ((12637, 12679), 'numpy.zeros', 'np.zeros', (['(dims[2] * dims[1] * dims[0], 6)'], {}), '((dims[2] * dims[1] * dims[0], 6))\n', (12645, 12679), True, 'import numpy as np\n'), ((13045, 13085), 'numpy.zeros', 'np.zeros', (['(dims[2], dims[1], dims[0], 6)'], {}), '((dims[2], dims[1], dims[0], 6))\n', (13053, 13085), True, 'import numpy as np\n'), ((20835, 20873), 'numpy.sum', 'np.sum', (['(self._wavevectors ** 2)'], {'axis': '(1)'}), '(self._wavevectors ** 2, axis=1)\n', (20841, 20873), True, 'import numpy as np\n'), ((21070, 21134), 'numpy.sum', 'np.sum', (['(self._wavevectors[q_idx_start:q_idx_end, :] ** 2)'], {'axis': '(1)'}), '(self._wavevectors[q_idx_start:q_idx_end, :] ** 2, axis=1)\n', (21076, 21134), True, 'import numpy as np\n'), ((22126, 22153), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (22151, 22153), False, 'import multiprocessing\n'), ((22286, 22313), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (22311, 22313), False, 'import multiprocessing\n'), ((25780, 25807), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (25805, 25807), False, 'import multiprocessing\n'), ((25940, 25967), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (25965, 25967), False, 'import multiprocessing\n'), ((29327, 29342), 'numpy.abs', 'np.abs', (['z_focus'], {}), '(z_focus)\n', (29333, 29342), True, 
'import numpy as np\n'), ((29481, 29496), 'numpy.abs', 'np.abs', (['z_focus'], {}), '(z_focus)\n', (29487, 29496), True, 'import numpy as np\n'), ((30628, 30645), 'numpy.real', 'np.real', (['E_inputX'], {}), '(E_inputX)\n', (30635, 30645), True, 'import numpy as np\n'), ((30839, 30856), 'numpy.imag', 'np.imag', (['E_inputX'], {}), '(E_inputX)\n', (30846, 30856), True, 'import numpy as np\n'), ((31051, 31068), 'numpy.real', 'np.real', (['E_inputY'], {}), '(E_inputY)\n', (31058, 31068), True, 'import numpy as np\n'), ((31262, 31279), 'numpy.imag', 'np.imag', (['E_inputY'], {}), '(E_inputY)\n', (31269, 31279), True, 'import numpy as np\n'), ((5913, 5943), 're.compile', 're.compile', (['"""dtmm\\\\((\\\\d+)\\\\)"""'], {}), "('dtmm\\\\((\\\\d+)\\\\)')\n", (5923, 5943), False, 'import re\n'), ((12366, 12382), 'numpy.ones', 'np.ones', (['dims[2]'], {}), '(dims[2])\n', (12373, 12382), True, 'import numpy as np\n'), ((13336, 13352), 'numpy.ones', 'np.ones', (['dims[2]'], {}), '(dims[2])\n', (13343, 13352), True, 'import numpy as np\n'), ((15676, 15693), 'numpy.real', 'np.real', (['E_inputX'], {}), '(E_inputX)\n', (15683, 15693), True, 'import numpy as np\n'), ((15899, 15916), 'numpy.imag', 'np.imag', (['E_inputX'], {}), '(E_inputX)\n', (15906, 15916), True, 'import numpy as np\n'), ((16123, 16140), 'numpy.real', 'np.real', (['E_inputY'], {}), '(E_inputY)\n', (16130, 16140), True, 'import numpy as np\n'), ((16346, 16363), 'numpy.imag', 'np.imag', (['E_inputY'], {}), '(E_inputY)\n', (16353, 16363), True, 'import numpy as np\n'), ((21158, 21218), 'numpy.abs', 'np.abs', (['(q_norms - qr_idx * self._max_NA_condenser / (Nr - 1))'], {}), '(q_norms - qr_idx * self._max_NA_condenser / (Nr - 1))\n', (21164, 21218), True, 'import numpy as np\n'), ((24835, 24846), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (24841, 24846), True, 'import numpy as np\n'), ((24914, 24925), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (24920, 24925), True, 'import numpy as np\n'), ((25215, 25225), 
'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (25222, 25225), True, 'import numpy as np\n'), ((20560, 20585), 'numpy.sqrt', 'np.sqrt', (['((4 * Nq - 1) / 3)'], {}), '((4 * Nq - 1) / 3)\n', (20567, 20585), True, 'import numpy as np\n')] |
from __future__ import division
import os
import cv2
import numpy as np
import tensorflow as tf
from TensorflowToolbox.utility import file_loader
from TensorflowToolbox.data_flow import data_arg
class InputLayer(object):
    """Data-loading layer for a segmentation pipeline.

    Reads (image_path, label_path) pairs from a list file, loads and resizes
    them with OpenCV on the Python side, and exposes the loader to TensorFlow
    through ``tf.py_func``; ``process_data`` applies configurable augmentation.
    """
    def __init__(self, file_name, params, is_train):
        # Text file listing "image_path label_path" pairs, one per line.
        self.file_name = file_name
        self.file_parse = file_loader.TextFileLoader()
        self.file_parse.read_file(file_name, params.shuffle)
        self.file_len = self.file_parse.get_file_len()
        self.is_train = is_train
        self.params = params
        # Augmentation pipeline applied later in process_data().
        self.data_arg = data_arg.DataArg()
    def _py_read_data(self):
        """Python-side loader invoked via tf.py_func.

        Returns one sample as (image_name, label_name, image, label) where
        image is float32 in [0, 1] with 3 channels and label is a float32
        single-max-channel map resized with nearest-neighbour.
        """
        if self.is_train:
            # NOTE(review): hard-coded relative dataset root for training;
            # at eval time paths are used as-is -- confirm this matches callers.
            data_dir = "../segmentation_dataset/"
        else:
            data_dir = ''
        #file_list = self.file_parse.get_next(1)
        #image_name, label_name = file_list[0]
        while(1):
            # Keep drawing list entries until both files exist on disk.
            # NOTE(review): loops forever if no listed pair exists.
            file_list = self.file_parse.get_next(1)
            image_name, label_name = file_list[0]
            # Skip two known-bad MSRA10K entries.
            if image_name == "/MSRA10K/Imgs/142426.jpg" or label_name == "/MSRA10K/GTs/102052.png":
                continue
            if os.path.exists(data_dir+image_name) and os.path.exists(data_dir+label_name):
                break
        image = cv2.imread(data_dir + image_name)
        label = cv2.imread(data_dir + label_name)
        #back_ground = 1 - label
        #label = np.concatenate((back_ground, label), 2)
        # Collapse the 3-channel label image to one channel (max over BGR).
        label = np.amax(label, 2)
        image = cv2.resize(image, (self.params.r_img_h, self.params.r_img_w)).astype(np.float32)
        # Scale pixel values to [0, 1].
        image /= 255.0
        # Nearest-neighbour keeps label values discrete after resizing.
        label = cv2.resize(label, (self.params.r_label_h, self.params.r_label_w),
                interpolation = cv2.INTER_NEAREST).astype(np.float32)
        if len(image.shape) < 3:
            # Grayscale input: replicate to 3 channels.
            image = np.expand_dims(image,2)
            image = np.tile(image, (1,1,3))
        if len(label.shape) < 3:
            # Ensure the label has an explicit channel dimension.
            label = np.expand_dims(label, 2)
        # print(label.shape)
        # label[label > 1] = 1.0 # comment by shz
        # label[label < 1] = 0.0
        # image_name = image_name.encode("utf-8")
        # label_name = label_name.encode("utf-8")
        return image_name, label_name, image, label
    def read_data(self, dtypes):
        """Wrap the Python loader as a TF op producing tensors of ``dtypes``."""
        return tf.py_func(self._py_read_data, [], dtypes)
    # def ImageCrop(self, im_path, gt_path, im_reshape, transformer):
    #     return tf.py_func(self.processImageCrop, [im_path, gt_path, im_reshape,transformer], [float32, float32])
    #
    #
    #
    # def processImageCrop(im_path, gt_path, im_reshape, transformer):
    #
    #     img_src = caffe.io.load_image(im_path)
    #     img_src = perturb(img_src)
    #     pathparts = im_path.split('/')
    #     gt = caffe.io.load_image(gt_path)
    #     gt = gt[:,:,0]
    #     gt[gt>0] = 1
    #
    #     crop = getRandCrop(img_src.shape[1], img_src.shape[0], 0.9, 1.0)
    #     image_mean = [123.68/255, 116.779/255, 103.939/255]
    #     data1 = img_src[crop[1]:crop[3],crop[0]:crop[2],:]
    #     data2 = gt[crop[1]:crop[3],crop[0]:crop[2]]
    #     if random.random() > 0.5:
    #         data1 = data1[:, ::-1,]
    #         data2 =data2[:, ::-1,]
    #     if random.random() > 0.7: # conver to grey
    #         data1 = rgb2gray(data1)
    #
    #     data1 = transformer.preprocess('data_in',data1)
    #     data2 = resize_gt(data2, (im_reshape[0]*gt_scale,im_reshape[1]*gt_scale))
    #
    #     return data1, data2
    def process_data(self, read_tensor, dtypes):
        """Apply the configured augmentation to one sample from read_data().

        NOTE(review): the ``dtypes`` parameter is unused here -- confirm it
        is required by the calling convention before removing it.
        """
        pq_params = self.params.preprocess_queue
        image_name = read_tensor[0]
        label_name = read_tensor[1]
        # image = read_tensor[2]
        # label = read_tensor[3]
        arg_dict = self.params.arg_dict
        if not self.is_train:
            # At eval time, strip every random-augmentation option; a random
            # crop is replaced by a deterministic centre crop of the same size.
            for d in arg_dict:
                if "rcrop_size" in d:
                    rcrop_size = d.pop('rcrop_size')
                    d['ccrop_size'] = rcrop_size
                if "rbright_max" in d:
                    rbright_max = d.pop('rbright_max')
                if "rcontrast_lower" in d:
                    rcontrast_lower = d.pop('rcontrast_lower')
                if "rcontrast_upper" in d:
                    rcontrast_upper = d.pop('rcontrast_upper')
                if "rhue_max" in d:
                    rhue_max = d.pop('rhue_max')
                if "rflip_updown" in d:
                    rflip_updown = d.pop('rflip_updown')
                if "rflipp_leftright" in d:
                    rflipp_leftright = d.pop('rflipp_leftright')
        data_list = read_tensor[2:]
        # Augment image and label jointly so crops/flips stay aligned.
        data_list = self.data_arg(data_list, arg_dict)
        image = data_list[0]
        label = data_list[1]
        return image_name, label_name, image, label
| [
"TensorflowToolbox.data_flow.data_arg.DataArg",
"tensorflow.py_func",
"os.path.exists",
"numpy.expand_dims",
"numpy.amax",
"cv2.imread",
"numpy.tile",
"TensorflowToolbox.utility.file_loader.TextFileLoader",
"cv2.resize"
] | [((339, 367), 'TensorflowToolbox.utility.file_loader.TextFileLoader', 'file_loader.TextFileLoader', ([], {}), '()\n', (365, 367), False, 'from TensorflowToolbox.utility import file_loader\n'), ((570, 588), 'TensorflowToolbox.data_flow.data_arg.DataArg', 'data_arg.DataArg', ([], {}), '()\n', (586, 588), False, 'from TensorflowToolbox.data_flow import data_arg\n'), ((1215, 1248), 'cv2.imread', 'cv2.imread', (['(data_dir + image_name)'], {}), '(data_dir + image_name)\n', (1225, 1248), False, 'import cv2\n'), ((1265, 1298), 'cv2.imread', 'cv2.imread', (['(data_dir + label_name)'], {}), '(data_dir + label_name)\n', (1275, 1298), False, 'import cv2\n'), ((1405, 1422), 'numpy.amax', 'np.amax', (['label', '(2)'], {}), '(label, 2)\n', (1412, 1422), True, 'import numpy as np\n'), ((2249, 2291), 'tensorflow.py_func', 'tf.py_func', (['self._py_read_data', '[]', 'dtypes'], {}), '(self._py_read_data, [], dtypes)\n', (2259, 2291), True, 'import tensorflow as tf\n'), ((1779, 1803), 'numpy.expand_dims', 'np.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (1793, 1803), True, 'import numpy as np\n'), ((1823, 1848), 'numpy.tile', 'np.tile', (['image', '(1, 1, 3)'], {}), '(image, (1, 1, 3))\n', (1830, 1848), True, 'import numpy as np\n'), ((1909, 1933), 'numpy.expand_dims', 'np.expand_dims', (['label', '(2)'], {}), '(label, 2)\n', (1923, 1933), True, 'import numpy as np\n'), ((1098, 1135), 'os.path.exists', 'os.path.exists', (['(data_dir + image_name)'], {}), '(data_dir + image_name)\n', (1112, 1135), False, 'import os\n'), ((1138, 1175), 'os.path.exists', 'os.path.exists', (['(data_dir + label_name)'], {}), '(data_dir + label_name)\n', (1152, 1175), False, 'import os\n'), ((1448, 1509), 'cv2.resize', 'cv2.resize', (['image', '(self.params.r_img_h, self.params.r_img_w)'], {}), '(image, (self.params.r_img_h, self.params.r_img_w))\n', (1458, 1509), False, 'import cv2\n'), ((1569, 1671), 'cv2.resize', 'cv2.resize', (['label', '(self.params.r_label_h, self.params.r_label_w)'], 
{'interpolation': 'cv2.INTER_NEAREST'}), '(label, (self.params.r_label_h, self.params.r_label_w),\n interpolation=cv2.INTER_NEAREST)\n', (1579, 1671), False, 'import cv2\n')] |
"""Base model framework."""
import math
import random
import pickle
import argparse
import datetime
from datetime import timedelta
from timeit import default_timer as timer
import numpy as np
from mindspore import context, save_checkpoint, load_checkpoint, load_param_into_net
import mindspore.nn as nn
from utils.mindspore_helper import GradWrap
from utils.logger import Logger
from utils.data import LabeledDocuments
from utils.evaluation import compute_retrieval_precision
from utils.mindspore_helper import gen_checkpoints_list
# Run MindSpore in PyNative (eager) mode on CPU for this module.
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
class BaseModel(nn.Cell):
    """Base class for retrieval models.

    Handles data loading, an outer loop of (optionally random-search)
    training runs, checkpointing of the best model, and evaluation by
    retrieval precision. Subclasses are expected to provide
    ``define_parameters``, ``construct`` and ``encode_discrete``.
    """
    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams
        self.load_data()
    def load_data(self):
        # Dataset wrapper providing train/database/val/test loaders.
        self.data = LabeledDocuments(self.hparams.data_path, self.hparams.num_neighbors)
    def run_training_sessions(self):
        """run outer training session"""
        logger = Logger(self.hparams.model_path + '.log', on=True)
        val_perfs = []
        best_val_perf = float('-inf')
        start = timer()
        random.seed(self.hparams.seed) # For reproducible random runs
        for run_num in range(1, self.hparams.num_runs + 1):
            state_dict, val_perf = self.run_training_session(run_num, logger)
            val_perfs.append(val_perf)
            if val_perf > best_val_perf:
                best_val_perf = val_perf
                logger.log('----New best {:8.2f}, saving'.format(val_perf))
                # Persist both the weights and the hyper-parameters that
                # produced them, so load() can rebuild the same model.
                save_checkpoint(gen_checkpoints_list(state_dict), self.hparams.model_path+'.ckpt')
                pickle.dump(self.hparams, open(self.hparams.model_path+'.hpar', 'wb'))
        logger.log('Time: %s' % str(timedelta(seconds=round(timer() - start))))
        self.load()
        if self.hparams.num_runs > 1:
            logger.log_perfs(val_perfs)
            logger.log('best hparams: ' + self.flag_hparams())
        val_perf, test_perf = self.run_test()
        logger.log('Val:  {:8.2f}'.format(val_perf))
        logger.log('Test: {:8.2f}'.format(test_perf))
    def run_training_session(self, run_num, logger):
        """run inner training session"""
        if self.hparams.num_runs > 1:
            # Random-search mode: resample every gridded hyper-parameter.
            logger.log('RANDOM RUN: %d/%d' % (run_num, self.hparams.num_runs))
            for hparam, values in self.get_hparams_grid().items():
                assert hasattr(self.hparams, hparam)
                self.hparams.__dict__[hparam] = random.choice(values)
        np.random.seed(self.hparams.seed)
        random.seed(self.hparams.seed)
        train_loader, database_loader, val_loader, _ = self.data.get_loaders(
            self.hparams.num_trees, self.hparams.alpha, self.hparams.batch_size, self.hparams.num_workers,
            shuffle_train=True, get_test=False)
        # define_parameters() is expected to be implemented by subclasses.
        self.define_parameters(self.data.num_nodes, self.data.num_edges)
        opt = nn.Adam(params=self.trainable_params(), learning_rate=self.hparams.lr)
        train_network = GradWrap(self)
        train_network.set_train()
        best_val_perf = float('-inf')
        loss_sum = 0
        num_steps = 0
        bad_epochs = 0
        times = []
        try:
            for epoch in range(1, self.hparams.epochs + 1):
                starttime = datetime.datetime.now()
                for _, batch in enumerate(train_loader):
                    x, label, edge1, edge2, weight = batch[0], batch[1], batch[2], batch[3], batch[4]
                    # GradWrap returns gradients of the loss w.r.t. parameters.
                    grads = train_network(x, label, edge1, edge2, weight)
                    opt(grads)
                    # NOTE(review): the loss is recomputed in a second forward
                    # pass here purely for logging.
                    loss = self.construct(x, edge1, edge2, weight)
                    loss_sum += loss.asnumpy()
                    num_steps += 1
                endtime = datetime.datetime.now()
                times.append(endtime - starttime)
                if math.isnan(loss_sum):
                    logger.log('Stopping epoch because loss is NaN')
                    break
                val_perf = self.evaluate(database_loader, val_loader)
                logger.log('End of epoch {:3d}'.format(epoch), False)
                logger.log(' Loss: {:8.2f} | val perf {:8.2f}'.format(loss_sum / num_steps, val_perf), False)
                if val_perf > best_val_perf:
                    best_val_perf = val_perf
                    bad_epochs = 0
                    logger.log('\t\t*Best model so far, deep copying*')
                    # NOTE(review): if the very first epoch breaks on NaN loss,
                    # state_dict is never assigned and the return below raises
                    # UnboundLocalError -- confirm intended behaviour.
                    state_dict = self.parameters_and_names()
                else:
                    bad_epochs += 1
                    logger.log('\t\tBad epoch %d' % bad_epochs)
                if bad_epochs > self.hparams.num_bad_epochs:
                    # Early stopping after too many epochs without improvement.
                    break
        except KeyboardInterrupt:
            logger.log('-' * 89)
            logger.log('Exiting from training early')
        logger.log("time per training epoch: " + str(np.mean(times)))
        return state_dict, best_val_perf
    def evaluate(self, database_loader, eval_loader):
        """Retrieval precision of eval queries against the database split."""
        perf = compute_retrieval_precision(database_loader, eval_loader, self.encode_discrete,\
                self.hparams.distance_metric, self.hparams.num_retrieve, self.hparams.num_features)
        return perf
    def load(self):
        """Restore the best checkpoint together with its saved hyper-parameters."""
        checkpoint = load_checkpoint(self.hparams.model_path+'.ckpt')
        hparams = pickle.load(open(self.hparams.model_path+'.hpar', 'rb'))
        self.hparams = hparams
        # Rebuild parameters before loading weights into the network.
        self.define_parameters(self.data.num_nodes, self.data.num_edges)
        load_param_into_net(self, checkpoint, strict_load=True)
    def run_test(self):
        """Evaluate the current model on the validation and test splits."""
        _, database_loader, val_loader, test_loader = self.data.get_loaders(
            self.hparams.num_trees, self.hparams.alpha, self.hparams.batch_size, self.hparams.num_workers,
            shuffle_train=False, get_test=True)
        val_perf = self.evaluate(database_loader, val_loader)
        test_perf = self.evaluate(database_loader, test_loader)
        return val_perf, test_perf
    def flag_hparams(self):
        """flag hyperparameters"""
        flags = '%s %s' % (self.hparams.model_path, self.hparams.data_path)
        for hparam in vars(self.hparams):
            val = getattr(self.hparams, hparam)
            if str(val) == 'False':
                # store_true flags are simply omitted when off.
                continue
            elif str(val) == 'True':
                flags += ' --%s' % (hparam)
            elif str(hparam) in {'model_path', 'data_path', 'num_runs',
                                 'num_workers'}:
                continue
            else:
                flags += ' --%s %s' % (hparam, val)
        return flags
    @staticmethod
    def get_general_argparser():
        """command parser"""
        parser = argparse.ArgumentParser()
        parser.add_argument('model_path', type=str)
        parser.add_argument('data_path', type=str)
        parser.add_argument('--train', action='store_true',
                            help='train a model?')
        parser.add_argument('--num_features', type=int, default=64,
                            help='num discrete features [%(default)d]')
        parser.add_argument('--dim_hidden', type=int, default=500,
                            help='dimension of hidden state [%(default)d]')
        parser.add_argument('--num_layers', type=int, default=0,
                            help='num layers [%(default)d]')
        parser.add_argument('--num_neighbors', type=int, default=10,
                            help='num neighbors [%(default)d]')
        parser.add_argument('--batch_size', type=int, default=128,
                            help='batch size [%(default)d]')
        parser.add_argument('--lr', type=float, default=0.001,
                            help='initial learning rate [%(default)g]')
        parser.add_argument('--init', type=float, default=0.05,
                            help='unif init range (default if 0) [%(default)g]')
        parser.add_argument('--clip', type=float, default=10,
                            help='gradient clipping [%(default)g]')
        parser.add_argument('--epochs', type=int, default=100,
                            help='max number of epochs [%(default)d]')
        parser.add_argument('--num_runs', type=int, default=1,
                            help='num random runs (not random if 1) '
                                 '[%(default)d]')
        parser.add_argument('--num_retrieve', type=int, default=100,
                            help='num neighbors to retrieve [%(default)d]')
        parser.add_argument('--num_bad_epochs', type=int, default=6,
                            help='num indulged bad epochs [%(default)d]')
        parser.add_argument('--num_workers', type=int, default=0,
                            help='num dataloader workers [%(default)d]')
        parser.add_argument('--distance_metric', default='hamming',
                            choices=['hamming', 'cosine'])
        parser.add_argument('--no_tfidf', action='store_true',
                            help='raw bag-of-words as input instead of tf-idf?')
        parser.add_argument('--seed', type=int, default=50971,
                            help='random seed [%(default)d]')
        parser.add_argument('--cuda', action='store_true',
                            help='use CUDA?')
        return parser
| [
"mindspore.context.set_context",
"math.isnan",
"numpy.random.seed",
"argparse.ArgumentParser",
"mindspore.load_checkpoint",
"mindspore.load_param_into_net",
"timeit.default_timer",
"utils.mindspore_helper.GradWrap",
"utils.evaluation.compute_retrieval_precision",
"random.choice",
"utils.data.Lab... | [((535, 603), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE', 'device_target': '"""CPU"""'}), "(mode=context.PYNATIVE_MODE, device_target='CPU')\n", (554, 603), False, 'from mindspore import context, save_checkpoint, load_checkpoint, load_param_into_net\n'), ((814, 882), 'utils.data.LabeledDocuments', 'LabeledDocuments', (['self.hparams.data_path', 'self.hparams.num_neighbors'], {}), '(self.hparams.data_path, self.hparams.num_neighbors)\n', (830, 882), False, 'from utils.data import LabeledDocuments\n'), ((979, 1028), 'utils.logger.Logger', 'Logger', (["(self.hparams.model_path + '.log')"], {'on': '(True)'}), "(self.hparams.model_path + '.log', on=True)\n", (985, 1028), False, 'from utils.logger import Logger\n'), ((1106, 1113), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1111, 1113), True, 'from timeit import default_timer as timer\n'), ((1122, 1152), 'random.seed', 'random.seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (1133, 1152), False, 'import random\n'), ((2515, 2548), 'numpy.random.seed', 'np.random.seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (2529, 2548), True, 'import numpy as np\n'), ((2557, 2587), 'random.seed', 'random.seed', (['self.hparams.seed'], {}), '(self.hparams.seed)\n', (2568, 2587), False, 'import random\n'), ((3004, 3018), 'utils.mindspore_helper.GradWrap', 'GradWrap', (['self'], {}), '(self)\n', (3012, 3018), False, 'from utils.mindspore_helper import GradWrap\n'), ((4977, 5150), 'utils.evaluation.compute_retrieval_precision', 'compute_retrieval_precision', (['database_loader', 'eval_loader', 'self.encode_discrete', 'self.hparams.distance_metric', 'self.hparams.num_retrieve', 'self.hparams.num_features'], {}), '(database_loader, eval_loader, self.\n encode_discrete, self.hparams.distance_metric, self.hparams.\n num_retrieve, self.hparams.num_features)\n', (5004, 5150), False, 'from utils.evaluation import 
compute_retrieval_precision\n'), ((5216, 5266), 'mindspore.load_checkpoint', 'load_checkpoint', (["(self.hparams.model_path + '.ckpt')"], {}), "(self.hparams.model_path + '.ckpt')\n", (5231, 5266), False, 'from mindspore import context, save_checkpoint, load_checkpoint, load_param_into_net\n'), ((5452, 5507), 'mindspore.load_param_into_net', 'load_param_into_net', (['self', 'checkpoint'], {'strict_load': '(True)'}), '(self, checkpoint, strict_load=True)\n', (5471, 5507), False, 'from mindspore import context, save_checkpoint, load_checkpoint, load_param_into_net\n'), ((6633, 6658), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6656, 6658), False, 'import argparse\n'), ((2484, 2505), 'random.choice', 'random.choice', (['values'], {}), '(values)\n', (2497, 2505), False, 'import random\n'), ((3278, 3301), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3299, 3301), False, 'import datetime\n'), ((3743, 3766), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3764, 3766), False, 'import datetime\n'), ((3837, 3857), 'math.isnan', 'math.isnan', (['loss_sum'], {}), '(loss_sum)\n', (3847, 3857), False, 'import math\n'), ((1554, 1586), 'utils.mindspore_helper.gen_checkpoints_list', 'gen_checkpoints_list', (['state_dict'], {}), '(state_dict)\n', (1574, 1586), False, 'from utils.mindspore_helper import gen_checkpoints_list\n'), ((4849, 4863), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (4856, 4863), True, 'import numpy as np\n'), ((1769, 1776), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1774, 1776), True, 'from timeit import default_timer as timer\n')] |
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
from configs import input_ProtoConfig
def f(x, argInverse):
    """Elementwise log-transform of ``x`` or its inverse.

    Parameters
    ----------
    x : array_like
        Input value(s).
    argInverse : bool
        When falsy, return log(x + 1); when truthy, return exp(x) - 1.

    Returns
    -------
    Transformed value(s), same shape as ``x``.
    """
    # Truthiness test instead of the non-idiomatic `== False` comparison;
    # log1p/expm1 are numerically accurate near zero, unlike log(x+1)/exp(x)-1.
    if argInverse:
        return np.expm1(x)
    return np.log1p(x)
def _make_hallem_dataset(file, N_ORNS_TOTAL = 50, arg_positive=True, arg_expand= True):
'''
:param config:
:return: hallem carlson dataset in matrix format
110 odors * 24 ORNs, with spontaneous activity subtracted
'''
N_ODORS = 110
N_ORNS = 24
N_ORNS_FILL = N_ORNS_TOTAL - N_ORNS
with open(file) as f:
vec = f.readlines()
vec = [int(x.strip()) for x in vec]
mat = np.reshape(vec, (N_ODORS+1, N_ORNS),'F')
spontaneous_activity = mat[-1,:]
odor_activation = mat[:-1,:] + spontaneous_activity
if arg_expand:
out = np.zeros((N_ODORS, N_ORNS_TOTAL))
for i in range(N_ODORS):
sampled = np.random.choice(odor_activation[i,:], size=N_ORNS_FILL, replace=True)
out[i,:N_ORNS] = odor_activation[i,:]
out[i,N_ORNS:] = sampled
else:
out = odor_activation
if arg_positive:
out[out < 0] = 0
return out
def _simple_distribution_subplot(data, r, c, max, savename):
    """Save an ``r`` x ``c`` grid of per-column histograms of ``data``.

    Column ``i`` of ``data`` is plotted in grid cell ``i`` (row-major),
    binned over [0, max] with 20 bins, then the figure is written to
    ``savename``.
    """
    fig, axes = plt.subplots(r, c)
    n_cols = data.shape[1]
    for col in range(n_cols):
        cell = np.unravel_index(col, (r, c))
        axes[cell].hist(data[:, col], range=(0, max), bins=20)
    plt.savefig(savename)
def _covariance_image(data, savename):
    """Save a heat-map rendering of the matrix ``data`` to ``savename``.

    Uses the diverging 'RdBu_r' colormap (no interpolation) with a colorbar,
    suitable for covariance/correlation matrices.
    """
    fig = plt.figure()
    img = plt.imshow(data, cmap='RdBu_r', interpolation='none')
    plt.colorbar()
    plt.savefig(savename)
def _generate_from_hallem(config=None, size= 1000):
    """Fit a log-space Gaussian to the Hallem-Carlson responses and draw
    ``size`` synthetic samples.

    Also saves a histogram of the summed real responses to 'hallem'.

    NOTE(review): the back-transformed, clamped samples ``fsampled`` are
    computed but discarded; the *log-space* ``sampled`` array is what gets
    returned -- confirm which one callers actually expect.
    """
    arg_positive = True
    arg_expand = False
    if config is None:
        config = input_ProtoConfig()
    odor_activation = _make_hallem_dataset(config.hallem_path, N_ORNS_TOTAL= config.N_ORN,
                                           arg_positive=arg_positive, arg_expand=arg_expand)
    # Model the responses as jointly Gaussian in log(x+1) space.
    log_odor_activation = f(odor_activation, argInverse=False)
    means = np.mean(log_odor_activation, axis=0)
    covs = np.cov(log_odor_activation, rowvar=False)
    sampled = np.random.multivariate_normal(means, covs, size= size)
    # Back-transform and clamp to the largest response seen in the data.
    fsampled = f(sampled, argInverse=True)
    realistic_max = np.max(odor_activation.flatten())
    fsampled[fsampled > realistic_max] = realistic_max
    plt.figure()
    plt.hist(np.sum(odor_activation, axis=1))
    plt.savefig('hallem')
    # plt.figure()
    # plt.hist(np.sum(fsampled, axis=1))
    # plt.show()
    # sampled_cc = np.cov(fsampled, rowvar=False)
    # hallem_cc = np.cov(odor_activation, rowvar=False)
    # _simple_distribution_subplot(odor_activation, 7, 8, 100, 'HALLEM')
    # _simple_distribution_subplot(fsampled, 7, 8, 100, 'SAMPLED')
    # _covariance_image(hallem_cc, 'HALLEM_COV')
    # _covariance_image(sampled_cc, 'SAMPLED_COV')
    return sampled
_generate_from_hallem() | [
"numpy.sum",
"numpy.log",
"matplotlib.pyplot.imshow",
"configs.input_ProtoConfig",
"numpy.zeros",
"numpy.unravel_index",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.random.multivariate_normal",
"numpy.reshape",
"numpy.exp",
"numpy.random.choice",
"numpy.c... | [((670, 713), 'numpy.reshape', 'np.reshape', (['vec', '(N_ODORS + 1, N_ORNS)', '"""F"""'], {}), "(vec, (N_ODORS + 1, N_ORNS), 'F')\n", (680, 713), True, 'import numpy as np\n'), ((1263, 1281), 'matplotlib.pyplot.subplots', 'plt.subplots', (['r', 'c'], {}), '(r, c)\n', (1275, 1281), True, 'from matplotlib import pyplot as plt\n'), ((1416, 1437), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savename'], {}), '(savename)\n', (1427, 1437), True, 'from matplotlib import pyplot as plt\n'), ((1482, 1494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1492, 1494), True, 'from matplotlib import pyplot as plt\n'), ((1499, 1552), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'cmap': '"""RdBu_r"""', 'interpolation': '"""none"""'}), "(data, cmap='RdBu_r', interpolation='none')\n", (1509, 1552), True, 'from matplotlib import pyplot as plt\n'), ((1557, 1571), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1569, 1571), True, 'from matplotlib import pyplot as plt\n'), ((1576, 1597), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savename'], {}), '(savename)\n', (1587, 1597), True, 'from matplotlib import pyplot as plt\n'), ((2020, 2056), 'numpy.mean', 'np.mean', (['log_odor_activation'], {'axis': '(0)'}), '(log_odor_activation, axis=0)\n', (2027, 2056), True, 'import numpy as np\n'), ((2068, 2109), 'numpy.cov', 'np.cov', (['log_odor_activation'], {'rowvar': '(False)'}), '(log_odor_activation, rowvar=False)\n', (2074, 2109), True, 'import numpy as np\n'), ((2124, 2177), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['means', 'covs'], {'size': 'size'}), '(means, covs, size=size)\n', (2153, 2177), True, 'import numpy as np\n'), ((2337, 2349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2347, 2349), True, 'from matplotlib import pyplot as plt\n'), ((2400, 2421), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hallem"""'], {}), "('hallem')\n", (2411, 2421), True, 'from matplotlib import 
pyplot as plt\n'), ((198, 211), 'numpy.log', 'np.log', (['(x + 1)'], {}), '(x + 1)\n', (204, 211), True, 'import numpy as np\n'), ((838, 871), 'numpy.zeros', 'np.zeros', (['(N_ODORS, N_ORNS_TOTAL)'], {}), '((N_ODORS, N_ORNS_TOTAL))\n', (846, 871), True, 'import numpy as np\n'), ((1330, 1357), 'numpy.unravel_index', 'np.unravel_index', (['i', '(r, c)'], {}), '(i, (r, c))\n', (1346, 1357), True, 'import numpy as np\n'), ((1740, 1759), 'configs.input_ProtoConfig', 'input_ProtoConfig', ([], {}), '()\n', (1757, 1759), False, 'from configs import input_ProtoConfig\n'), ((2363, 2394), 'numpy.sum', 'np.sum', (['odor_activation'], {'axis': '(1)'}), '(odor_activation, axis=1)\n', (2369, 2394), True, 'import numpy as np\n'), ((237, 246), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (243, 246), True, 'import numpy as np\n'), ((927, 998), 'numpy.random.choice', 'np.random.choice', (['odor_activation[i, :]'], {'size': 'N_ORNS_FILL', 'replace': '(True)'}), '(odor_activation[i, :], size=N_ORNS_FILL, replace=True)\n', (943, 998), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
author: <NAME>
email: <EMAIL>
license: MIT
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import numpy as np
from numpy import sin, cos, tan
def phiThetaPsiDotToPQR(phi, theta, psi, phidot, thetadot, psidot):
    """Map ZYX Euler-angle rates (phidot, thetadot, psidot) to body-frame
    angular rates [p, q, r]."""
    s_phi, c_phi = sin(phi), cos(phi)
    s_theta, c_theta = sin(theta), cos(theta)
    p = phidot - s_theta * psidot
    q = c_phi * thetadot + s_phi * c_theta * psidot
    r = c_phi * c_theta * psidot - s_phi * thetadot
    return np.array([p, q, r])
def xyzDotToUVW_euler(phi, theta, psi, xdot, ydot, zdot):
    """Rotate a world-frame velocity [xdot, ydot, zdot] into the body frame
    using the ZYX (yaw-pitch-roll) Euler sequence; returns [u, v, w]."""
    sph, cph = sin(phi), cos(phi)
    sth, cth = sin(theta), cos(theta)
    sps, cps = sin(psi), cos(psi)
    # Body-from-world direction cosine matrix for the ZYX sequence.
    dcm = np.array([
        [cth * cps,                   cth * sps,                  -sth],
        [sph * sth * cps - cph * sps, sph * sth * sps + cph * cps, sph * cth],
        [cph * sth * cps + sph * sps, cph * sth * sps - sph * cps, cph * cth],
    ])
    return dcm.dot(np.array([xdot, ydot, zdot]))
def xyzDotToUVW_Flat_euler(phi, theta, psi, xdot, ydot, zdot):
    """Yaw-only ("flat") world-to-body velocity transform.

    Only ``psi`` (yaw) is applied; ``phi`` and ``theta`` are accepted for a
    uniform signature but ignored. Returns [uFlat, vFlat, wFlat].
    """
    c_psi, s_psi = cos(psi), sin(psi)
    return np.array([c_psi * xdot + s_psi * ydot,
                     c_psi * ydot - s_psi * xdot,
                     zdot])
def xyzDotToUVW_Flat_quat(q, xdot, ydot, zdot):
    """Yaw-only ("flat") world-to-body velocity transform from a quaternion.

    ``q`` is [q0, q1, q2, q3] with q0 the scalar part. Returns
    [uFlat, vFlat, wFlat].
    """
    w, x, y, z = q[0], q[1], q[2], q[3]
    u_flat = (w**2 - x**2 + y**2 - z**2) * xdot + 2 * (w * z - x * y) * ydot
    v_flat = (w**2 + x**2 - y**2 - z**2) * ydot - 2 * (w * z + x * y) * xdot
    return np.array([u_flat, v_flat, zdot])
| [
"numpy.sin",
"numpy.array",
"numpy.cos"
] | [((455, 474), 'numpy.array', 'np.array', (['[p, q, r]'], {}), '([p, q, r])\n', (463, 474), True, 'import numpy as np\n'), ((938, 957), 'numpy.array', 'np.array', (['[u, v, w]'], {}), '([u, v, w])\n', (946, 957), True, 'import numpy as np\n'), ((1147, 1178), 'numpy.array', 'np.array', (['[uFlat, vFlat, wFlat]'], {}), '([uFlat, vFlat, wFlat])\n', (1155, 1178), True, 'import numpy as np\n'), ((1465, 1496), 'numpy.array', 'np.array', (['[uFlat, vFlat, wFlat]'], {}), '([uFlat, vFlat, wFlat])\n', (1473, 1496), True, 'import numpy as np\n'), ((360, 368), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (363, 368), False, 'from numpy import sin, cos, tan\n'), ((602, 612), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (605, 612), False, 'from numpy import sin, cos, tan\n'), ((756, 766), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (759, 766), False, 'from numpy import sin, cos, tan\n'), ((911, 921), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (914, 921), False, 'from numpy import sin, cos, tan\n'), ((1042, 1050), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (1045, 1050), False, 'from numpy import sin, cos, tan\n'), ((1060, 1068), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (1063, 1068), False, 'from numpy import sin, cos, tan\n'), ((1090, 1098), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (1093, 1098), False, 'from numpy import sin, cos, tan\n'), ((1108, 1116), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (1111, 1116), False, 'from numpy import sin, cos, tan\n'), ((291, 301), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (294, 301), False, 'from numpy import sin, cos, tan\n'), ((331, 339), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (334, 339), False, 'from numpy import sin, cos, tan\n'), ((340, 350), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (343, 350), False, 'from numpy import sin, cos, tan\n'), ((392, 400), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (395, 400), False, 'from numpy import sin, cos, tan\n'), ((412, 420), 'numpy.cos', 
'cos', (['phi'], {}), '(phi)\n', (415, 420), False, 'from numpy import sin, cos, tan\n'), ((421, 431), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (424, 431), False, 'from numpy import sin, cos, tan\n'), ((557, 567), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (560, 567), False, 'from numpy import sin, cos, tan\n'), ((584, 594), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (587, 594), False, 'from numpy import sin, cos, tan\n'), ((747, 755), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (750, 755), False, 'from numpy import sin, cos, tan\n'), ((902, 910), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (905, 910), False, 'from numpy import sin, cos, tan\n'), ((548, 556), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (551, 556), False, 'from numpy import sin, cos, tan\n'), ((575, 583), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (578, 583), False, 'from numpy import sin, cos, tan\n'), ((645, 655), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (648, 655), False, 'from numpy import sin, cos, tan\n'), ((658, 666), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (661, 666), False, 'from numpy import sin, cos, tan\n'), ((667, 675), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (670, 675), False, 'from numpy import sin, cos, tan\n'), ((705, 713), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (708, 713), False, 'from numpy import sin, cos, tan\n'), ((716, 724), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (719, 724), False, 'from numpy import sin, cos, tan\n'), ((725, 733), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (728, 733), False, 'from numpy import sin, cos, tan\n'), ((781, 789), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (784, 789), False, 'from numpy import sin, cos, tan\n'), ((790, 798), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (793, 798), False, 'from numpy import sin, cos, tan\n'), ((821, 829), 'numpy.cos', 'cos', (['psi'], {}), '(psi)\n', (824, 829), False, 'from numpy import sin, cos, tan\n'), ((849, 857), 'numpy.cos', 'cos', 
(['psi'], {}), '(psi)\n', (852, 857), False, 'from numpy import sin, cos, tan\n'), ((880, 888), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (883, 888), False, 'from numpy import sin, cos, tan\n'), ((627, 635), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (630, 635), False, 'from numpy import sin, cos, tan\n'), ((636, 644), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (639, 644), False, 'from numpy import sin, cos, tan\n'), ((685, 693), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (688, 693), False, 'from numpy import sin, cos, tan\n'), ((694, 704), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (697, 704), False, 'from numpy import sin, cos, tan\n'), ((801, 811), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (804, 811), False, 'from numpy import sin, cos, tan\n'), ((812, 820), 'numpy.cos', 'cos', (['phi'], {}), '(phi)\n', (815, 820), False, 'from numpy import sin, cos, tan\n'), ((840, 848), 'numpy.sin', 'sin', (['phi'], {}), '(phi)\n', (843, 848), False, 'from numpy import sin, cos, tan\n'), ((860, 868), 'numpy.sin', 'sin', (['psi'], {}), '(psi)\n', (863, 868), False, 'from numpy import sin, cos, tan\n'), ((869, 879), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (872, 879), False, 'from numpy import sin, cos, tan\n')] |
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import spaces
from rl.policies.distributions import FixedCategorical, FixedNormal, \
MixedDistribution, FixedGumbelSoftmax
from rl.policies.utils import MLP
from util.pytorch import to_tensor
class Actor(nn.Module):
def __init__(self, config, ob_space, ac_space, tanh_policy, deterministic=False):
super().__init__()
self._config = config
self._activation_fn = getattr(F, config.activation)
self._tanh = tanh_policy
self._deterministic= deterministic
@property
def info(self):
return {}
    def act(self, ob, is_train=True, return_log_prob=False):
        """Produce an action for observation ``ob``.

        Samples from the policy distribution during training and takes the
        distribution mode otherwise (or when the actor is deterministic).
        Returns numpy ``actions`` plus the raw pre-squash ``activations``,
        and, when ``return_log_prob`` is set, the summed log-probability of
        the sampled action.
        """
        ob = to_tensor(ob, self._config.device)
        means, stds = self.forward(ob, self._deterministic)
        # One distribution per component of the (dict-like) action space.
        dists = OrderedDict()
        for k, space in self._ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                if self._deterministic:
                    # Zero std collapses the Gaussian onto its mean.
                    stds[k] = torch.zeros_like(means[k])
                dists[k] = FixedNormal(means[k], stds[k])
            else:
                if self._config.meta_algo == 'sac' or self._config.algo == 'sac':
                    # SAC needs reparameterizable discrete samples.
                    dists[k] = FixedGumbelSoftmax(torch.tensor(self._config.temperature), logits=means[k])
                else:
                    dists[k] = FixedCategorical(logits=means[k])
        actions = OrderedDict()
        mixed_dist = MixedDistribution(dists)
        if not is_train or self._deterministic:
            activations = mixed_dist.mode()
        else:
            activations = mixed_dist.sample()
        if return_log_prob:
            log_probs = mixed_dist.log_probs(activations)
        for k, space in self._ac_space.spaces.items():
            z = activations[k]
            if self._tanh and isinstance(space, spaces.Box):
                # action_scale = to_tensor((self._ac_space[k].high), self._config.device).detach()
                # action = torch.tanh(z) * action_scale
                # Squash continuous actions into (-1, 1).
                action = torch.tanh(z)
                if return_log_prob:
                    # follow the Appendix C. Enforcing Action Bounds
                    # log_det_jacobian = 2 * (np.log(2.) - z - F.softplus(-2. * z)).sum(dim=1, keepdim=True)
                    # Correct the log-prob for the tanh change of variables.
                    log_det_jacobian = 2 * (np.log(2.) - z - F.softplus(-2. * z)).sum(dim=-1, keepdim=True)
                    # log_det_jacobian = torch.log((1-torch.tanh(z).pow(2))+1e-6).sum(dim=1, keepdim=True)
                    log_probs[k] = log_probs[k] - log_det_jacobian
            else:
                action = z
            if action.shape[0] == 1:
                # Drop the batch dimension for a single observation.
                actions[k] = action.detach().cpu().numpy().squeeze(0)
            else:
                actions[k] = action.detach().cpu().numpy()
        if return_log_prob:
            # Total log-probability over all action components.
            log_probs_ = torch.cat(list(log_probs.values()), -1).sum(-1, keepdim=True)
            # if log_probs_.min() < -100:
            #     print('sampling an action with a probability of 1e-100')
            #     import ipdb; ipdb.set_trace()
            log_probs_ = log_probs_.detach().cpu().numpy().squeeze(0)
            return actions, activations, log_probs_
        else:
            return actions, activations
    def act_log(self, ob, activations=None):
        """Compute differentiable log-probabilities for actions.

        Unlike :meth:`act`, sampling here uses ``rsample`` (the
        reparameterization trick) so gradients flow through the sample.

        Args:
            ob: observation batch fed through the policy network.
            activations: optional pre-computed raw activations; when None,
                fresh activations are drawn from the current distributions.

        Returns:
            ``(actions, log_probs_)`` when ``activations`` is None, else
            ``(log_probs_, ents)`` where ``ents`` are distribution entropies.
        """
        means, stds = self.forward(ob)
        dists = OrderedDict()
        actions = OrderedDict()
        for k, space in self._ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                if self._deterministic:
                    # zero std collapses the Gaussian onto its mean
                    stds[k] = torch.zeros_like(means[k])
                dists[k] = FixedNormal(means[k], stds[k])
            else:
                if self._config.meta_algo == 'sac' or self._config.algo == 'sac':
                    dists[k] = FixedGumbelSoftmax(torch.tensor(self._config.temperature), logits=means[k])
                else:
                    dists[k] = FixedCategorical(logits=means[k])
        mixed_dist = MixedDistribution(dists)
        # reparameterized sample unless the caller supplied activations
        activations_ = mixed_dist.rsample() if activations is None else activations
        for k in activations_.keys():
            # ensure every activation carries a batch dimension
            if len(activations_[k].shape) == 1:
                activations_[k] = activations_[k].unsqueeze(0)
        log_probs = mixed_dist.log_probs(activations_)
        for k, space in self._ac_space.spaces.items():
            z = activations_[k]
            if self._tanh and isinstance(space, spaces.Box):
                # action_scale = to_tensor((self._ac_space[k].high), self._config.device).detach()
                action = torch.tanh(z)
                # action = torch.tanh(z)
                # tanh change-of-variables correction; follows
                # the Appendix C. Enforcing Action Bounds
                # log_det_jacobian = 2 * (np.log(2.) - z - F.softplus(-2. * z)).sum(dim=1, keepdim=True)
                log_det_jacobian = 2 * (np.log(2.) - z - F.softplus(-2. * z)).sum(dim=-1, keepdim=True)
                log_probs[k] = log_probs[k] - log_det_jacobian
            else:
                action = z
                # scale discrete log-probs by the configured entropy coefficient
                log_probs[k] *= self._config.discrete_ent_coef
            actions[k] = action
        # sum per-key log-probs into one scalar per batch element
        log_probs_ = torch.cat(list(log_probs.values()), -1).sum(-1, keepdim=True)
        # if log_probs_.min() < -100:
        #     print(ob)
        #     print(log_probs_.min())
        #     import ipdb; ipdb.set_trace()
        if activations is None:
            return actions, log_probs_
        else:
            ents = mixed_dist.entropy()
            return log_probs_, ents
    def act_log_debug(self, ob, activations=None):
        """Debugging variant of :meth:`act_log`.

        Differences from ``act_log``: no deterministic/SAC handling (always
        ``FixedNormal`` for Box, ``FixedCategorical`` for discrete), the tanh
        action is scaled by the space's ``high`` bound, and it deliberately
        drops into ``ipdb`` when the summed log-probability falls below -100.

        Returns:
            ``(actions, log_probs_)`` when ``activations`` is None, else
            ``(log_probs_, ents, log_probs, means, stds)`` for inspection.
        """
        means, stds = self.forward(ob)
        dists = OrderedDict()
        actions = OrderedDict()
        for k, space in self._ac_space.spaces.items():
            if isinstance(space, spaces.Box):
                dists[k] = FixedNormal(means[k], stds[k])
            else:
                dists[k] = FixedCategorical(logits=means[k])
        mixed_dist = MixedDistribution(dists)
        # reparameterized sample unless the caller supplied activations
        activations_ = mixed_dist.rsample() if activations is None else activations
        log_probs = mixed_dist.log_probs(activations_)
        for k, space in self._ac_space.spaces.items():
            z = activations_[k]
            if self._tanh and isinstance(space, spaces.Box):
                action = torch.tanh(z) * to_tensor((self._ac_space[k].high), self._config.device)
                # tanh change-of-variables correction; follows
                # the Appendix C. Enforcing Action Bounds
                log_det_jacobian = 2 * (np.log(2.) - z - F.softplus(-2. * z)).sum(dim=-1, keepdim=True)
                log_probs[k] = log_probs[k] - log_det_jacobian
            else:
                action = z
            actions[k] = action
        ents = mixed_dist.entropy()
        #print(torch.cat(list(log_probs.values()), -1))
        log_probs_ = torch.cat(list(log_probs.values()), -1).sum(-1, keepdim=True)
        # intentional debugger trap for pathologically small probabilities
        if log_probs_.min() < -100:
            print(ob)
            print(log_probs_.min())
            import ipdb; ipdb.set_trace()
        if activations is None:
            return actions, log_probs_
        else:
            return log_probs_, ents, log_probs, means, stds
class Critic(nn.Module):
def __init__(self, config):
super().__init__()
self._config = config
| [
"rl.policies.distributions.MixedDistribution",
"rl.policies.distributions.FixedCategorical",
"torch.zeros_like",
"ipdb.set_trace",
"numpy.log",
"collections.OrderedDict",
"rl.policies.distributions.FixedNormal",
"torch.nn.functional.softplus",
"util.pytorch.to_tensor",
"torch.tensor",
"torch.tan... | [((762, 796), 'util.pytorch.to_tensor', 'to_tensor', (['ob', 'self._config.device'], {}), '(ob, self._config.device)\n', (771, 796), False, 'from util.pytorch import to_tensor\n'), ((874, 887), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (885, 887), False, 'from collections import OrderedDict\n'), ((1457, 1470), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1468, 1470), False, 'from collections import OrderedDict\n'), ((1492, 1516), 'rl.policies.distributions.MixedDistribution', 'MixedDistribution', (['dists'], {}), '(dists)\n', (1509, 1516), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((3384, 3397), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3395, 3397), False, 'from collections import OrderedDict\n'), ((3416, 3429), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3427, 3429), False, 'from collections import OrderedDict\n'), ((4002, 4026), 'rl.policies.distributions.MixedDistribution', 'MixedDistribution', (['dists'], {}), '(dists)\n', (4019, 4026), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((5620, 5633), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5631, 5633), False, 'from collections import OrderedDict\n'), ((5652, 5665), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5663, 5665), False, 'from collections import OrderedDict\n'), ((5926, 5950), 'rl.policies.distributions.MixedDistribution', 'MixedDistribution', (['dists'], {}), '(dists)\n', (5943, 5950), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((6943, 6959), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (6957, 6959), False, 'import ipdb\n'), ((1113, 1143), 'rl.policies.distributions.FixedNormal', 'FixedNormal', (['means[k]', 'stds[k]'], {}), '(means[k], stds[k])\n', 
(1124, 1143), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((2085, 2098), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (2095, 2098), False, 'import torch\n'), ((3655, 3685), 'rl.policies.distributions.FixedNormal', 'FixedNormal', (['means[k]', 'stds[k]'], {}), '(means[k], stds[k])\n', (3666, 3685), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((4590, 4603), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', (4600, 4603), False, 'import torch\n'), ((5794, 5824), 'rl.policies.distributions.FixedNormal', 'FixedNormal', (['means[k]', 'stds[k]'], {}), '(means[k], stds[k])\n', (5805, 5824), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((5870, 5903), 'rl.policies.distributions.FixedCategorical', 'FixedCategorical', ([], {'logits': 'means[k]'}), '(logits=means[k])\n', (5886, 5903), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((1059, 1085), 'torch.zeros_like', 'torch.zeros_like', (['means[k]'], {}), '(means[k])\n', (1075, 1085), False, 'import torch\n'), ((1404, 1437), 'rl.policies.distributions.FixedCategorical', 'FixedCategorical', ([], {'logits': 'means[k]'}), '(logits=means[k])\n', (1420, 1437), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((3601, 3627), 'torch.zeros_like', 'torch.zeros_like', (['means[k]'], {}), '(means[k])\n', (3617, 3627), False, 'import torch\n'), ((3946, 3979), 'rl.policies.distributions.FixedCategorical', 'FixedCategorical', ([], {'logits': 'means[k]'}), '(logits=means[k])\n', (3962, 3979), False, 'from rl.policies.distributions import FixedCategorical, FixedNormal, MixedDistribution, FixedGumbelSoftmax\n'), ((6265, 6278), 'torch.tanh', 'torch.tanh', (['z'], {}), '(z)\n', 
(6275, 6278), False, 'import torch\n'), ((6281, 6335), 'util.pytorch.to_tensor', 'to_tensor', (['self._ac_space[k].high', 'self._config.device'], {}), '(self._ac_space[k].high, self._config.device)\n', (6290, 6335), False, 'from util.pytorch import to_tensor\n'), ((1294, 1332), 'torch.tensor', 'torch.tensor', (['self._config.temperature'], {}), '(self._config.temperature)\n', (1306, 1332), False, 'import torch\n'), ((3836, 3874), 'torch.tensor', 'torch.tensor', (['self._config.temperature'], {}), '(self._config.temperature)\n', (3848, 3874), False, 'import torch\n'), ((4872, 4892), 'torch.nn.functional.softplus', 'F.softplus', (['(-2.0 * z)'], {}), '(-2.0 * z)\n', (4882, 4892), True, 'import torch.nn.functional as F\n'), ((6460, 6480), 'torch.nn.functional.softplus', 'F.softplus', (['(-2.0 * z)'], {}), '(-2.0 * z)\n', (6470, 6480), True, 'import torch.nn.functional as F\n'), ((2374, 2394), 'torch.nn.functional.softplus', 'F.softplus', (['(-2.0 * z)'], {}), '(-2.0 * z)\n', (2384, 2394), True, 'import torch.nn.functional as F\n'), ((4855, 4866), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (4861, 4866), True, 'import numpy as np\n'), ((6443, 6454), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (6449, 6454), True, 'import numpy as np\n'), ((2357, 2368), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (2363, 2368), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-07-09 at 13:42
@author: cook
"""
import numpy as np
from astropy.table import Table
from astropy import constants as cc
from astropy import units as uu
import os
import warnings
from apero import core
from apero.core import constants
from apero.core import math as mp
from apero import lang
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.core.core import drs_startup
from apero.io import drs_data
from apero.io import drs_path
from apero.science.calib import localisation
from apero.science.calib import shape
from apero.science.calib import wave
from apero.science.calib import general
from apero.science.calib import flat_blaze
from apero.science.extract import berv
# =============================================================================
# Define variables
# =============================================================================
# module name used in error messages and function names
__NAME__ = 'science.extraction.general.py'
__INSTRUMENT__ = 'None'
# Get constants (instrument-independent: loaded with instrument 'None')
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
# aliases for the DRS file classes
DrsFitsFile = drs_file.DrsFitsFile
DrsNpyFile = drs_file.DrsNpyFile
# Get Logging function
WLOG = drs_log.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck
pcheck = core.pcheck
# -----------------------------------------------------------------------------
# Speed of light in m/s and km/s (from astropy constants)
# noinspection PyUnresolvedReferences
speed_of_light_ms = cc.c.to(uu.m / uu.s).value
# noinspection PyUnresolvedReferences
speed_of_light_kms = cc.c.to(uu.km / uu.s).value
# Get function string
display_func = drs_log.display_func
# =============================================================================
# Define general functions
# =============================================================================
def order_profiles(params, recipe, infile, fibertypes, shapelocal, shapex,
                   shapey, orderpfile, filenames=None):
    """Load (or build and cache) straightened order profiles per fiber.

    For each fiber, either reads a previously saved straightened order
    profile (a .npy temporary file) or loads the raw localisation order
    profile, straightens it with the shape transforms, and caches the
    result for later runs.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    recipe : DrsRecipe
        The recipe instance that called this function.
    infile : DrsFitsFile
        Input file; its header is used to locate calibrations.
    fibertypes : list of str
        The fibers to process.
    shapelocal : np.ndarray
        The local shape transform matrix.
    shapex, shapey : np.ndarray
        The dx / dy shape maps used to straighten the profile.
    orderpfile : DrsFitsFile
        File definition used to build keys/filenames for the order profile.
    filenames : dict or None
        Optional per-fiber override filenames; when None (or not a dict)
        every fiber defaults to None (i.e. use the calibration database).

    Returns
    -------
    orderprofiles : dict
        fiber -> straightened order profile image.
    orderprofilefiles : dict
        fiber -> filename the profile came from.
    """
    func_name = __NAME__ + '.order_profiles()'
    # filenames must be a dictionary
    if not isinstance(filenames, dict):
        filenames = dict()
        for fiber in fibertypes:
            filenames[fiber] = None
    # ------------------------------------------------------------------------
    # get header from infile
    header = infile.header
    # ------------------------------------------------------------------------
    # storage for order profiles
    orderprofiles = dict()
    orderprofilefiles = dict()
    # loop around fibers
    for fiber in fibertypes:
        # log progress (straightening orderp)
        WLOG(params, 'info', TextEntry('40-016-00003', args=[fiber]))
        # get key
        key = orderpfile.get_dbkey(fiber=fiber)
        # ------------------------------------------------------------------
        # check for filename in inputs
        filename = general.get_input_files(params, 'ORDERPFILE', key, header,
                                           default=filenames[fiber])
        # ------------------------------------------------------------------
        # construct order profile file
        orderpsfile = orderpfile.newcopy(recipe=recipe, fiber=fiber)
        orderpsfile.construct_filename(params, infile=infile)
        # check if temporary file exists (only reuse when no explicit file
        # was requested via the inputs)
        if orderpsfile.file_exists() and (filename is None):
            # load the numpy temporary file
            # Note: NpyFitsFile needs arguments params!
            if isinstance(orderpsfile, DrsNpyFile):
                # log progress (read file)
                wargs = [orderpsfile.filename]
                WLOG(params, '', TextEntry('40-013-00023', args=wargs))
                # read npy file
                orderpsfile.read_file(params)
            else:
                # wrong file class for a cached profile -> fatal error
                eargs = [orderpsfile.__str__(), func_name]
                WLOG(params, 'error', TextEntry('00-016-00023', args=eargs))
            # push data into orderp
            orderp = orderpsfile.data
            orderpfilename = orderpsfile.filename
        # load the order profile
        else:
            # load using localisation load order profile function
            out = localisation.load_orderp(params, header, fiber=fiber,
                                           filename=filename)
            orderpfilename, orderp = out
            # straighten orders
            orderp = shape.ea_transform(params, orderp, shapelocal,
                                        dxmap=shapex, dymap=shapey)
            # push into orderpsfile
            orderpsfile.data = orderp
            # log progress (saving to file)
            wargs = [orderpsfile.filename]
            WLOG(params, '', TextEntry('40-013-00024', args=wargs))
            # save for use later (as .npy) so the straightening is not redone
            orderpsfile.write_file(params)
        # store in storage dictionary
        orderprofiles[fiber] = orderp
        orderprofilefiles[fiber] = orderpfilename
    # return order profiles
    return orderprofiles, orderprofilefiles
# =============================================================================
# Define thermal functions
# =============================================================================
def thermal_correction(params, recipe, header, props=None, eprops=None,
                       fiber=None, **kwargs):
    """Apply a thermal background correction to E2DS / E2DSFF spectra.

    Depending on which correction list the fiber's DPR type falls into,
    uses the TAPAS-based method (:func:`tcorrect1`) or the envelope-based
    method (:func:`tcorrect2`); otherwise leaves the spectra untouched.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    recipe : DrsRecipe
        The recipe instance (used for plotting inside tcorrect*).
    header : Header
        FITS header used to locate the thermal calibration.
    props : ParamDict or None
        Properties providing DPRTYPE (created empty when None).
    eprops : ParamDict or None
        Extraction properties providing E2DS/E2DSFF/FLAT; updated in place
        and returned (created empty when None).
    fiber : str or None
        The fiber to correct.
    kwargs : dict
        Optional overrides for the pcheck'd parameters and 'thermal_file'.

    Returns
    -------
    eprops : ParamDict
        Updated with E2DS, E2DSFF, FIBERTYPE and THERMALFILE.
    """
    func_name = __NAME__ + '.thermal_correction()'
    # deal with props = None
    if props is None:
        props = ParamDict()
    # deal with eprops = None
    if eprops is None:
        eprops = ParamDict()
    # get properties from parameter dictionaries / kwargs
    dprtype = pcheck(params, 'DPRTYPE', 'dprtype', kwargs, func_name,
                     paramdict=props)
    tapas_thres = pcheck(params, 'THERMAL_THRES_TAPAS', 'tapas_thres', kwargs,
                         func_name)
    envelope = pcheck(params, 'THERMAL_ENVELOPE_PERCENTILE', 'envelope',
                      kwargs, func_name)
    filter_wid = pcheck(params, 'THERMAL_FILTER_WID', 'filter_wid', kwargs,
                        func_name)
    torder = pcheck(params, 'THERMAL_ORDER', 'torder', kwargs, func_name)
    # NOTE(review): local name 'red_limt' is a typo for 'red_limit' (kept
    # for byte-compatibility; it is used consistently below)
    red_limt = pcheck(params, 'THERMAL_RED_LIMIT', 'red_limit', kwargs,
                      func_name)
    blue_limit = pcheck(params, 'THERMAL_BLUE_LIMIT', 'blue_limit', kwargs,
                        func_name)
    e2ds = pcheck(params, 'E2DS', 'e2ds', kwargs, func_name, paramdict=eprops)
    e2dsff = pcheck(params, 'E2DSFF', 'e2dsff', kwargs, func_name,
                    paramdict=eprops)
    flat = pcheck(params, 'FLAT', paramdict=eprops)
    corrtype1 = pcheck(params, 'THERMAL_CORRETION_TYPE1', 'corrtype1', kwargs,
                       func_name, mapf='list', dtype=str)
    corrtype2 = pcheck(params, 'THERMAL_CORRETION_TYPE2', 'corrtype2', kwargs,
                       func_name, mapf='list', dtype=str)
    thermal_file = kwargs.get('thermal_file', None)
    thermal_correct = pcheck(params, 'THERMAL_CORRECT', 'thermal_correct',
                             kwargs, func_name)
    # ----------------------------------------------------------------------
    # get pconstant from p
    pconst = constants.pload(params['INSTRUMENT'])
    # ----------------------------------------------------------------------
    # get fiber dprtype
    fibertype = pconst.FIBER_DATA_TYPE(dprtype, fiber)
    # ----------------------------------------------------------------------
    # get master wave filename
    mwavefile = wave.get_masterwave_filename(params, fiber)
    # get master wave map
    wprops = wave.get_wavesolution(params, recipe, filename=mwavefile)
    # get the wave solution
    wavemap = wprops['WAVEMAP']
    # ----------------------------------------------------------------------
    # deal with skipping thermal correction
    if not thermal_correct:
        # add / update eprops
        eprops['E2DS'] = e2ds
        eprops['E2DSFF'] = e2dsff
        eprops['FIBERTYPE'] = fibertype
        eprops['THERMALFILE'] = 'None'
        # update source
        keys = ['E2DS', 'E2DSFF', 'FIBERTYPE', 'THERMALFILE']
        eprops.set_sources(keys, func_name)
        # return eprops
        return eprops
    # ----------------------------------------------------------------------
    # get thermal (only if in one of the correction lists)
    if fibertype in corrtype1:
        thermalfile, thermal = get_thermal(params, header, fiber=fiber,
                                           filename=thermal_file,
                                           kind='THERMALT_E2DS')
    elif fibertype in corrtype2:
        thermalfile, thermal = get_thermal(params, header, fiber=fiber,
                                           filename=thermal_file,
                                           kind='THERMALI_E2DS')
    else:
        thermal = None
        thermalfile = 'None'
    # ----------------------------------------------------------------------
    # thermal correction kwargs (shared by both correction methods)
    tkwargs = dict(header=header, fiber=fiber, wavemap=wavemap,
                   tapas_thres=tapas_thres, envelope=envelope,
                   filter_wid=filter_wid, torder=torder,
                   red_limit=red_limt, blue_limit=blue_limit,
                   thermal=thermal)
    # base thermal correction on fiber type
    if fibertype in corrtype1:
        # log progress: doing thermal correction
        wargs = [fibertype, 1]
        WLOG(params, 'info', TextEntry('40-016-00012', args=wargs))
        # do thermal correction
        e2ds = tcorrect1(params, recipe, e2ds, **tkwargs)
        e2dsff = tcorrect1(params, recipe, e2dsff, flat=flat, **tkwargs)
    elif fibertype in corrtype2:
        # log progress: doing thermal correction
        # NOTE(review): logs correction type 1 here even though this is the
        # corrtype2 branch -- presumably should be [fibertype, 2]; confirm
        wargs = [fibertype, 1]
        WLOG(params, 'info', TextEntry('40-016-00012', args=wargs))
        # do thermal correction
        e2ds = tcorrect2(params, recipe, e2ds, **tkwargs)
        e2dsff = tcorrect2(params, recipe, e2dsff, flat=flat, **tkwargs)
    else:
        # log that we are not correcting thermal
        WLOG(params, 'info', TextEntry('40-016-00013', args=[fibertype]))
        thermalfile = 'None'
    # ----------------------------------------------------------------------
    # add / update eprops
    eprops['E2DS'] = e2ds
    eprops['E2DSFF'] = e2dsff
    eprops['FIBERTYPE'] = fibertype
    eprops['THERMALFILE'] = thermalfile
    # update source
    keys = ['E2DS', 'E2DSFF', 'FIBERTYPE', 'THERMALFILE']
    eprops.set_sources(keys, func_name)
    # return eprops
    return eprops
def get_thermal(params, header, fiber, kind, filename=None):
    """Load a thermal calibration image for the given fiber.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    header : Header
        FITS header used to match the calibration database entry.
    fiber : str
        Fiber to get the database key for.
    kind : str
        The thermal file definition name (e.g. 'THERMALT_E2DS').
    filename : str or None
        Optional explicit filename overriding the database lookup.

    Returns
    -------
    thermal_file : str
        The filename of the thermal calibration used.
    thermal : np.ndarray
        The thermal calibration image.
    """
    # resolve the output file definition for this thermal kind
    file_def = core.get_file_definition(kind, params['INSTRUMENT'],
                                        kind='red')
    # database key for this fiber
    dbkey = file_def.get_dbkey(fiber=fiber)
    # ------------------------------------------------------------------------
    # allow a user-supplied filename (from the inputs) to take precedence
    filename = general.get_input_files(params, 'THERMALFILE', dbkey, header,
                                       filename)
    # ------------------------------------------------------------------------
    # read the calibration image from the calibration database
    image, used_file = general.load_calib_file(params, dbkey, header,
                                               filename=filename)
    # log which thermal file we are using
    WLOG(params, '', TextEntry('40-016-00027', args=[used_file]))
    # return the filename used and the thermal image
    return used_file, image
def tcorrect1(params, recipe, image, header, fiber, wavemap, thermal=None,
              flat=None, **kwargs):
    """Thermal correction method 1: scale the thermal image using TAPAS.

    Uses the TAPAS transmission spectrum to find pixels (in order
    ``torder``, blueward of ``red_limit``) where telluric transmission is
    essentially zero, measures the median ratio of thermal to observed
    flux in that domain, rescales the thermal image by it, and subtracts
    the result from ``image``.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    recipe : DrsRecipe
        Used for the THERMAL_BACKGROUND plot.
    image : np.ndarray
        The E2DS image to correct (not modified; a new array is returned).
    header : Header
        FITS header (used only when the thermal must be loaded here).
    fiber : str
        The fiber being corrected.
    wavemap : np.ndarray
        Wavelength solution matching ``image`` row-for-row.
    thermal : np.ndarray or None
        Thermal calibration; loaded from the database when None.
    flat : np.ndarray or None
        When given, the thermal is divided by the flat first (FF mode).
    kwargs : dict
        tapas_thres, filter_wid, torder, red_limit, tapas_file.

    Returns
    -------
    corrected_image : np.ndarray
        ``image`` minus the scaled thermal background (or ``image``
        unchanged if the thermal is all zeros / all non-finite).
    """
    # get parameters from skwargs
    tapas_thres = kwargs.get('tapas_thres', None)
    filter_wid = kwargs.get('filter_wid', None)
    torder = kwargs.get('torder', None)
    red_limit = kwargs.get('red_limit', None)
    tapas_file = kwargs.get('tapas_file', None)
    # ----------------------------------------------------------------------
    # deal with no thermal
    if thermal is None:
        # get thermal
        _, thermal = get_thermal(params, header, fiber=fiber,
                                 kind='THERMALT_E2DS')
    # ----------------------------------------------------------------------
    # if we have a flat we should apply it to the thermal
    if flat is not None:
        thermal = thermal / flat
        kind = 'FF '
    else:
        kind = ''
    # ----------------------------------------------------------------------
    # deal with rare case that thermal is all zeros (or all NaN)
    if mp.nansum(thermal) == 0 or np.sum(np.isfinite(thermal)) == 0:
        return image
    # ----------------------------------------------------------------------
    # load tapas
    tapas, _ = drs_data.load_tapas(params, filename=tapas_file)
    wtapas, ttapas = tapas['wavelength'], tapas['trans_combined']
    # ----------------------------------------------------------------------
    # splining tapas onto the order 49 wavelength grid
    sptapas = mp.iuv_spline(wtapas, ttapas)
    # binary mask to be saved; this corresponds to the domain for which
    # transmission is basically zero and we can safely use the domain
    # to scale the thermal background. We only do this for wavelength smaller
    # than "THERMAL_TAPAS_RED_LIMIT" nm as this is the red end of the
    # TAPAS domain
    # set torder mask all to False initially
    torder_mask = np.zeros_like(wavemap[torder, :], dtype=bool)
    # get the wave mask
    wavemask = wavemap[torder] < red_limit
    # get the tapas data for these wavelengths
    torder_tapas = sptapas(wavemap[torder, wavemask])
    # find those pixels lower than threshold in tapas
    torder_mask[wavemask] = torder_tapas < tapas_thres
    # median filter the thermal (loop around orders)
    for order_num in range(thermal.shape[0]):
        thermal[order_num] = mp.medfilt_1d(thermal[order_num], filter_wid)
    # we find the median scale between the observation and the thermal
    # background in domains where there is no transmission
    thermal_torder = thermal[torder, torder_mask]
    image_torder = image[torder, torder_mask]
    ratio = mp.nanmedian(thermal_torder / image_torder)
    # scale thermal by ratio
    thermal = thermal / ratio
    # ----------------------------------------------------------------------
    # plot thermal background plot
    # NOTE(review): passes wave= here but tcorrect2 passes wavemap= for the
    # same plot name -- confirm which keyword the plot definition expects
    recipe.plot('THERMAL_BACKGROUND', params=params, wave=wavemap, image=image,
                thermal=thermal, torder=torder, tmask=torder_mask, fiber=fiber,
                kind=kind)
    # ----------------------------------------------------------------------
    # correct image
    corrected_image = image - thermal
    # ----------------------------------------------------------------------
    # return p and corrected image
    return corrected_image
def tcorrect2(params, recipe, image, header, fiber, wavemap, thermal=None,
              flat=None, **kwargs):
    """Thermal correction method 2: scale the thermal using a flux envelope.

    Builds a running-percentile envelope of the observed flux in order
    ``torder``, compares it to the (median-filtered) thermal image between
    ``blue_limit`` and ``red_limit``, rescales the thermal by the median
    ratio, and subtracts it from ``image``.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    recipe : DrsRecipe
        Used for the THERMAL_BACKGROUND plot.
    image : np.ndarray
        The E2DS image to correct (not modified; a new array is returned).
    header : Header
        FITS header (used only when the thermal must be loaded here).
    fiber : str
        The fiber being corrected.
    wavemap : np.ndarray
        Wavelength solution matching ``image`` row-for-row.
    thermal : np.ndarray or None
        Thermal calibration; loaded from the database when None.
    flat : np.ndarray or None
        When given, the thermal is divided by the flat first (FF mode).
    kwargs : dict
        envelope, filter_wid, torder, red_limit, blue_limit.

    Returns
    -------
    corrected_image : np.ndarray
        ``image`` minus the scaled thermal background (or ``image``
        unchanged if the thermal is all zeros / all non-finite).
    """
    envelope_percent = kwargs.get('envelope', None)
    filter_wid = kwargs.get('filter_wid', None)
    torder = kwargs.get('torder', None)
    red_limit = kwargs.get('red_limit', None)
    blue_limit = kwargs.get('blue_limit', None)
    # thermal_file = kwargs.get('thermal_file', None)
    # get the shape
    dim1, dim2 = image.shape
    # ----------------------------------------------------------------------
    # deal with no thermal
    if thermal is None:
        # get thermal
        _, thermal = get_thermal(params, header, fiber=fiber,
                                 kind='THERMALI_E2DS')
    # ----------------------------------------------------------------------
    # if we have a flat we should apply it to the thermal
    if flat is not None:
        thermal = thermal / flat
        kind = 'FF '
    else:
        kind = ''
    # ----------------------------------------------------------------------
    # deal with rare case that thermal is all zeros (or all NaN)
    if mp.nansum(thermal) == 0 or np.sum(np.isfinite(thermal)) == 0:
        return image
    # ----------------------------------------------------------------------
    # set up an envelope to measure thermal background in image
    envelope = np.zeros(dim2)
    # loop around all pixels (running percentile over a filter_wid window)
    for x_it in range(dim2):
        # define start and end points
        start = x_it - filter_wid // 2
        end = x_it + filter_wid // 2
        # deal with out of bounds
        # NOTE(review): clamping end to dim2 - 1 excludes the final pixel
        # from the slice (slice end is exclusive) -- confirm intentional
        if start < 0:
            start = 0
        if end > dim2 - 1:
            end = dim2 - 1
        # get the box for this pixel
        imagebox = image[torder, start:end]
        # get the envelope (suppress all-NaN percentile warnings)
        with warnings.catch_warnings(record=True) as _:
            envelope[x_it] = np.nanpercentile(imagebox, envelope_percent)
    # ----------------------------------------------------------------------
    # median filter the thermal (loop around orders)
    for order_num in range(dim1):
        thermal[order_num] = mp.medfilt_1d(thermal[order_num], filter_wid)
    # ----------------------------------------------------------------------
    # only keep wavelength in range of thermal limits
    wavemask = (wavemap[torder] > blue_limit) & (wavemap[torder] < red_limit)
    # we find the median scale between the observation and the thermal
    # background in domains where there is no transmission
    thermal_torder = thermal[torder, wavemask]
    envelope_torder = envelope[wavemask]
    ratio = mp.nanmedian(thermal_torder / envelope_torder)
    # scale thermal by ratio
    thermal = thermal / ratio
    # ----------------------------------------------------------------------
    # plot thermal background plot
    recipe.plot('THERMAL_BACKGROUND', params=params, wavemap=wavemap,
                image=image, thermal=thermal, torder=torder, tmask=wavemask,
                fiber=fiber, kind=kind)
    # ----------------------------------------------------------------------
    # correct image
    corrected_image = image - thermal
    # ----------------------------------------------------------------------
    # return p and corrected image
    return corrected_image
# =============================================================================
# Define leakage functions
# =============================================================================
def correct_master_dark_fp(params, extractdict, **kwargs):
    """Correct extracted science fibers for leakage from the reference FP.

    The reference (FP) fiber image has its pedestal removed and is
    normalized per order; the science fibers then have their
    low-frequency component removed (Gaussian-smoothed proxy) and are
    normalized by the reference-order amplitudes.

    Parameters
    ----------
    params : ParamDict
        Parameter dictionary of constants.
    extractdict : dict
        fiber -> extracted DrsFitsFile; must contain the reference fiber
        and every science fiber (fatal error otherwise).
    kwargs : dict
        Optional overrides: bckgrd_percentile, norm_percentile,
        w_smooth, ker_size.

    Returns
    -------
    outputs : dict
        fiber -> corrected extracted file (data updated in place).
    props : ParamDict
        The leakage parameters used (with sources set).
    """
    # set function name
    func_name = __NAME__ + '.correct_master_dark_fp'
    # load parameters from params/kwargs
    bckgrd_percentile = pcheck(params, 'LEAK_BCKGRD_PERCENTILE',
                               'bckgrd_percentile', kwargs, func_name)
    norm_percentile = pcheck(params, 'LEAK_NORM_PERCENTILE', 'norm_percentile',
                             kwargs, func_name)
    w_smooth = pcheck(params, 'LEAKM_WSMOOTH', 'w_smooth', kwargs, func_name)
    ker_size = pcheck(params, 'LEAKM_KERSIZE', 'ker_size', kwargs, func_name)
    # define a gaussian kernel that goes from +/- ker_size * w_smooth
    xkernel = np.arange(-ker_size * w_smooth, ker_size * w_smooth)
    ykernel = np.exp(-0.5 * (xkernel / w_smooth) ** 2)
    # get this instruments science fibers and reference fiber
    pconst = constants.pload(params['INSTRUMENT'])
    # science fibers should be list of strings, reference fiber should be string
    sci_fibers, ref_fiber = pconst.FIBER_KINDS()
    # output storage (dictionary of corrected extracted files)
    outputs = dict()
    # ----------------------------------------------------------------------
    # Deal with loading the reference fiber image props
    # ----------------------------------------------------------------------
    # check for reference fiber in extract dict
    if ref_fiber not in extractdict:
        eargs = [ref_fiber, ', '.join(extractdict.keys()), func_name]
        WLOG(params, 'error', TextEntry('00-016-00024', args=eargs))
    # get the reference file
    reffile = extractdict[ref_fiber]
    # get dprtype
    dprtype = reffile.get_key('KW_DPRTYPE')
    # get dpr type for ref image
    refdpr = pconst.FIBER_DPR_POS(dprtype, ref_fiber)
    # check that refdpr is FP (must be a FP)
    if refdpr != 'FP':
        # log and raise error
        eargs = [ref_fiber, dprtype, func_name]
        WLOG(params, 'error', TextEntry('00-016-00025', args=eargs))
    # get the data for the reference image (copy so the file data on disk
    # representation is not mutated until reassigned below)
    refimage = np.array(reffile.data)
    # get reference image size
    nord, nbpix = refimage.shape
    # ----------------------------------------------------------------------
    # remove the pedestal from the reference image and work out the amplitude
    # of the leak from the reference fiber
    # ----------------------------------------------------------------------
    # storage
    ref_amps = np.zeros_like(refimage)
    # loop around the orders
    for order_num in range(nord):
        # remove the pedestal from the FP to avoid an offset from
        # thermal background
        background = np.nanpercentile(refimage[order_num], bckgrd_percentile)
        refimage[order_num] = refimage[order_num] - background
        # get the amplitudes
        amplitude = np.nanpercentile(refimage[order_num], norm_percentile)
        ref_amps[order_num] = amplitude
        # normalize the reference image by this amplitude
        refimage[order_num] = refimage[order_num] / amplitude
    # save corrected refimage into output storage
    reffile.data = refimage
    outputs[ref_fiber] = reffile
    # ----------------------------------------------------------------------
    # process the science fibers
    # ----------------------------------------------------------------------
    for sci_fiber in sci_fibers:
        # check that science fiber is in extraction dictionary
        if sci_fiber not in extractdict:
            eargs = [sci_fiber, ', '.join(extractdict.keys()), func_name]
            WLOG(params, 'error', TextEntry('00-016-00026', args=eargs))
        # get the science image
        scifile = extractdict[sci_fiber]
        # get the data for the reference image
        sciimage = np.array(scifile.data)
        # get the science image size
        nord, nbpix = sciimage.shape
        # loop around orders
        for order_num in range(nord):
            # median filtering has to be an odd number
            medfac = 2 * (w_smooth // 2) + 1
            # calculate median filter
            tmpimage = mp.medfilt_1d(sciimage[order_num], medfac)
            # set NaN pixels to zero
            tmpimage[np.isnan(tmpimage)] = 0
            # find a proxy for the low-frequency in the science channel
            mask = np.ones_like(tmpimage)
            mask[tmpimage == 0] = 0
            # calculate low-frequency (mask-weighted gaussian smoothing)
            part1 = np.convolve(tmpimage, ykernel, mode='same')
            part2 = np.convolve(mask, ykernel, mode='same')
            # suppress divide-by-zero warnings where the mask sums to zero
            with warnings.catch_warnings(record=True) as _:
                low_f = part1 / part2
            # remove the low-frequencies from science image
            sciimage[order_num] = sciimage[order_num] - low_f
            # normalize by the reference amplitudes
            sciimage[order_num] = sciimage[order_num] / ref_amps[order_num]
        # save corrected science image into output storage
        scifile.data = sciimage
        outputs[sci_fiber] = scifile
    # ----------------------------------------------------------------------
    # Make properties dictionary
    props = ParamDict()
    props['LEAK_BCKGRD_PERCENTILE'] = bckgrd_percentile
    props['LEAK_NORM_PERCENTILE'] = norm_percentile
    props['LEAKM_WSMOOTH'] = w_smooth
    props['LEAKM_KERSIZE'] = ker_size
    # set sources
    keys = ['LEAK_BCKGRD_PERCENTILE', 'LEAK_NORM_PERCENTILE',
            'LEAKM_WSMOOTH', 'LEAKM_KERSIZE']
    props.set_sources(keys, func_name)
    # ----------------------------------------------------------------------
    # return output dictionary with corrected extracted files
    return outputs, props
def correct_dark_fp(params, extractdict, **kwargs):
    """Correct extracted images for dark FP leakage.

    Measures the per-order ratio between the observed reference-fiber FP
    and the leak master reference FP, then subtracts the leak master for
    each science fiber (scaled by that ratio) from every requested
    extraction image.

    :param params: ParamDict, the parameter dictionary of constants
    :param extractdict: dict, extraction files per fiber and per
                        extraction file type
    :param kwargs: overrides for the pcheck'd constants used below

    :return: ParamDict with 'OUTPUTS' (corrected extraction files per
             fiber/type), 'LEAKAGE' (per-order leakage ratios) and the
             constants that were used
    """
    # set the function name
    func_name = __NAME__ + '.correct_dark_fp()'
    # get properties from parameters
    leak2dext = pcheck(params, 'LEAK_2D_EXTRACT_FILES', 'leak2dext', kwargs,
                       func_name, mapf='list')
    extfiletype = pcheck(params, 'LEAK_EXTRACT_FILE', 'extfiletype', kwargs,
                         func_name)
    bckgrd_percentile = pcheck(params, 'LEAK_BCKGRD_PERCENTILE',
                               'bckgrd_percentile', kwargs, func_name)
    norm_percentile = pcheck(params, 'LEAK_NORM_PERCENTILE', 'norm_percentile',
                             kwargs, func_name)
    low_percentile = pcheck(params, 'LEAK_LOW_PERCENTILE', 'low_percentile',
                            kwargs, func_name)
    high_percentile = pcheck(params, 'LEAK_HIGH_PERCENTILE', 'high_percentile',
                             kwargs, func_name)
    # BUGFIX: previously kwargs/func_name were not passed here, so this
    #         constant (unlike all of its siblings) could not be overridden
    #         via kwargs
    bad_ratio = pcheck(params, 'LEAK_BAD_RATIO_OFFSET', 'bad_ratio', kwargs,
                       func_name)
    # group bounding percentiles
    bpercents = [low_percentile, high_percentile]
    # ----------------------------------------------------------------------
    # get this instruments science fibers and reference fiber
    pconst = constants.pload(params['INSTRUMENT'])
    # science fibers should be list of strings, reference fiber should be string
    sci_fibers, ref_fiber = pconst.FIBER_KINDS()
    all_fibers = sci_fibers + [ref_fiber]
    # ----------------------------------------------------------------------
    # get reference file
    ref_file = extractdict[ref_fiber][extfiletype]
    refimage = np.array(ref_file.data)
    ref_header = ref_file.header
    # get size of reference image
    nbo, nbpix = refimage.shape
    # ----------------------------------------------------------------------
    # storage for master files
    master_leaks = dict()
    # load master data
    for fiber in all_fibers:
        # get leak master for file
        _, leakmaster = get_leak_master(params, ref_header, fiber,
                                        'LEAKM_E2DS')
        # append to storage
        master_leaks[fiber] = leakmaster
    # ----------------------------------------------------------------------
    # store the ratio of observe to master reference
    ref_ratio_arr = np.zeros(nbo)
    dot_ratio_arr = np.zeros(nbo)
    approx_ratio_arr = np.zeros(nbo)
    # store the method used (either "dot" or "approx")
    method = []
    # loop around reference image orders and normalise by percentile
    for order_num in range(nbo):
        # get order values for master
        master_ref_ord = master_leaks[ref_fiber][order_num]
        # remove the pedestal from the FP to avoid an offset from
        # thermal background
        background = np.nanpercentile(refimage[order_num], bckgrd_percentile)
        refimage[order_num] = refimage[order_num] - background
        # only perform the measurement of the amplitude of the leakage signal
        # on the lower and upper percentiles. This allows for a small number
        # of hot/dark pixels along the order. Without this, we end up with
        # some spurious amplitude values in the frames
        with warnings.catch_warnings(record=True) as _:
            # get percentiles
            low, high = np.nanpercentile(refimage[order_num], bpercents)
            lowm, highm = np.nanpercentile(master_ref_ord, bpercents)
            # translate this into a mask
            mask = refimage[order_num] > low
            mask &= refimage[order_num] < high
            mask &= master_ref_ord > lowm
            mask &= master_ref_ord < highm
        # approximate ratio, we know that frames were normalized with their
        # "norm_percentile" percentile prior to median combining
        amplitude = np.nanpercentile(refimage[order_num], norm_percentile)
        approx_ratio = 1 / amplitude
        # save to storage
        approx_ratio_arr[order_num] = float(approx_ratio)
        # much more accurate ratio from a dot product
        part1 = mp.nansum(master_ref_ord[mask] * refimage[order_num][mask])
        part2 = mp.nansum(refimage[order_num][mask] ** 2)
        ratio = part1 / part2
        # save to storage
        dot_ratio_arr[order_num] = float(ratio)
        # deal with spurious ref FP ratio
        cond1 = (ratio / approx_ratio) < (1 - bad_ratio)
        cond2 = (ratio / approx_ratio) > (1 + bad_ratio)
        # Ratio must be within (1-badratio) to (1+badratio) of the approximate
        # ratio -- otherwise ratio is bad
        if cond1 or cond2:
            # log warning that ref FP ratio is spurious
            wargs = [order_num, ratio, approx_ratio, ratio / approx_ratio,
                     1 - bad_ratio, 1 + bad_ratio]
            WLOG(params, 'warning', TextEntry('10-016-00024', args=wargs))
            # set the ratio to the approx ratio
            ratio = float(approx_ratio)
            # set the ratio method
            method.append('approx')
        else:
            # set method
            method.append('dot')
        # save ratios to storage
        ref_ratio_arr[order_num] = float(ratio)
    # ----------------------------------------------------------------------
    # storage for extraction outputs
    outputs = dict()
    leakage = dict()
    # ----------------------------------------------------------------------
    # loop around science fibers
    for fiber in sci_fibers:
        # storage for fiber outputs
        outputs[fiber] = dict()
        leakage[fiber] = dict()
        # get the master for this fiber
        master_sci = master_leaks[fiber]
        # loop around extraction types
        # BUGFIX: loop variable renamed (was 'extfiletype') -- it shadowed
        #         the constant read above, so 'LEAK_EXTRACT_FILE_USED' below
        #         recorded the last loop value instead of the configured
        #         extract file type
        for ext_type in leak2dext:
            # log progress
            wargs = [fiber, ext_type]
            WLOG(params, 'info', TextEntry('40-016-00029', args=wargs))
            # get extfile
            extfile = extractdict[fiber][ext_type]
            # get the extraction image
            extimage = np.array(extfile.data)
            # --------------------------------------------------------------
            # if we are dealing with the E2DS we need the flat
            if ext_type == 'E2DS_FILE':
                # load the flat file for this fiber
                flat_file, flat = flat_blaze.get_flat(params, extfile.header,
                                                      fiber, quiet=True)
            # else we set it to ones (no flat correction: master is E2DSFF)
            else:
                flat = np.ones_like(extimage)
            # --------------------------------------------------------------
            # storage for the ratio of leakage
            ratio_leak = np.zeros(nbo)
            # loop around orders
            for order_num in range(nbo):
                # scale the leakage for that order to the observed amplitude
                scale = master_sci[order_num] / ref_ratio_arr[order_num]
                # correct for the flat (in E2DS case) - master is E2DSFF
                scale = scale * flat[order_num]
                # apply leakage scaling
                extimage[order_num] = extimage[order_num] - scale
                # calculate the ratio of the leakage
                rpart1 = np.nanpercentile(refimage[order_num], norm_percentile)
                rpart2 = mp.nanmedian(extimage[order_num])
                ratio_leak[order_num] = rpart1 / rpart2
            # update ext file
            extfile.data = extimage
            # add to output
            outputs[fiber][ext_type] = extfile
            leakage[fiber][ext_type] = ratio_leak
    # ----------------------------------------------------------------------
    # generate a properties dictionary
    props = ParamDict()
    # ----------------------------------------------------------------------
    # add outputs
    props['OUTPUTS'] = outputs
    props['LEAKAGE'] = leakage
    # set sources
    props.set_sources(['OUTPUTS', 'LEAKAGE'], func_name)
    # ----------------------------------------------------------------------
    # add used parameters
    props['LEAK_2D_EXTRACT_FILES_USED'] = leak2dext
    props['LEAK_EXTRACT_FILE_USED'] = extfiletype
    props['LEAK_BCKGRD_PERCENTILE_USED'] = bckgrd_percentile
    props['LEAK_NORM_PERCENTILE_USED'] = norm_percentile
    props['LEAK_LOW_PERCENTILE_USED'] = low_percentile
    props['LEAK_HIGH_PERCENTILE_USED'] = high_percentile
    props['LEAK_BAD_RATIO_OFFSET_USED'] = bad_ratio
    # set sources
    keys = ['LEAK_2D_EXTRACT_FILES_USED', 'LEAK_EXTRACT_FILE_USED',
            'LEAK_BCKGRD_PERCENTILE_USED', 'LEAK_NORM_PERCENTILE_USED',
            'LEAK_LOW_PERCENTILE_USED', 'LEAK_HIGH_PERCENTILE_USED',
            'LEAK_BAD_RATIO_OFFSET_USED']
    props.set_sources(keys, func_name)
    # ----------------------------------------------------------------------
    # return properties
    return props
def dark_fp_regen_s1d(params, recipe, props, **kwargs):
    """Regenerate the 1D spectra (wave and velocity grids) for each fiber
    from the leak-corrected 2D extraction files stored in props['OUTPUTS'].

    Adds 'S1DW' and 'S1DV' (per-fiber s1d property dictionaries) to props
    and returns it.
    """
    # set function name
    func_name = __NAME__ + '.dark_fp_regen_s1d()'
    # corrected extraction outputs (per fiber)
    outputs = props['OUTPUTS']
    # which extraction file type feeds the s1d generation
    s1dextfile = pcheck(params, 'EXT_S1D_INFILE', 's1dextfile', kwargs,
                        func_name)
    # per-fiber storage for the two s1d grids
    s1dw_outs, s1dv_outs = dict(), dict()
    # loop around fibers
    for fiber in outputs:
        # the 2D extraction file to turn into a 1D spectrum
        e2dsfile = outputs[fiber][s1dextfile]
        hdr = e2dsfile.header
        # --------------------------------------------------------------
        # blaze for this fiber
        _, blaze = flat_blaze.get_blaze(params, hdr, fiber)
        # --------------------------------------------------------------
        # wavelength solution for this fiber
        wprops = wave.get_wavesolution(params, recipe, hdr, fiber=fiber)
        # --------------------------------------------------------------
        # build both 1d spectra (s1d) from the e2ds data
        sargs = [wprops['WAVEMAP'], e2dsfile.data, blaze]
        s1dw_outs[fiber] = e2ds_to_s1d(params, recipe, *sargs, wgrid='wave',
                                       fiber=fiber, kind=s1dextfile)
        s1dv_outs[fiber] = e2ds_to_s1d(params, recipe, *sargs,
                                       wgrid='velocity', fiber=fiber,
                                       kind=s1dextfile)
    # push updated outputs into props
    props['S1DW'] = s1dw_outs
    props['S1DV'] = s1dv_outs
    props.set_sources(['S1DW', 'S1DV'], func_name)
    # return outputs
    return props
def get_leak_master(params, header, fiber, kind, filename=None):
    """Load the leak master calibration image for the given fiber.

    Resolves the file from the inputs / calibration database unless an
    explicit filename is given, and returns (filename, image).
    """
    # file definition for this kind of (red) output
    filedef = core.get_file_definition(kind, params['INSTRUMENT'],
                                       kind='red')
    # calibration database key for this fiber
    dbkey = filedef.get_dbkey(fiber=fiber)
    # ------------------------------------------------------------------------
    # allow the filename to be overridden from the inputs
    filename = general.get_input_files(params, 'LEAKFILE', dbkey, header,
                                       filename)
    # ------------------------------------------------------------------------
    # load the calibration file (image + path)
    leak, leak_file = general.load_calib_file(params, dbkey, header,
                                              filename=filename)
    # log which fpmaster file we are using
    WLOG(params, '', TextEntry('40-016-00028', args=[leak_file]))
    # return the master image
    return leak_file, leak
def master_dark_fp_cube(params, recipe, extractdict):
    """Median-combine the extraction files for each fiber into a leak
    master file.

    :param params: ParamDict, parameter dictionary of constants
    :param recipe: DrsRecipe, the recipe instance (provides the
                   'LEAK_MASTER' output definition)
    :param extractdict: dict, list of extraction files per fiber

    :return: dict, one leak master file instance per fiber (data set to
             the median of the per-file images)
    """
    # median cube storage dictionary
    medcubedict = dict()
    # loop around fibers
    for fiber in extractdict:
        # get the file list for this fiber
        extfiles = extractdict[fiber]
        # get the first file as reference
        extfile = extfiles[0]
        # construct the leak master file instance
        outfile = recipe.outputs['LEAK_MASTER'].newcopy(recipe=recipe,
                                                        fiber=fiber)
        # construct the filename from file instance
        outfile.construct_filename(params, infile=extfile)
        # copy keys from input file
        outfile.copy_original_keys(extfile)
        # stack the per-file images into a numpy cube
        # (comprehension instead of the previous index-based append loop)
        cube = np.array([efile.data for efile in extfiles])
        # produce super dark using median
        medcube = mp.nanmedian(cube, axis=0)
        # release the cube before the next (potentially large) allocation
        del cube
        # add median cube to outfile instance
        outfile.data = medcube
        # add to median cube storage
        medcubedict[fiber] = outfile
    # return median cube storage dictionary
    return medcubedict
def get_extraction_files(params, recipe, infile, extname):
    """Locate the extraction outputs matching this input file.

    2D extraction images are read from disk now; 1D images are only
    stored as file instances (they will be re-generated later).
    Returns a dict of file instances keyed by fiber then output type.
    """
    # extraction file types to handle
    leak2dext = params.listp('LEAK_2D_EXTRACT_FILES', dtype=str)
    leak1dext = params.listp('LEAK_1D_EXTRACT_FILES', dtype=str)
    # this instruments science fibers and reference fiber
    pconst = constants.pload(params['INSTRUMENT'])
    # science fibers should be list of strings, reference fiber should be string
    sci_fibers, ref_fiber = pconst.FIBER_KINDS()
    all_fibers = sci_fibers + [ref_fiber]
    # the raw input files listed in the header
    rawfiles = infile.read_header_key_1d_list('KW_INFILE1', dtype=str)
    # reconstruct the preprocessed file (working dir + first raw filename)
    ppfile = infile.intype.newcopy(recipe=recipe)
    pppath = os.path.join(params['DRS_DATA_WORKING'], params['NIGHTNAME'])
    ppfile.set_filename(os.path.join(pppath, rawfiles[0]))
    # ------------------------------------------------------------------
    # find the extraction recipe (to access its output definitions)
    extrecipe, _ = drs_startup.find_recipe(extname, params['INSTRUMENT'],
                                           mod=recipe.recipemod)
    extrecipe.drs_params = params
    # ------------------------------------------------------------------
    # storage for outputs (one sub-dict per fiber)
    extouts = recipe.outputs.keys()
    outputs = dict((fiber, dict()) for fiber in all_fibers)
    # ------------------------------------------------------------------
    # loop around fibers and extraction outputs
    for fiber in all_fibers:
        for extout in extouts:
            # extraction file instance for this fiber/output type
            outfile = extrecipe.outputs[extout].newcopy(recipe=extrecipe,
                                                        fiber=fiber)
            # construct filename
            outfile.construct_filename(params, infile=ppfile)
            # read 2D image (not 1D images -- these will be re-generated)
            if extout in leak2dext:
                outfile.read_file()
                outputs[fiber][extout] = outfile
            # push 1D images to storage (unread)
            if extout in leak1dext:
                outputs[fiber][extout] = outfile
    # return outputs
    return outputs
def save_uncorrected_ext_fp(params, extractdict):
    """Copy uncorrected extraction files to 'DEBUG-uncorr-' backups.

    Raises a DRS error (via WLOG) when a file is missing on disk; only
    performs the copy when LEAK_SAVE_UNCORRECTED is set.
    """
    # loop around fibers and file types
    for fiber in extractdict:
        for extname in extractdict[fiber]:
            # get ext file
            extfile = extractdict[fiber][extname]
            # --------------------------------------------------------------
            # the file must exist - otherwise generate an exception
            if not os.path.exists(extfile.filename):
                eargs = [fiber, extname, extfile.filename]
                WLOG(params, 'error', TextEntry('00-016-00027', args=eargs))
            # --------------------------------------------------------------
            # only keep a backup when requested by the constants
            if not params['LEAK_SAVE_UNCORRECTED']:
                continue
            # --------------------------------------------------------------
            # split into directory + basename and add the debug prefix
            basename = extfile.basename
            srcpath = extfile.filename
            dirname = srcpath.split(basename)[0]
            dstname = 'DEBUG-uncorr-{0}'.format(basename)
            dstpath = os.path.join(dirname, dstname)
            # copy files
            drs_path.copyfile(params, srcpath, dstpath)
def ref_fplines(params, recipe, e2dsfile, wavemap, fiber, **kwargs):
    """Generate FP reference lines for an e2ds file.

    Returns None when the fiber is not the reference fiber, when the
    DPRTYPE is not in the allowed list, or when no master FP lines are
    available; otherwise returns the generated fp reference lines.
    """
    # set up function name
    func_name = display_func(params, 'ref_fplines', __NAME__)
    # allowed DPRTYPEs for FP line generation
    allowtypes = pcheck(params, 'WAVE_FP_DPRLIST', 'fptypes', kwargs, func_name,
                        mapf='list')
    # dprtype of this file
    dprtype = e2dsfile.get_key('KW_DPRTYPE', dtype=str)
    # pseudo constants give the fiber roles for this instrument
    pconst = constants.pload(params['INSTRUMENT'])
    sfibers, rfiber = pconst.FIBER_KINDS()
    # ----------------------------------------------------------------------
    # only the reference fiber is processed
    if fiber != rfiber:
        # Skipping FPLINES (Fiber = {0})'
        WLOG(params, 'debug', TextEntry('90-016-00003', args=[fiber]))
        return None
    # ----------------------------------------------------------------------
    # only allowed dprtypes are processed
    if dprtype not in allowtypes:
        # Skipping FPLINES (DPRTYPE = {0})
        WLOG(params, 'debug', TextEntry('90-016-000034', args=[dprtype]))
        return None
    # ----------------------------------------------------------------------
    # get master hc lines and fp lines from calibDB
    wout = wave.get_wavelines(params, recipe, fiber, infile=e2dsfile)
    mhclines, mhclsource, mfplines, mfplsource = wout
    # deal with no fplines found
    if mfplines is None:
        return None
    # ----------------------------------------------------------------------
    # generate and return the fp reference lines for this e2ds file
    fpargs = dict(e2dsfile=e2dsfile, wavemap=wavemap, fplines=mfplines)
    return wave.get_master_lines(params, recipe, **fpargs)
# =============================================================================
# Define s1d functions
# =============================================================================
def e2ds_to_s1d(params, recipe, wavemap, e2ds, blaze, fiber=None, wgrid='wave',
                kind=None, **kwargs):
    """Convert a 2D extracted spectrum (e2ds) into a 1D spectrum (s1d).

    Each order is blaze-weighted and smoothly tapered at its edges, then
    splined onto a common output wavelength grid (uniform in wavelength
    or uniform in velocity depending on ``wgrid``); overlapping orders
    are combined as a blaze-weighted mean.

    :param params: ParamDict, the parameter dictionary of constants
    :param recipe: DrsRecipe, the recipe instance (used for plotting)
    :param wavemap: wavelength solution array (orders x pixels,
                    same shape as e2ds -- assumed, confirm at caller)
    :param e2ds: 2D spectrum to convert (orders x pixels)
    :param blaze: blaze function, same shape as e2ds
    :param fiber: str or None, fiber name (passed to the plot)
    :param wgrid: str, 'wave' for a uniform-wavelength grid, anything
                  else for a uniform-velocity (log-wavelength) grid
    :param kind: str or None, the s1d kind (passed to the plot)
    :param kwargs: overrides for the pcheck'd constants below

    :return: ParamDict with the output wavegrid, 1D flux, errors,
             weights, the s1d table and the constants used
    """
    func_name = __NAME__ + '.e2ds_to_s1d()'
    # get parameters from p
    wavestart = pcheck(params, 'EXT_S1D_WAVESTART', 'wavestart', kwargs,
                       func_name)
    waveend = pcheck(params, 'EXT_S1D_WAVEEND', 'waveend', kwargs,
                     func_name)
    binwave = pcheck(params, 'EXT_S1D_BIN_UWAVE', 'binwave', kwargs,
                     func_name)
    binvelo = pcheck(params, 'EXT_S1D_BIN_UVELO', 'binvelo', kwargs,
                     func_name)
    smooth_size = pcheck(params, 'EXT_S1D_EDGE_SMOOTH_SIZE', 'smooth_size',
                         kwargs, func_name)
    blazethres = pcheck(params, 'TELLU_CUT_BLAZE_NORM', 'blazethres', kwargs,
                        func_name)
    # get size from e2ds
    nord, npix = e2ds.shape
    # log progress: calculating s1d (wavegrid)
    WLOG(params, '', TextEntry('40-016-00009', args=[wgrid]))
    # -------------------------------------------------------------------------
    # Decide on output wavelength grid
    # -------------------------------------------------------------------------
    if wgrid == 'wave':
        # uniform steps of binwave; half-step added so waveend is included
        wavegrid = np.arange(wavestart, waveend + binwave / 2.0, binwave)
    else:
        # work out number of wavelength points
        flambda = np.log(waveend / wavestart)
        nlambda = np.round((speed_of_light_kms / binvelo) * flambda)
        # updating end wavelength slightly to have exactly 'step' km/s
        waveend = np.exp(nlambda * (binvelo / speed_of_light_kms)) * wavestart
        # get the wavegrid (logarithmically spaced -> constant velocity step)
        index = np.arange(nlambda) / nlambda
        wavegrid = wavestart * np.exp(index * np.log(waveend / wavestart))
    # -------------------------------------------------------------------------
    # define a smooth transition mask at the edges of the image
    # this ensures that the s1d has no discontinuity when going from one order
    # to the next. We define a scale for this mask
    # smoothing scale
    # -------------------------------------------------------------------------
    # define a kernal that goes from -3 to +3 smooth_sizes of the mask
    xker = np.arange(-smooth_size * 3, smooth_size * 3, 1)
    ker = np.exp(-0.5 * (xker / smooth_size) ** 2)
    # set up the edge vector
    edges = np.ones(npix, dtype=bool)
    # set edges of the image to 0 so that we get a sloping weight
    edges[:int(3 * smooth_size)] = False
    edges[-int(3 * smooth_size):] = False
    # define the weighting for the edges (slopevector)
    slopevector = np.zeros_like(blaze)
    # for each order find the sloping weight vector
    for order_num in range(nord):
        # get the blaze for this order
        oblaze = np.array(blaze[order_num])
        # find the valid pixels: finite blaze/flux, blaze above threshold,
        # and away from the order edges
        cond1 = np.isfinite(oblaze) & np.isfinite(e2ds[order_num])
        with warnings.catch_warnings(record=True) as _:
            cond2 = oblaze > (blazethres * mp.nanmax(oblaze))
        valid = cond1 & cond2 & edges
        # convolve with the edge kernel (Gaussian taper of the validity mask)
        oweight = np.convolve(valid, ker, mode='same')
        # normalise to the maximum
        with warnings.catch_warnings(record=True) as _:
            oweight = oweight - mp.nanmin(oweight)
            oweight = oweight / mp.nanmax(oweight)
        # append to sloping vector storage
        slopevector[order_num] = oweight
    # multiple the spectrum and blaze by the sloping vector
    sblaze = np.array(blaze) * slopevector
    se2ds = np.array(e2ds) * slopevector
    # -------------------------------------------------------------------------
    # Perform a weighted mean of overlapping orders
    # by performing a spline of both the blaze and the spectrum
    # -------------------------------------------------------------------------
    out_spec = np.zeros_like(wavegrid)
    weight = np.zeros_like(wavegrid)
    # loop around all orders
    for order_num in range(nord):
        # identify the valid pixels
        valid = np.isfinite(se2ds[order_num]) & np.isfinite(sblaze[order_num])
        # if we have no valid points we need to skip
        if np.sum(valid) == 0:
            continue
        # get this orders vectors
        owave = wavemap[order_num]
        oe2ds = se2ds[order_num, valid]
        oblaze = sblaze[order_num]
        # create the splines for this order
        # NOTE(review): mp.iuv_spline with ext=1 presumably returns 0 outside
        # the spline domain (scipy InterpolatedUnivariateSpline convention)
        # -- confirm against the math module
        spline_sp = mp.iuv_spline(owave[valid], oe2ds, k=5, ext=1)
        spline_bl = mp.iuv_spline(owave, oblaze, k=1, ext=1)
        valid_float = valid.astype(float)
        # we mask pixels that are neighbours to a NaN.
        valid_float = np.convolve(valid_float, np.ones(3) / 3.0, mode='same')
        spline_valid = mp.iuv_spline(owave, valid_float, k=1, ext=1)
        # can only spline in domain of the wave
        useful_range = (wavegrid > mp.nanmin(owave[valid]))
        useful_range &= (wavegrid < mp.nanmax(owave[valid]))
        # finding pixels where we have immediate neighbours that are
        # considered valid in the spline (to avoid interpolating over large
        # gaps in validity)
        maskvalid = np.zeros_like(wavegrid, dtype=bool)
        maskvalid[useful_range] = spline_valid(wavegrid[useful_range]) > 0.9
        useful_range &= maskvalid
        # get splines and add to outputs
        weight[useful_range] += spline_bl(wavegrid[useful_range])
        out_spec[useful_range] += spline_sp(wavegrid[useful_range])
    # need to deal with zero weight --> set them to NaNs
    zeroweights = weight == 0
    weight[zeroweights] = np.nan
    # plot the s1d weight/before/after plot
    recipe.plot('EXTRACT_S1D_WEIGHT', params=params, wave=wavegrid,
                flux=out_spec, weight=weight, kind=wgrid, fiber=fiber,
                stype=kind)
    # work out the weighted spectrum (division by NaN weight propagates NaN)
    with warnings.catch_warnings(record=True) as _:
        w_out_spec = out_spec / weight
    # TODO: propagate errors
    ew_out_spec = np.zeros_like(w_out_spec)
    # construct the s1d table (for output)
    s1dtable = Table()
    s1dtable['wavelength'] = wavegrid
    s1dtable['flux'] = w_out_spec
    s1dtable['eflux'] = ew_out_spec
    s1dtable['weight'] = weight
    # set up return dictionary
    props = ParamDict()
    # add data
    props['WAVEGRID'] = wavegrid
    props['S1D'] = w_out_spec
    props['S1D_ERROR'] = ew_out_spec
    props['WEIGHT'] = weight
    # add astropy table
    props['S1DTABLE'] = s1dtable
    # add constants
    props['WAVESTART'] = wavestart
    props['WAVEEND'] = waveend
    props['WAVEKIND'] = wgrid
    if wgrid == 'wave':
        props['BIN_WAVE'] = binwave
        props['BIN_VELO'] = 'None'
    else:
        props['BIN_WAVE'] = 'None'
        props['BIN_VELO'] = binvelo
    props['SMOOTH_SIZE'] = smooth_size
    props['BLAZE_THRES'] = blazethres
    # add source
    keys = ['WAVEGRID', 'S1D', 'WEIGHT', 'S1D_ERROR', 'S1DTABLE',
            'WAVESTART', 'WAVEEND', 'WAVEKIND', 'BIN_WAVE',
            'BIN_VELO', 'SMOOTH_SIZE', 'BLAZE_THRES']
    props.set_sources(keys, func_name)
    # return properties
    return props
def add_s1d_keys(infile, props):
    """Write the s1d property keys into the output file header and
    return the file instance."""
    # header keyword -> s1d property it records
    keypairs = [('KW_S1D_WAVESTART', 'WAVESTART'),
                ('KW_S1D_WAVEEND', 'WAVEEND'),
                ('KW_S1D_KIND', 'WAVEKIND'),
                ('KW_S1D_BWAVE', 'BIN_WAVE'),
                ('KW_S1D_BVELO', 'BIN_VELO'),
                ('KW_S1D_SMOOTH', 'SMOOTH_SIZE'),
                ('KW_S1D_BLAZET', 'BLAZE_THRES')]
    for hkey, pkey in keypairs:
        infile.add_hkey(hkey, value=props[pkey])
    return infile
# =============================================================================
# writing and qc functions
# =============================================================================
def qc_extraction(params, eprops):
    """Quality control check on the extracted E2DS image.

    The single test fails only when the E2DS array contains no finite
    values. Returns (qc_params, passed) where passed is 1 on success
    and 0 on failure.
    """
    # storage for qc outcomes
    fail_msg = []
    qc_values, qc_names, qc_logic, qc_pass = [], [], [], []
    textdict = TextDict(params['INSTRUMENT'], params['LANGUAGE'])
    # --------------------------------------------------------------
    # test: the E2DS must contain at least one finite pixel
    if np.sum(np.isfinite(eprops['E2DS'])) == 0:
        # record the failure message for this test
        fail_msg.append(textdict['40-016-00008'])
        qc_pass.append(0)
    else:
        qc_pass.append(1)
    # record the test in the qc header lists
    qc_values.append('NaN')
    qc_names.append('image')
    qc_logic.append('image is all NaN')
    # --------------------------------------------------------------
    # log the outcome: QC = 1 when every test passed, QC = 0 otherwise
    if np.sum(qc_pass) == len(qc_pass):
        WLOG(params, 'info', TextEntry('40-005-10001'))
        passed = 1
    else:
        for farg in fail_msg:
            WLOG(params, 'warning', TextEntry('40-005-10002') + farg)
        passed = 0
    # store in qc_params
    qc_params = [qc_names, qc_values, qc_logic, qc_pass]
    # return
    return qc_params, passed
def write_extraction_files(params, recipe, infile, rawfiles, combine, fiber,
                           orderpfile, props, lprops, wprops, eprops, bprops,
                           swprops, svprops, shapelocalfile, shapexfile,
                           shapeyfile, shapelocal, flat_file, blaze_file,
                           qc_params):
    """Write the extraction products to disk and register them.

    Writes, in order: E2DS (full header built here), E2DSFF, E2DSLL,
    S1D_W (wave grid) and S1D_V (velocity grid). The E2DS header is
    built first and copied into every subsequent product. Each file is
    added to the recipe's output index after writing.

    :param params: ParamDict, parameter dictionary of constants
    :param recipe: DrsRecipe, the recipe instance (provides outputs)
    :param infile: the input file instance (filenames/headers come
                   from it)
    :param rawfiles: list of str, raw input basenames (used when
                     combine is True)
    :param combine: bool, whether the input files were combined
    :param fiber: str, the fiber name
    :param orderpfile: order profile calibration filename
    :param props: ParamDict, calibration properties (written by
                  general.add_calibs_to_header)
    :param lprops: ParamDict, localisation properties ('LOCOFILE',
                   'LOCOOBJECT')
    :param wprops: ParamDict, wavelength solution properties
                   ('WAVEFILE' plus wave keys)
    :param eprops: ParamDict, extraction properties ('E2DS', 'E2DSFF',
                   'E2DSLL', 'SNR', cosmic/saturation keys, ...)
    :param bprops: ParamDict, BERV properties
    :param swprops: ParamDict, s1d properties on the wave grid
    :param svprops: ParamDict, s1d properties on the velocity grid
    :param shapelocalfile: local shape calibration filename
    :param shapexfile: shape dx calibration filename
    :param shapeyfile: shape dy calibration filename
    :param shapelocal: sequence of 6 local shape transform
                       coefficients (dx, dy, A, B, C, D)
    :param flat_file: flat calibration filename
    :param blaze_file: blaze calibration filename
    :param qc_params: list, qc parameter lists (from qc_extraction)

    :return: tuple, (e2dsfile, e2dsfffile) -- the written E2DS and
             E2DSFF file instances
    """
    # ----------------------------------------------------------------------
    # Store E2DS in file
    # ----------------------------------------------------------------------
    # get a new copy of the e2ds file
    e2dsfile = recipe.outputs['E2DS_FILE'].newcopy(recipe=recipe,
                                                   fiber=fiber)
    # construct the filename from file instance
    e2dsfile.construct_filename(params, infile=infile)
    # define header keys for output file
    # copy keys from input file (excluding loc)
    e2dsfile.copy_original_keys(infile, exclude_groups=['loc'])
    # add version
    e2dsfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])
    # add dates
    e2dsfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
    e2dsfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
    # add process id
    e2dsfile.add_hkey('KW_PID', value=params['PID'])
    # add output tag
    e2dsfile.add_hkey('KW_OUTPUT', value=e2dsfile.name)
    e2dsfile.add_hkey('KW_FIBER', value=fiber)
    # add input files (and deal with combining or not combining)
    if combine:
        hfiles = rawfiles
    else:
        hfiles = [infile.basename]
    e2dsfile.add_hkey_1d('KW_INFILE1', values=hfiles, dim1name='file')
    # add the calibration files use
    e2dsfile = general.add_calibs_to_header(e2dsfile, props)
    # ----------------------------------------------------------------------
    # add the other calibration files used
    e2dsfile.add_hkey('KW_CDBORDP', value=orderpfile)
    e2dsfile.add_hkey('KW_CDBLOCO', value=lprops['LOCOFILE'])
    e2dsfile.add_hkey('KW_CDBSHAPEL', value=shapelocalfile)
    e2dsfile.add_hkey('KW_CDBSHAPEDX', value=shapexfile)
    e2dsfile.add_hkey('KW_CDBSHAPEDY', value=shapeyfile)
    e2dsfile.add_hkey('KW_CDBFLAT', value=flat_file)
    e2dsfile.add_hkey('KW_CDBBLAZE', value=blaze_file)
    # thermal file key is only present when a thermal correction was done
    if 'THERMALFILE' in eprops:
        e2dsfile.add_hkey('KW_CDBTHERMAL', value=eprops['THERMALFILE'])
    e2dsfile.add_hkey('KW_CDBWAVE', value=wprops['WAVEFILE'])
    # additional calibration keys
    if 'FIBERTYPE' in eprops:
        e2dsfile.add_hkey('KW_C_FTYPE', value=eprops['FIBERTYPE'])
    # ----------------------------------------------------------------------
    # add qc parameters
    e2dsfile.add_qckeys(qc_params)
    # ----------------------------------------------------------------------
    # add shape transform parameters
    e2dsfile.add_hkey('KW_SHAPE_DX', value=shapelocal[0])
    e2dsfile.add_hkey('KW_SHAPE_DY', value=shapelocal[1])
    e2dsfile.add_hkey('KW_SHAPE_A', value=shapelocal[2])
    e2dsfile.add_hkey('KW_SHAPE_B', value=shapelocal[3])
    e2dsfile.add_hkey('KW_SHAPE_C', value=shapelocal[4])
    e2dsfile.add_hkey('KW_SHAPE_D', value=shapelocal[5])
    # ----------------------------------------------------------------------
    # add extraction type (does not change for future files)
    e2dsfile.add_hkey('KW_EXT_TYPE', value=e2dsfile.name)
    # add SNR parameters to header
    e2dsfile.add_hkey_1d('KW_EXT_SNR', values=eprops['SNR'],
                         dim1name='order')
    # add start and end extraction order used
    e2dsfile.add_hkey('KW_EXT_START', value=eprops['START_ORDER'])
    e2dsfile.add_hkey('KW_EXT_END', value=eprops['END_ORDER'])
    # add extraction ranges used
    e2dsfile.add_hkey('KW_EXT_RANGE1', value=eprops['RANGE1'])
    e2dsfile.add_hkey('KW_EXT_RANGE2', value=eprops['RANGE2'])
    # add cosmic parameters used
    e2dsfile.add_hkey('KW_COSMIC', value=eprops['COSMIC'])
    e2dsfile.add_hkey('KW_COSMIC_CUT', value=eprops['COSMIC_SIGCUT'])
    e2dsfile.add_hkey('KW_COSMIC_THRES',
                      value=eprops['COSMIC_THRESHOLD'])
    # add saturation parameters used
    e2dsfile.add_hkey('KW_SAT_QC', value=eprops['SAT_LEVEL'])
    # nanmax over an all-NaN array warns; the warning is suppressed here
    with warnings.catch_warnings(record=True) as _:
        max_sat_level = mp.nanmax(eprops['FLUX_VAL'])
    e2dsfile.add_hkey('KW_SAT_LEVEL', value=max_sat_level)
    # ----------------------------------------------------------------------
    # add loco parameters (using locofile)
    locofile = lprops['LOCOOBJECT']
    e2dsfile.copy_original_keys(locofile, group='loc')
    # ----------------------------------------------------------------------
    # add wave keys
    e2dsfile = wave.add_wave_keys(params, e2dsfile, wprops)
    # ----------------------------------------------------------------------
    # add berv properties to header
    e2dsfile = berv.add_berv_keys(params, e2dsfile, bprops)
    # add leakage switch to header (leakage currently not corrected)
    e2dsfile.add_hkey('KW_LEAK_CORR', value=0)
    # ----------------------------------------------------------------------
    # copy data
    e2dsfile.data = eprops['E2DS']
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = [e2dsfile.filename]
    WLOG(params, '', TextEntry('40-016-00005', args=wargs))
    # write image to file
    e2dsfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(e2dsfile)
    # ----------------------------------------------------------------------
    # Store E2DSFF in file
    # ----------------------------------------------------------------------
    # get a new copy of the e2dsff file
    e2dsfffile = recipe.outputs['E2DSFF_FILE'].newcopy(recipe=recipe,
                                                       fiber=fiber)
    # construct the filename from file instance
    e2dsfffile.construct_filename(params, infile=infile)
    # copy header from e2dsff file
    e2dsfffile.copy_hdict(e2dsfile)
    # add extraction type (does not change for future files)
    e2dsfffile.add_hkey('KW_EXT_TYPE', value=e2dsfffile.name)
    # set output key
    e2dsfffile.add_hkey('KW_OUTPUT', value=e2dsfffile.name)
    # copy data
    e2dsfffile.data = eprops['E2DSFF']
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = [e2dsfffile.filename]
    WLOG(params, '', TextEntry('40-016-00006', args=wargs))
    # write image to file
    e2dsfffile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(e2dsfffile)
    # ----------------------------------------------------------------------
    # Store E2DSLL in file
    # ----------------------------------------------------------------------
    # get a new copy of the e2dsll file
    e2dsllfile = recipe.outputs['E2DSLL_FILE'].newcopy(recipe=recipe,
                                                       fiber=fiber)
    # construct the filename from file instance
    e2dsllfile.construct_filename(params, infile=infile)
    # copy header from e2dsll file
    e2dsllfile.copy_hdict(e2dsfile)
    # set output key
    e2dsllfile.add_hkey('KW_OUTPUT', value=e2dsllfile.name)
    # copy data
    e2dsllfile.data = eprops['E2DSLL']
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = [e2dsllfile.filename]
    WLOG(params, '', TextEntry('40-016-00007', args=wargs))
    # write image to file
    e2dsllfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(e2dsllfile)
    # ----------------------------------------------------------------------
    # Store S1D_W in file
    # ----------------------------------------------------------------------
    # get a new copy of the s1d_w file
    s1dwfile = recipe.outputs['S1D_W_FILE'].newcopy(recipe=recipe,
                                                    fiber=fiber)
    # construct the filename from file instance
    s1dwfile.construct_filename(params, infile=infile)
    # copy header from e2dsll file
    s1dwfile.copy_hdict(e2dsfile)
    # set output key
    s1dwfile.add_hkey('KW_OUTPUT', value=s1dwfile.name)
    # add new header keys
    s1dwfile = add_s1d_keys(s1dwfile, swprops)
    # copy data
    s1dwfile.data = swprops['S1DTABLE']
    # must change the datatype to 'table'
    s1dwfile.datatype = 'table'
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = ['wave', s1dwfile.filename]
    WLOG(params, '', TextEntry('40-016-00010', args=wargs))
    # write image to file
    s1dwfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(s1dwfile)
    # ----------------------------------------------------------------------
    # Store S1D_V in file
    # ----------------------------------------------------------------------
    # get a new copy of the s1d_v file
    s1dvfile = recipe.outputs['S1D_V_FILE'].newcopy(recipe=recipe,
                                                    fiber=fiber)
    # construct the filename from file instance
    s1dvfile.construct_filename(params, infile=infile)
    # copy header from e2dsll file
    s1dvfile.copy_hdict(e2dsfile)
    # add new header keys
    s1dvfile = add_s1d_keys(s1dvfile, svprops)
    # set output key
    s1dvfile.add_hkey('KW_OUTPUT', value=s1dvfile.name)
    # copy data
    s1dvfile.data = svprops['S1DTABLE']
    # must change the datatype to 'table'
    s1dvfile.datatype = 'table'
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = ['velocity', s1dvfile.filename]
    WLOG(params, '', TextEntry('40-016-00010', args=wargs))
    # write image to file
    s1dvfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(s1dvfile)
    # ----------------------------------------------------------------------
    # return e2ds files
    return e2dsfile, e2dsfffile
def write_extraction_files_ql(params, recipe, infile, rawfiles, combine, fiber,
                              orderpfile, props, lprops, eprops, shapelocalfile,
                              shapexfile, shapeyfile, shapelocal, flat_file,
                              blaze_file, qc_params):
    """Write the quick-look extraction outputs (Q2DS and Q2DSFF) to disk.

    Builds the Q2DS output file from the recipe's 'Q2DS_FILE' definition,
    fills its header (version/date/PID keys, input files, calibration
    filenames, qc parameters, shape-transform parameters, SNR and
    extraction/cosmic/saturation parameters), attaches the extracted data
    and writes it; then builds the Q2DSFF file by copying the Q2DS header,
    attaches the flat-fielded data and writes it. Both files are registered
    with the recipe for indexing.

    :param params: parameter dictionary of constants; must provide
                   'DRS_VERSION', 'DRS_DATE', 'DATE_NOW' and 'PID'
    :param recipe: recipe instance; provides outputs['Q2DS_FILE'] and
                   outputs['Q2DSFF_FILE'] plus add_output_file()
    :param infile: input drs file instance the outputs are built from
    :param rawfiles: list of raw file basenames (used when combine=True)
    :param combine: bool, True if the input was a combination of raw files
                    (then all rawfiles are recorded in the header, otherwise
                    only infile.basename)
    :param fiber: str, fiber name these outputs belong to
    :param orderpfile: order profile calibration filename (KW_CDBORDP)
    :param props: parameter dictionary forwarded to
                  general.add_calibs_to_header
    :param lprops: localisation properties; must provide 'LOCOFILE'
    :param eprops: extraction properties; must provide 'E2DS', 'E2DSFF',
                   'SNR', 'START_ORDER', 'END_ORDER', 'RANGE1', 'RANGE2',
                   'COSMIC', 'COSMIC_SIGCUT', 'COSMIC_THRESHOLD',
                   'SAT_LEVEL', 'FLUX_VAL' and optionally 'FIBERTYPE'
    :param shapelocalfile: local shape calibration filename (KW_CDBSHAPEL)
    :param shapexfile: shape dx calibration filename (KW_CDBSHAPEDX)
    :param shapeyfile: shape dy calibration filename (KW_CDBSHAPEDY)
    :param shapelocal: sequence of at least 6 shape transform parameters
                       (dx, dy, A, B, C, D)
    :param flat_file: flat calibration filename (KW_CDBFLAT)
    :param blaze_file: blaze calibration filename (KW_CDBBLAZE)
    :param qc_params: quality control parameters added via add_qckeys

    :return: tuple, (e2dsfile, e2dsfffile) the written Q2DS and Q2DSFF
             drs file instances
    """
    # ----------------------------------------------------------------------
    # Store E2DS in file
    # ----------------------------------------------------------------------
    # get a new copy of the e2ds file
    e2dsfile = recipe.outputs['Q2DS_FILE'].newcopy(recipe=recipe,
                                                   fiber=fiber)
    # construct the filename from file instance
    e2dsfile.construct_filename(params, infile=infile)
    # define header keys for output file
    # copy keys from input file (excluding loc)
    e2dsfile.copy_original_keys(infile, exclude_groups=['loc'])
    # add version
    e2dsfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])
    # add dates
    e2dsfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
    e2dsfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
    # add process id
    e2dsfile.add_hkey('KW_PID', value=params['PID'])
    # add output tag
    e2dsfile.add_hkey('KW_OUTPUT', value=e2dsfile.name)
    e2dsfile.add_hkey('KW_FIBER', value=fiber)
    # add input files (and deal with combining or not combining)
    if combine:
        hfiles = rawfiles
    else:
        hfiles = [infile.basename]
    e2dsfile.add_hkey_1d('KW_INFILE1', values=hfiles, dim1name='file')
    # add the calibration files use
    e2dsfile = general.add_calibs_to_header(e2dsfile, props)
    # ----------------------------------------------------------------------
    # add the other calibration files used
    e2dsfile.add_hkey('KW_CDBORDP', value=orderpfile)
    e2dsfile.add_hkey('KW_CDBLOCO', value=lprops['LOCOFILE'])
    e2dsfile.add_hkey('KW_CDBSHAPEL', value=shapelocalfile)
    e2dsfile.add_hkey('KW_CDBSHAPEDX', value=shapexfile)
    e2dsfile.add_hkey('KW_CDBSHAPEDY', value=shapeyfile)
    e2dsfile.add_hkey('KW_CDBFLAT', value=flat_file)
    e2dsfile.add_hkey('KW_CDBBLAZE', value=blaze_file)
    # additional calibration keys
    if 'FIBERTYPE' in eprops:
        e2dsfile.add_hkey('KW_C_FTYPE', value=eprops['FIBERTYPE'])
    # ----------------------------------------------------------------------
    # add qc parameters
    e2dsfile.add_qckeys(qc_params)
    # ----------------------------------------------------------------------
    # add shape transform parameters
    e2dsfile.add_hkey('KW_SHAPE_DX', value=shapelocal[0])
    e2dsfile.add_hkey('KW_SHAPE_DY', value=shapelocal[1])
    e2dsfile.add_hkey('KW_SHAPE_A', value=shapelocal[2])
    e2dsfile.add_hkey('KW_SHAPE_B', value=shapelocal[3])
    e2dsfile.add_hkey('KW_SHAPE_C', value=shapelocal[4])
    e2dsfile.add_hkey('KW_SHAPE_D', value=shapelocal[5])
    # ----------------------------------------------------------------------
    # add extraction type (does not change for future files)
    e2dsfile.add_hkey('KW_EXT_TYPE', value=e2dsfile.name)
    # add SNR parameters to header
    e2dsfile.add_hkey_1d('KW_EXT_SNR', values=eprops['SNR'],
                         dim1name='order')
    # add start and end extraction order used
    e2dsfile.add_hkey('KW_EXT_START', value=eprops['START_ORDER'])
    e2dsfile.add_hkey('KW_EXT_END', value=eprops['END_ORDER'])
    # add extraction ranges used
    e2dsfile.add_hkey('KW_EXT_RANGE1', value=eprops['RANGE1'])
    e2dsfile.add_hkey('KW_EXT_RANGE2', value=eprops['RANGE2'])
    # add cosmic parameters used
    e2dsfile.add_hkey('KW_COSMIC', value=eprops['COSMIC'])
    e2dsfile.add_hkey('KW_COSMIC_CUT', value=eprops['COSMIC_SIGCUT'])
    e2dsfile.add_hkey('KW_COSMIC_THRES',
                      value=eprops['COSMIC_THRESHOLD'])
    # add saturation parameters used
    e2dsfile.add_hkey('KW_SAT_QC', value=eprops['SAT_LEVEL'])
    # suppress all-NaN warnings while taking the max flux value
    with warnings.catch_warnings(record=True) as _:
        max_sat_level = mp.nanmax(eprops['FLUX_VAL'])
    e2dsfile.add_hkey('KW_SAT_LEVEL', value=max_sat_level)
    # ----------------------------------------------------------------------
    # copy data
    e2dsfile.data = eprops['E2DS']
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = [e2dsfile.filename]
    WLOG(params, '', TextEntry('40-016-00005', args=wargs))
    # write image to file
    e2dsfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(e2dsfile)
    # ----------------------------------------------------------------------
    # Store E2DSFF in file
    # ----------------------------------------------------------------------
    # get a new copy of the e2dsff file
    e2dsfffile = recipe.outputs['Q2DSFF_FILE'].newcopy(recipe=recipe,
                                                       fiber=fiber)
    # construct the filename from file instance
    e2dsfffile.construct_filename(params, infile=infile)
    # copy header from e2dsff file
    e2dsfffile.copy_hdict(e2dsfile)
    # add extraction type (does not change for future files)
    e2dsfffile.add_hkey('KW_EXT_TYPE', value=e2dsfffile.name)
    # set output key
    e2dsfffile.add_hkey('KW_OUTPUT', value=e2dsfffile.name)
    # copy data
    e2dsfffile.data = eprops['E2DSFF']
    # ----------------------------------------------------------------------
    # log that we are saving rotated image
    wargs = [e2dsfffile.filename]
    WLOG(params, '', TextEntry('40-016-00006', args=wargs))
    # write image to file
    e2dsfffile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(e2dsfffile)
    # ----------------------------------------------------------------------
    # return e2ds files
    return e2dsfile, e2dsfffile
def extract_summary(recipe, params, qc_params, e2dsfile, shapelocal, eprops,
                    fiber):
    """Record qc parameters and extraction statistics in the recipe summary.

    Registers the fiber-specific qc parameters with the recipe plot, then
    records one statistic per header key (version, dates, extraction type,
    shape transform, extraction orders/ranges and cosmic parameters).

    :param recipe: recipe instance; provides recipe.plot.add_qc_params and
                   recipe.plot.add_stat
    :param params: parameter dictionary; must provide 'DRS_VERSION' and
                   'DRS_DATE'
    :param qc_params: quality control parameters for this fiber
    :param e2dsfile: drs file instance; its .name is used as the
                     extraction type
    :param shapelocal: sequence of at least 6 shape transform parameters
                       (dx, dy, A, B, C, D)
    :param eprops: extraction properties; must provide 'START_ORDER',
                   'END_ORDER', 'RANGE1', 'RANGE2', 'COSMIC',
                   'COSMIC_SIGCUT' and 'COSMIC_THRESHOLD'
    :param fiber: str, the fiber these statistics belong to

    :return: None
    """
    # add qc params (fiber specific)
    recipe.plot.add_qc_params(qc_params, fiber=fiber)
    # ordered list of (header key, value) statistics to record
    stat_entries = [('KW_VERSION', params['DRS_VERSION']),
                    ('KW_DRS_DATE', params['DRS_DATE']),
                    ('KW_EXT_TYPE', e2dsfile.name),
                    ('KW_SHAPE_DX', shapelocal[0]),
                    ('KW_SHAPE_DY', shapelocal[1]),
                    ('KW_SHAPE_A', shapelocal[2]),
                    ('KW_SHAPE_B', shapelocal[3]),
                    ('KW_SHAPE_C', shapelocal[4]),
                    ('KW_SHAPE_D', shapelocal[5]),
                    ('KW_EXT_START', eprops['START_ORDER']),
                    ('KW_EXT_END', eprops['END_ORDER']),
                    ('KW_EXT_RANGE1', eprops['RANGE1']),
                    ('KW_EXT_RANGE2', eprops['RANGE2']),
                    ('KW_COSMIC', eprops['COSMIC']),
                    ('KW_COSMIC_CUT', eprops['COSMIC_SIGCUT']),
                    ('KW_COSMIC_THRES', eprops['COSMIC_THRESHOLD'])]
    # record each statistic against this fiber
    for stat_key, stat_value in stat_entries:
        recipe.plot.add_stat(stat_key, value=stat_value, fiber=fiber)
def qc_leak_master(params, medcubes):
    """Run quality control on the leak master outputs (one entry per fiber).

    No real quality control is currently performed: every fiber gets a
    single placeholder qc entry that always passes.

    :param params: parameter dictionary; must provide 'INSTRUMENT' and
                   'LANGUAGE'
    :param medcubes: dict-like, keyed by fiber name (values unused here)

    :return: tuple, (qc_params, passed) where qc_params maps each fiber to
             [qc_names, qc_values, qc_logic, qc_pass] and passed is truthy
             only if every fiber passed
    """
    # per-fiber qc storage and the combined pass flag
    qc_params = dict()
    passed = True
    # loop around fibers
    for fiber in medcubes:
        # log that we are doing qc for a specific fiber
        WLOG(params, 'info', TextEntry('40-016-00026', args=[fiber]))
        # fresh qc storage for this fiber
        fail_msg, qc_values, qc_names, qc_logic, qc_pass = [], [], [], [], []
        # textdict is currently unused but kept for parity with other qc
        # functions in this module
        textdict = TextDict(params['INSTRUMENT'], params['LANGUAGE'])
        # no quality control currently: record a single passing placeholder
        qc_values.append('None')
        qc_names.append('None')
        qc_logic.append('None')
        qc_pass.append(1)
        # ------------------------------------------------------------------
        # QC = 1 if every check passed, QC = 0 otherwise (log failures)
        if np.sum(qc_pass) != len(qc_pass):
            for farg in fail_msg:
                WLOG(params, 'warning', TextEntry('40-005-10002') + farg)
            passed_fiber = 0
        else:
            WLOG(params, 'info', TextEntry('40-005-10001'))
            passed_fiber = 1
        # store this fiber's qc parameters and fold into the combined flag
        qc_params[fiber] = [qc_names, qc_values, qc_logic, qc_pass]
        passed &= passed_fiber
    # return qc_params and passed
    return qc_params, passed
def qc_leak(params, props, **kwargs):
    """Run quality control on the leak-corrected outputs (per fiber).

    Copies the quality control entries from the header of each fiber's
    leak extraction file (no new checks are added here) and combines them
    into a single pass flag.

    :param params: parameter dictionary; must provide 'INSTRUMENT' and
                   'LANGUAGE'
    :param props: parameter dictionary; must provide 'OUTPUTS', a mapping
                  fiber -> {extname: extfile}
    :param kwargs: may override 'LEAK_EXTRACT_FILE' via 'extname'
                   (resolved through pcheck)

    :return: tuple, (qc_params, passed) where qc_params maps each fiber to
             [qc_names, qc_values, qc_logic, qc_pass] and passed is truthy
             only if every fiber passed
    """
    # set function name
    func_name = __NAME__ + '.qc_leak()'
    # get outputs from props
    outputs = props['OUTPUTS']
    # resolve which extraction file carries the qc keys
    extname = pcheck(params, 'LEAK_EXTRACT_FILE', 'extname', kwargs,
                     func_name)
    # per-fiber qc storage and the combined pass flag
    qc_params = dict()
    passed = True
    # loop around fibers
    for fiber in outputs:
        # log that we are doing qc for a specific fiber
        WLOG(params, 'info', TextEntry('40-016-00026', args=[fiber]))
        # fresh fail message list for this fiber
        fail_msg = []
        # textdict is currently unused but kept for parity with other qc
        # functions in this module
        textdict = TextDict(params['INSTRUMENT'], params['LANGUAGE'])
        # ------------------------------------------------------------------
        # copy the quality control entries from the extraction file header
        extfile = outputs[fiber][extname]
        qc_names, qc_values, qc_logic, qc_pass = extfile.get_qckeys()
        # ------------------------------------------------------------------
        # QC = 1 if every check passed, QC = 0 otherwise (log failures)
        if np.sum(qc_pass) != len(qc_pass):
            for farg in fail_msg:
                WLOG(params, 'warning', TextEntry('40-005-10002') + farg)
            passed_fiber = 0
        else:
            WLOG(params, 'info', TextEntry('40-005-10001'))
            passed_fiber = 1
        # store this fiber's qc parameters and fold into the combined flag
        qc_params[fiber] = [qc_names, qc_values, qc_logic, qc_pass]
        passed &= passed_fiber
    # return qc_params and passed
    return qc_params, passed
def write_leak_master(params, recipe, rawfiles, medcubes, qc_params, props):
    """Write the leak master median-cube files (one per fiber) to disk.

    Each outfile in medcubes already carries its original header keys and
    data (set upstream in the master_dark_fp_cube step, per the in-code
    note); this function only adds the version/date/PID/output keys, the
    input file list, the qc parameters and (optionally) the leak master
    parameters, then writes the file and registers it with the recipe.

    :param params: parameter dictionary of constants; must provide
                   'DRS_VERSION', 'DRS_DATE', 'DATE_NOW' and 'PID'
    :param recipe: recipe instance; used for add_output_file()
    :param rawfiles: list of raw file basenames recorded under KW_INFILE1
    :param medcubes: dict, fiber -> output drs file instance
    :param qc_params: dict, fiber -> qc parameters for add_qckeys
    :param props: parameter dictionary with 'LEAK_BCKGRD_PERCENTILE',
                  'LEAK_NORM_PERCENTILE', 'LEAKM_WSMOOTH' and
                  'LEAKM_KERSIZE', or None to skip these keys

    :return: dict, the updated medcubes mapping (same objects written back
             for sanity, as noted below)
    """
    # loop around fibers
    for fiber in medcubes:
        # get outfile for this fiber
        outfile = medcubes[fiber]
        # get qc_params for this fiber
        qc_params_fiber = qc_params[fiber]
        # ------------------------------------------------------------------
        # have already copied original keys in master_dark_fp_cube function
        # data is already added as well
        # so just need other keys
        # ------------------------------------------------------------------
        # add version
        outfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])
        # add dates
        outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
        outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
        # add process id
        outfile.add_hkey('KW_PID', value=params['PID'])
        # add output tag
        outfile.add_hkey('KW_OUTPUT', value=outfile.name)
        # add input files
        outfile.add_hkey_1d('KW_INFILE1', values=rawfiles, dim1name='file')
        # add qc parameters
        outfile.add_qckeys(qc_params_fiber)
        # add leak parameters from props (if set)
        if props is not None:
            outfile.add_hkey('KW_LEAK_BP_U',
                             value=props['LEAK_BCKGRD_PERCENTILE'])
            outfile.add_hkey('KW_LEAK_NP_U',
                             value=props['LEAK_NORM_PERCENTILE'])
            outfile.add_hkey('KW_LEAK_WSMOOTH', value=props['LEAKM_WSMOOTH'])
            outfile.add_hkey('KW_LEAK_KERSIZE', value=props['LEAKM_KERSIZE'])
        # log that we are saving rotated image
        wargs = [fiber, outfile.filename]
        WLOG(params, '', TextEntry('40-016-00025', args=wargs))
        # write image to file
        outfile.write_file()
        # add to output files (for indexing)
        recipe.add_output_file(outfile)
        # update med cubes (as it was shallow copied this is just for sanity
        # check)
        medcubes[fiber] = outfile
    # return medcubes
    return medcubes
def write_leak(params, recipe, inputs, props, qc_params, **kwargs):
    """Write the leak-corrected extraction outputs (2D and S1D files).

    For every fiber, adds the leak-correction header keys and qc
    parameters to each 2D output file and writes it; then rebuilds the
    S1D_W and S1D_V files from the corrected extraction, copying the
    header from the leak extract file, and writes those too. All written
    files are registered with the recipe for indexing.

    :param params: parameter dictionary of constants
    :param recipe: recipe instance; used for add_output_file()
    :param inputs: dict, fiber -> {'S1D_W_FILE': ..., 'S1D_V_FILE': ...}
                   input drs file instances (1D case)
    :param props: parameter dictionary; must provide 'OUTPUTS'
                  (fiber -> {extname: extfile}), 'S1DW', 'S1DV' and the
                  *_USED leak parameters listed in `values` below
    :param qc_params: dict, fiber -> qc parameters for add_qckeys
    :param kwargs: may override 'EXT_S1D_INFILE' via 's1dextfile'
                   (resolved through pcheck)

    :return: None
    """
    # set function name
    func_name = __NAME__ + '.write_leak()'
    # get outputs from props
    outputs = props['OUTPUTS']
    s1dw_outs = props['S1DW']
    s1dv_outs = props['S1DV']
    # set header keys to add, paired one-to-one with `values` below
    # BUGFIX: first entry was a corrupted placeholder ('<KEY>'); it pairs
    # with LEAK_BCKGRD_PERCENTILE_USED, written as KW_LEAK_BP_U elsewhere
    # in this module (see write_leak_master)
    keys = ['KW_LEAK_BP_U', 'KW_LEAK_NP_U', 'KW_LEAK_LP_U', 'KW_LEAK_UP_U',
            'KW_LEAK_BADR_U']
    values = ['LEAK_BCKGRD_PERCENTILE_USED', 'LEAK_NORM_PERCENTILE_USED',
              'LEAK_LOW_PERCENTILE_USED', 'LEAK_HIGH_PERCENTILE_USED',
              'LEAK_BAD_RATIO_OFFSET_USED']
    # ----------------------------------------------------------------------
    # 2D files
    # ----------------------------------------------------------------------
    # loop around fibers
    for fiber in outputs:
        # loop around files
        for extname in outputs[fiber]:
            # get the s1d in file type
            extfile = outputs[fiber][extname]
            # add leak corr key
            extfile.add_hkey('KW_LEAK_CORR', value=True)
            # loop around leak keys to add
            for it in range(len(keys)):
                extfile.add_hkey(keys[it], value=props[values[it]])
            # add qc parameters
            extfile.add_qckeys(qc_params[fiber])
            # log that we are saving file
            wargs = [fiber, extname, extfile.filename]
            WLOG(params, '', TextEntry('40-016-00030', args=wargs))
            # write image to file
            extfile.write_file()
            # add back to outputs (used for s1d)
            outputs[fiber][extname] = extfile
            # add to output files (for indexing)
            recipe.add_output_file(extfile)
    # ----------------------------------------------------------------------
    # S1D files
    # ----------------------------------------------------------------------
    # get the leak extract file type
    s1dextfile = pcheck(params, 'EXT_S1D_INFILE', 's1dextfile', kwargs,
                        func_name)
    # loop around fibers
    for fiber in outputs:
        # get extfile
        extfile = outputs[fiber][s1dextfile]
        # get s1d props for this fiber
        swprops = s1dw_outs[fiber]
        svprops = s1dv_outs[fiber]
        # get input extraction file (1D case)
        s1dwfile = inputs[fiber]['S1D_W_FILE']
        s1dvfile = inputs[fiber]['S1D_V_FILE']
        # ------------------------------------------------------------------
        # Store S1D_W in file
        # ------------------------------------------------------------------
        # copy header from e2dsff file
        s1dwfile.copy_header(extfile)
        # set output key
        s1dwfile.add_hkey('KW_OUTPUT', value=s1dwfile.name)
        # add new header keys
        s1dwfile = add_s1d_keys(s1dwfile, swprops)
        # copy data
        s1dwfile.data = swprops['S1DTABLE']
        # must change the datatype to 'table'
        s1dwfile.datatype = 'table'
        # ------------------------------------------------------------------
        # log that we are saving rotated image
        wargs = [fiber, 'wave', s1dwfile.filename]
        WLOG(params, '', TextEntry('40-016-00031', args=wargs))
        # write image to file
        s1dwfile.write_file()
        # add to output files (for indexing)
        recipe.add_output_file(s1dwfile)
        # ------------------------------------------------------------------
        # Store S1D_V in file
        # ------------------------------------------------------------------
        # copy header from e2dsff file
        s1dvfile.copy_header(extfile)
        # add new header keys
        s1dvfile = add_s1d_keys(s1dvfile, svprops)
        # set output key
        s1dvfile.add_hkey('KW_OUTPUT', value=s1dvfile.name)
        # copy data
        s1dvfile.data = svprops['S1DTABLE']
        # must change the datatype to 'table'
        s1dvfile.datatype = 'table'
        # ------------------------------------------------------------------
        # log that we are saving rotated image
        wargs = [fiber, 'velocity', s1dvfile.filename]
        WLOG(params, '', TextEntry('40-016-00031', args=wargs))
        # write image to file
        s1dvfile.write_file()
        # add to output files (for indexing)
        recipe.add_output_file(s1dvfile)
    # ------------------------------------------------------------------
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
    # ----------------------------------------------------------------------
    # print 'Hello World!' -- trivial smoke test when run as a script;
    # this module is otherwise used only as a library
    print("Hello World!")
# =============================================================================
# End of code
# =============================================================================
| [
"apero.science.extract.berv.add_berv_keys",
"apero.science.calib.general.add_calibs_to_header",
"numpy.nanpercentile",
"numpy.sum",
"apero.science.calib.localisation.load_orderp",
"apero.io.drs_path.copyfile",
"numpy.ones",
"numpy.isnan",
"apero.science.calib.flat_blaze.get_blaze",
"numpy.arange",... | [((1092, 1122), 'apero.core.constants.load', 'constants.load', (['__INSTRUMENT__'], {}), '(__INSTRUMENT__)\n', (1106, 1122), False, 'from apero.core import constants\n'), ((1735, 1755), 'astropy.constants.c.to', 'cc.c.to', (['(uu.m / uu.s)'], {}), '(uu.m / uu.s)\n', (1742, 1755), True, 'from astropy import constants as cc\n'), ((1821, 1842), 'astropy.constants.c.to', 'cc.c.to', (['(uu.km / uu.s)'], {}), '(uu.km / uu.s)\n', (1828, 1842), True, 'from astropy import constants as cc\n'), ((7362, 7399), 'apero.core.constants.pload', 'constants.pload', (["params['INSTRUMENT']"], {}), "(params['INSTRUMENT'])\n", (7377, 7399), False, 'from apero.core import constants\n'), ((7680, 7723), 'apero.science.calib.wave.get_masterwave_filename', 'wave.get_masterwave_filename', (['params', 'fiber'], {}), '(params, fiber)\n', (7708, 7723), False, 'from apero.science.calib import wave\n'), ((7763, 7820), 'apero.science.calib.wave.get_wavesolution', 'wave.get_wavesolution', (['params', 'recipe'], {'filename': 'mwavefile'}), '(params, recipe, filename=mwavefile)\n', (7784, 7820), False, 'from apero.science.calib import wave\n'), ((10830, 10894), 'apero.core.get_file_definition', 'core.get_file_definition', (['kind', "params['INSTRUMENT']"], {'kind': '"""red"""'}), "(kind, params['INSTRUMENT'], kind='red')\n", (10854, 10894), False, 'from apero import core\n'), ((11126, 11195), 'apero.science.calib.general.get_input_files', 'general.get_input_files', (['params', '"""THERMALFILE"""', 'key', 'header', 'filename'], {}), "(params, 'THERMALFILE', key, header, filename)\n", (11149, 11195), False, 'from apero.science.calib import general\n'), ((11364, 11427), 'apero.science.calib.general.load_calib_file', 'general.load_calib_file', (['params', 'key', 'header'], {'filename': 'filename'}), '(params, key, header, filename=filename)\n', (11387, 11427), False, 'from apero.science.calib import general\n'), ((12871, 12919), 'apero.io.drs_data.load_tapas', 'drs_data.load_tapas', 
(['params'], {'filename': 'tapas_file'}), '(params, filename=tapas_file)\n', (12890, 12919), False, 'from apero.io import drs_data\n'), ((13132, 13161), 'apero.core.math.iuv_spline', 'mp.iuv_spline', (['wtapas', 'ttapas'], {}), '(wtapas, ttapas)\n', (13145, 13161), True, 'from apero.core import math as mp\n'), ((13546, 13591), 'numpy.zeros_like', 'np.zeros_like', (['wavemap[torder, :]'], {'dtype': 'bool'}), '(wavemap[torder, :], dtype=bool)\n', (13559, 13591), True, 'import numpy as np\n'), ((14285, 14328), 'apero.core.math.nanmedian', 'mp.nanmedian', (['(thermal_torder / image_torder)'], {}), '(thermal_torder / image_torder)\n', (14297, 14328), True, 'from apero.core import math as mp\n'), ((16295, 16309), 'numpy.zeros', 'np.zeros', (['dim2'], {}), '(dim2)\n', (16303, 16309), True, 'import numpy as np\n'), ((17533, 17579), 'apero.core.math.nanmedian', 'mp.nanmedian', (['(thermal_torder / envelope_torder)'], {}), '(thermal_torder / envelope_torder)\n', (17545, 17579), True, 'from apero.core import math as mp\n'), ((19084, 19136), 'numpy.arange', 'np.arange', (['(-ker_size * w_smooth)', '(ker_size * w_smooth)'], {}), '(-ker_size * w_smooth, ker_size * w_smooth)\n', (19093, 19136), True, 'import numpy as np\n'), ((19151, 19191), 'numpy.exp', 'np.exp', (['(-0.5 * (xkernel / w_smooth) ** 2)'], {}), '(-0.5 * (xkernel / w_smooth) ** 2)\n', (19157, 19191), True, 'import numpy as np\n'), ((19268, 19305), 'apero.core.constants.pload', 'constants.pload', (["params['INSTRUMENT']"], {}), "(params['INSTRUMENT'])\n", (19283, 19305), False, 'from apero.core import constants\n'), ((20444, 20466), 'numpy.array', 'np.array', (['reffile.data'], {}), '(reffile.data)\n', (20452, 20466), True, 'import numpy as np\n'), ((20837, 20860), 'numpy.zeros_like', 'np.zeros_like', (['refimage'], {}), '(refimage)\n', (20850, 20860), True, 'import numpy as np\n'), ((25261, 25298), 'apero.core.constants.pload', 'constants.pload', (["params['INSTRUMENT']"], {}), "(params['INSTRUMENT'])\n", (25276, 
25298), False, 'from apero.core import constants\n'), ((25640, 25663), 'numpy.array', 'np.array', (['ref_file.data'], {}), '(ref_file.data)\n', (25648, 25663), True, 'import numpy as np\n'), ((26324, 26337), 'numpy.zeros', 'np.zeros', (['nbo'], {}), '(nbo)\n', (26332, 26337), True, 'import numpy as np\n'), ((26358, 26371), 'numpy.zeros', 'np.zeros', (['nbo'], {}), '(nbo)\n', (26366, 26371), True, 'import numpy as np\n'), ((26395, 26408), 'numpy.zeros', 'np.zeros', (['nbo'], {}), '(nbo)\n', (26403, 26408), True, 'import numpy as np\n'), ((34679, 34743), 'apero.core.get_file_definition', 'core.get_file_definition', (['kind', "params['INSTRUMENT']"], {'kind': '"""red"""'}), "(kind, params['INSTRUMENT'], kind='red')\n", (34703, 34743), False, 'from apero import core\n'), ((34972, 35038), 'apero.science.calib.general.get_input_files', 'general.get_input_files', (['params', '"""LEAKFILE"""', 'key', 'header', 'filename'], {}), "(params, 'LEAKFILE', key, header, filename)\n", (34995, 35038), False, 'from apero.science.calib import general\n'), ((35201, 35264), 'apero.science.calib.general.load_calib_file', 'general.load_calib_file', (['params', 'key', 'header'], {'filename': 'filename'}), '(params, key, header, filename=filename)\n', (35224, 35264), False, 'from apero.science.calib import general\n'), ((37101, 37138), 'apero.core.constants.pload', 'constants.pload', (["params['INSTRUMENT']"], {}), "(params['INSTRUMENT'])\n", (37116, 37138), False, 'from apero.core import constants\n'), ((37543, 37604), 'os.path.join', 'os.path.join', (["params['DRS_DATA_WORKING']", "params['NIGHTNAME']"], {}), "(params['DRS_DATA_WORKING'], params['NIGHTNAME'])\n", (37555, 37604), False, 'import os\n'), ((37815, 37891), 'apero.core.core.drs_startup.find_recipe', 'drs_startup.find_recipe', (['extname', "params['INSTRUMENT']"], {'mod': 'recipe.recipemod'}), "(extname, params['INSTRUMENT'], mod=recipe.recipemod)\n", (37838, 37891), False, 'from apero.core.core import drs_startup\n'), ((40787, 
40824), 'apero.core.constants.pload', 'constants.pload', (["params['INSTRUMENT']"], {}), "(params['INSTRUMENT'])\n", (40802, 40824), False, 'from apero.core import constants\n'), ((41571, 41629), 'apero.science.calib.wave.get_wavelines', 'wave.get_wavelines', (['params', 'recipe', 'fiber'], {'infile': 'e2dsfile'}), '(params, recipe, fiber, infile=e2dsfile)\n', (41589, 41629), False, 'from apero.science.calib import wave\n'), ((41960, 42007), 'apero.science.calib.wave.get_master_lines', 'wave.get_master_lines', (['params', 'recipe'], {}), '(params, recipe, **fpargs)\n', (41981, 42007), False, 'from apero.science.calib import wave\n'), ((44543, 44590), 'numpy.arange', 'np.arange', (['(-smooth_size * 3)', '(smooth_size * 3)', '(1)'], {}), '(-smooth_size * 3, smooth_size * 3, 1)\n', (44552, 44590), True, 'import numpy as np\n'), ((44601, 44641), 'numpy.exp', 'np.exp', (['(-0.5 * (xker / smooth_size) ** 2)'], {}), '(-0.5 * (xker / smooth_size) ** 2)\n', (44607, 44641), True, 'import numpy as np\n'), ((44683, 44708), 'numpy.ones', 'np.ones', (['npix'], {'dtype': 'bool'}), '(npix, dtype=bool)\n', (44690, 44708), True, 'import numpy as np\n'), ((44932, 44952), 'numpy.zeros_like', 'np.zeros_like', (['blaze'], {}), '(blaze)\n', (44945, 44952), True, 'import numpy as np\n'), ((46186, 46209), 'numpy.zeros_like', 'np.zeros_like', (['wavegrid'], {}), '(wavegrid)\n', (46199, 46209), True, 'import numpy as np\n'), ((46223, 46246), 'numpy.zeros_like', 'np.zeros_like', (['wavegrid'], {}), '(wavegrid)\n', (46236, 46246), True, 'import numpy as np\n'), ((48290, 48315), 'numpy.zeros_like', 'np.zeros_like', (['w_out_spec'], {}), '(w_out_spec)\n', (48303, 48315), True, 'import numpy as np\n'), ((48375, 48382), 'astropy.table.Table', 'Table', ([], {}), '()\n', (48380, 48382), False, 'from astropy.table import Table\n'), ((53002, 53047), 'apero.science.calib.general.add_calibs_to_header', 'general.add_calibs_to_header', (['e2dsfile', 'props'], {}), '(e2dsfile, props)\n', (53030, 53047), 
False, 'from apero.science.calib import general\n'), ((55973, 56017), 'apero.science.calib.wave.add_wave_keys', 'wave.add_wave_keys', (['params', 'e2dsfile', 'wprops'], {}), '(params, e2dsfile, wprops)\n', (55991, 56017), False, 'from apero.science.calib import wave\n'), ((56146, 56190), 'apero.science.extract.berv.add_berv_keys', 'berv.add_berv_keys', (['params', 'e2dsfile', 'bprops'], {}), '(params, e2dsfile, bprops)\n', (56164, 56190), False, 'from apero.science.extract import berv\n'), ((62979, 63024), 'apero.science.calib.general.add_calibs_to_header', 'general.add_calibs_to_header', (['e2dsfile', 'props'], {}), '(e2dsfile, props)\n', (63007, 63024), False, 'from apero.science.calib import general\n'), ((3124, 3213), 'apero.science.calib.general.get_input_files', 'general.get_input_files', (['params', '"""ORDERPFILE"""', 'key', 'header'], {'default': 'filenames[fiber]'}), "(params, 'ORDERPFILE', key, header, default=\n filenames[fiber])\n", (3147, 3213), False, 'from apero.science.calib import general\n'), ((13997, 14042), 'apero.core.math.medfilt_1d', 'mp.medfilt_1d', (['thermal[order_num]', 'filter_wid'], {}), '(thermal[order_num], filter_wid)\n', (14010, 14042), True, 'from apero.core import math as mp\n'), ((17045, 17090), 'apero.core.math.medfilt_1d', 'mp.medfilt_1d', (['thermal[order_num]', 'filter_wid'], {}), '(thermal[order_num], filter_wid)\n', (17058, 17090), True, 'from apero.core import math as mp\n'), ((21044, 21100), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'bckgrd_percentile'], {}), '(refimage[order_num], bckgrd_percentile)\n', (21060, 21100), True, 'import numpy as np\n'), ((21213, 21267), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'norm_percentile'], {}), '(refimage[order_num], norm_percentile)\n', (21229, 21267), True, 'import numpy as np\n'), ((22151, 22173), 'numpy.array', 'np.array', (['scifile.data'], {}), '(scifile.data)\n', (22159, 22173), True, 'import numpy as np\n'), ((26800, 
26856), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'bckgrd_percentile'], {}), '(refimage[order_num], bckgrd_percentile)\n', (26816, 26856), True, 'import numpy as np\n'), ((27817, 27871), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'norm_percentile'], {}), '(refimage[order_num], norm_percentile)\n', (27833, 27871), True, 'import numpy as np\n'), ((28063, 28122), 'apero.core.math.nansum', 'mp.nansum', (['(master_ref_ord[mask] * refimage[order_num][mask])'], {}), '(master_ref_ord[mask] * refimage[order_num][mask])\n', (28072, 28122), True, 'from apero.core import math as mp\n'), ((28139, 28180), 'apero.core.math.nansum', 'mp.nansum', (['(refimage[order_num][mask] ** 2)'], {}), '(refimage[order_num][mask] ** 2)\n', (28148, 28180), True, 'from apero.core import math as mp\n'), ((33605, 33648), 'apero.science.calib.flat_blaze.get_blaze', 'flat_blaze.get_blaze', (['params', 'header', 'fiber'], {}), '(params, header, fiber)\n', (33625, 33648), False, 'from apero.science.calib import flat_blaze\n'), ((33789, 33847), 'apero.science.calib.wave.get_wavesolution', 'wave.get_wavesolution', (['params', 'recipe', 'header'], {'fiber': 'fiber'}), '(params, recipe, header, fiber=fiber)\n', (33810, 33847), False, 'from apero.science.calib import wave\n'), ((36439, 36453), 'numpy.array', 'np.array', (['cube'], {}), '(cube)\n', (36447, 36453), True, 'import numpy as np\n'), ((36514, 36540), 'apero.core.math.nanmedian', 'mp.nanmedian', (['cube'], {'axis': '(0)'}), '(cube, axis=0)\n', (36526, 36540), True, 'from apero.core import math as mp\n'), ((37655, 37688), 'os.path.join', 'os.path.join', (['pppath', 'rawfiles[0]'], {}), '(pppath, rawfiles[0])\n', (37667, 37688), False, 'import os\n'), ((43560, 43614), 'numpy.arange', 'np.arange', (['wavestart', '(waveend + binwave / 2.0)', 'binwave'], {}), '(wavestart, waveend + binwave / 2.0, binwave)\n', (43569, 43614), True, 'import numpy as np\n'), ((43690, 43717), 'numpy.log', 'np.log', 
(['(waveend / wavestart)'], {}), '(waveend / wavestart)\n', (43696, 43717), True, 'import numpy as np\n'), ((43736, 43784), 'numpy.round', 'np.round', (['(speed_of_light_kms / binvelo * flambda)'], {}), '(speed_of_light_kms / binvelo * flambda)\n', (43744, 43784), True, 'import numpy as np\n'), ((45095, 45121), 'numpy.array', 'np.array', (['blaze[order_num]'], {}), '(blaze[order_num])\n', (45103, 45121), True, 'import numpy as np\n'), ((45435, 45471), 'numpy.convolve', 'np.convolve', (['valid', 'ker'], {'mode': '"""same"""'}), "(valid, ker, mode='same')\n", (45446, 45471), True, 'import numpy as np\n'), ((45823, 45838), 'numpy.array', 'np.array', (['blaze'], {}), '(blaze)\n', (45831, 45838), True, 'import numpy as np\n'), ((45865, 45879), 'numpy.array', 'np.array', (['e2ds'], {}), '(e2ds)\n', (45873, 45879), True, 'import numpy as np\n'), ((46740, 46786), 'apero.core.math.iuv_spline', 'mp.iuv_spline', (['owave[valid]', 'oe2ds'], {'k': '(5)', 'ext': '(1)'}), '(owave[valid], oe2ds, k=5, ext=1)\n', (46753, 46786), True, 'from apero.core import math as mp\n'), ((46807, 46847), 'apero.core.math.iuv_spline', 'mp.iuv_spline', (['owave', 'oblaze'], {'k': '(1)', 'ext': '(1)'}), '(owave, oblaze, k=1, ext=1)\n', (46820, 46847), True, 'from apero.core import math as mp\n'), ((47047, 47092), 'apero.core.math.iuv_spline', 'mp.iuv_spline', (['owave', 'valid_float'], {'k': '(1)', 'ext': '(1)'}), '(owave, valid_float, k=1, ext=1)\n', (47060, 47092), True, 'from apero.core import math as mp\n'), ((47459, 47494), 'numpy.zeros_like', 'np.zeros_like', (['wavegrid'], {'dtype': 'bool'}), '(wavegrid, dtype=bool)\n', (47472, 47494), True, 'import numpy as np\n'), ((48160, 48196), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (48183, 48196), False, 'import warnings\n'), ((50994, 51009), 'numpy.sum', 'np.sum', (['qc_pass'], {}), '(qc_pass)\n', (51000, 51009), True, 'import numpy as np\n'), ((55494, 55530), 'warnings.catch_warnings', 
'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (55517, 55530), False, 'import warnings\n'), ((55561, 55590), 'apero.core.math.nanmax', 'mp.nanmax', (["eprops['FLUX_VAL']"], {}), "(eprops['FLUX_VAL'])\n", (55570, 55590), True, 'from apero.core import math as mp\n'), ((65305, 65341), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (65328, 65341), False, 'import warnings\n'), ((65372, 65401), 'apero.core.math.nanmax', 'mp.nanmax', (["eprops['FLUX_VAL']"], {}), "(eprops['FLUX_VAL'])\n", (65381, 65401), True, 'from apero.core import math as mp\n'), ((4405, 4477), 'apero.science.calib.localisation.load_orderp', 'localisation.load_orderp', (['params', 'header'], {'fiber': 'fiber', 'filename': 'filename'}), '(params, header, fiber=fiber, filename=filename)\n', (4429, 4477), False, 'from apero.science.calib import localisation\n'), ((4615, 4689), 'apero.science.calib.shape.ea_transform', 'shape.ea_transform', (['params', 'orderp', 'shapelocal'], {'dxmap': 'shapex', 'dymap': 'shapey'}), '(params, orderp, shapelocal, dxmap=shapex, dymap=shapey)\n', (4633, 4689), False, 'from apero.science.calib import shape\n'), ((12679, 12697), 'apero.core.math.nansum', 'mp.nansum', (['thermal'], {}), '(thermal)\n', (12688, 12697), True, 'from apero.core import math as mp\n'), ((16056, 16074), 'apero.core.math.nansum', 'mp.nansum', (['thermal'], {}), '(thermal)\n', (16065, 16074), True, 'from apero.core import math as mp\n'), ((16735, 16771), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (16758, 16771), False, 'import warnings\n'), ((16807, 16851), 'numpy.nanpercentile', 'np.nanpercentile', (['imagebox', 'envelope_percent'], {}), '(imagebox, envelope_percent)\n', (16823, 16851), True, 'import numpy as np\n'), ((22476, 22518), 'apero.core.math.medfilt_1d', 'mp.medfilt_1d', (['sciimage[order_num]', 'medfac'], {}), '(sciimage[order_num], medfac)\n', (22489, 
22518), True, 'from apero.core import math as mp\n'), ((22692, 22714), 'numpy.ones_like', 'np.ones_like', (['tmpimage'], {}), '(tmpimage)\n', (22704, 22714), True, 'import numpy as np\n'), ((22809, 22852), 'numpy.convolve', 'np.convolve', (['tmpimage', 'ykernel'], {'mode': '"""same"""'}), "(tmpimage, ykernel, mode='same')\n", (22820, 22852), True, 'import numpy as np\n'), ((22873, 22912), 'numpy.convolve', 'np.convolve', (['mask', 'ykernel'], {'mode': '"""same"""'}), "(mask, ykernel, mode='same')\n", (22884, 22912), True, 'import numpy as np\n'), ((27221, 27257), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (27244, 27257), False, 'import warnings\n'), ((27318, 27366), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'bpercents'], {}), '(refimage[order_num], bpercents)\n', (27334, 27366), True, 'import numpy as np\n'), ((27393, 27436), 'numpy.nanpercentile', 'np.nanpercentile', (['master_ref_ord', 'bpercents'], {}), '(master_ref_ord, bpercents)\n', (27409, 27436), True, 'import numpy as np\n'), ((29996, 30018), 'numpy.array', 'np.array', (['extfile.data'], {}), '(extfile.data)\n', (30004, 30018), True, 'import numpy as np\n'), ((30655, 30668), 'numpy.zeros', 'np.zeros', (['nbo'], {}), '(nbo)\n', (30663, 30668), True, 'import numpy as np\n'), ((40255, 40283), 'os.path.join', 'os.path.join', (['indir', 'outfile'], {}), '(indir, outfile)\n', (40267, 40283), False, 'import os\n'), ((40321, 40363), 'apero.io.drs_path.copyfile', 'drs_path.copyfile', (['params', 'inpath', 'outpath'], {}), '(params, inpath, outpath)\n', (40338, 40363), False, 'from apero.io import drs_path\n'), ((43876, 43924), 'numpy.exp', 'np.exp', (['(nlambda * (binvelo / speed_of_light_kms))'], {}), '(nlambda * (binvelo / speed_of_light_kms))\n', (43882, 43924), True, 'import numpy as np\n'), ((43980, 43998), 'numpy.arange', 'np.arange', (['nlambda'], {}), '(nlambda)\n', (43989, 43998), True, 'import numpy as np\n'), ((45170, 
45189), 'numpy.isfinite', 'np.isfinite', (['oblaze'], {}), '(oblaze)\n', (45181, 45189), True, 'import numpy as np\n'), ((45192, 45220), 'numpy.isfinite', 'np.isfinite', (['e2ds[order_num]'], {}), '(e2ds[order_num])\n', (45203, 45220), True, 'import numpy as np\n'), ((45234, 45270), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (45257, 45270), False, 'import warnings\n'), ((45520, 45556), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (45543, 45556), False, 'import warnings\n'), ((46363, 46392), 'numpy.isfinite', 'np.isfinite', (['se2ds[order_num]'], {}), '(se2ds[order_num])\n', (46374, 46392), True, 'import numpy as np\n'), ((46395, 46425), 'numpy.isfinite', 'np.isfinite', (['sblaze[order_num]'], {}), '(sblaze[order_num])\n', (46406, 46425), True, 'import numpy as np\n'), ((46491, 46504), 'numpy.sum', 'np.sum', (['valid'], {}), '(valid)\n', (46497, 46504), True, 'import numpy as np\n'), ((47176, 47199), 'apero.core.math.nanmin', 'mp.nanmin', (['owave[valid]'], {}), '(owave[valid])\n', (47185, 47199), True, 'from apero.core import math as mp\n'), ((47237, 47260), 'apero.core.math.nanmax', 'mp.nanmax', (['owave[valid]'], {}), '(owave[valid])\n', (47246, 47260), True, 'from apero.core import math as mp\n'), ((50471, 50498), 'numpy.isfinite', 'np.isfinite', (["eprops['E2DS']"], {}), "(eprops['E2DS'])\n", (50482, 50498), True, 'import numpy as np\n'), ((69931, 69946), 'numpy.sum', 'np.sum', (['qc_pass'], {}), '(qc_pass)\n', (69937, 69946), True, 'import numpy as np\n'), ((71730, 71745), 'numpy.sum', 'np.sum', (['qc_pass'], {}), '(qc_pass)\n', (71736, 71745), True, 'import numpy as np\n'), ((12713, 12733), 'numpy.isfinite', 'np.isfinite', (['thermal'], {}), '(thermal)\n', (12724, 12733), True, 'import numpy as np\n'), ((16090, 16110), 'numpy.isfinite', 'np.isfinite', (['thermal'], {}), '(thermal)\n', (16101, 16110), True, 'import numpy as np\n'), ((22577, 22595), 
'numpy.isnan', 'np.isnan', (['tmpimage'], {}), '(tmpimage)\n', (22585, 22595), True, 'import numpy as np\n'), ((22930, 22966), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (22953, 22966), False, 'import warnings\n'), ((30288, 30350), 'apero.science.calib.flat_blaze.get_flat', 'flat_blaze.get_flat', (['params', 'extfile.header', 'fiber'], {'quiet': '(True)'}), '(params, extfile.header, fiber, quiet=True)\n', (30307, 30350), False, 'from apero.science.calib import flat_blaze\n'), ((30483, 30505), 'numpy.ones_like', 'np.ones_like', (['extimage'], {}), '(extimage)\n', (30495, 30505), True, 'import numpy as np\n'), ((31198, 31252), 'numpy.nanpercentile', 'np.nanpercentile', (['refimage[order_num]', 'norm_percentile'], {}), '(refimage[order_num], norm_percentile)\n', (31214, 31252), True, 'import numpy as np\n'), ((31278, 31311), 'apero.core.math.nanmedian', 'mp.nanmedian', (['extimage[order_num]'], {}), '(extimage[order_num])\n', (31290, 31311), True, 'from apero.core import math as mp\n'), ((39522, 39554), 'os.path.exists', 'os.path.exists', (['extfile.filename'], {}), '(extfile.filename)\n', (39536, 39554), False, 'import os\n'), ((45595, 45613), 'apero.core.math.nanmin', 'mp.nanmin', (['oweight'], {}), '(oweight)\n', (45604, 45613), True, 'from apero.core import math as mp\n'), ((45646, 45664), 'apero.core.math.nanmax', 'mp.nanmax', (['oweight'], {}), '(oweight)\n', (45655, 45664), True, 'from apero.core import math as mp\n'), ((46993, 47003), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (47000, 47003), True, 'import numpy as np\n'), ((44055, 44082), 'numpy.log', 'np.log', (['(waveend / wavestart)'], {}), '(waveend / wavestart)\n', (44061, 44082), True, 'import numpy as np\n'), ((45320, 45337), 'apero.core.math.nanmax', 'mp.nanmax', (['oblaze'], {}), '(oblaze)\n', (45329, 45337), True, 'from apero.core import math as mp\n')] |
import numpy as np
import pandas as pd
from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes
from typing import Tuple
from IMLearn.metrics import accuracy
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from math import atan2, pi
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load a dataset for comparing the Gaussian Naive Bayes and LDA classifiers.

    The file is assumed to hold an ndarray of shape (n_samples, 3): the first
    two columns are features, the third the integer class label.

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used
    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    samples = np.load(filename)
    features = samples[:, :2]
    labels = samples[:, 2].astype(int)
    return features, labels
def run_perceptron():
    """
    Fit and plot fit progression of the Perceptron algorithm over both the
    linearly separable and inseparable datasets.

    For each dataset, produces a line plot of the perceptron's training loss
    (y-axis) as a function of the training iteration (x-axis).
    """
    def perceptron_callback(fit: Perceptron, x: np.ndarray, res: int):
        """Record the current training loss each time the perceptron updates."""
        loss_history.append(fit.loss(samples, labels))

    datasets = (("Linearly Separable", "../datasets/linearly_separable.npy"),
                ("Linearly Inseparable", "../datasets/linearly_inseparable.npy"))
    for ds_name, ds_path in datasets:
        # Load dataset: all columns but the last are features, last is the label.
        data = np.load(ds_path)
        samples, labels = data[:, :-1], data[:, -1]
        # Fit Perceptron, recording the loss at every update via the callback.
        loss_history = []
        model = Perceptron(callback=perceptron_callback)
        model.fit(samples, labels)
        # Plot loss versus iteration number (1-based).
        fig = px.line(x=np.arange(1, len(loss_history) + 1, 1), y=loss_history)
        fig.update_layout(title_text=f"Fitting Perceptron With {ds_name} Data:<br><sup>"
                                     "Misclassification error during algorithm iterations</sup>",
                          xaxis_title="Iteration",
                          yaxis_title="Loss", title_x=0.5,
                          title_font_size=25,
                          height=500,
                          width=800)
        fig.show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Draw an ellipse centered at given location and according to specified covariance matrix

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse
    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # eigvalsh returns eigenvalues in ascending order; take major/minor axes.
    eigvals = np.linalg.eigvalsh(cov)
    major, minor = eigvals[1], eigvals[0]
    # Rotation angle of the major axis; degenerate (diagonal) case handled separately.
    if cov[0, 1] != 0:
        angle = atan2(major - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        angle = np.pi / 2
    else:
        angle = 0
    ts = np.linspace(0, 2 * pi, 100)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    xs = major * cos_a * np.cos(ts) - minor * sin_a * np.sin(ts)
    ys = major * sin_a * np.cos(ts) + minor * cos_a * np.sin(ts)
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines", marker=dict(color="black"))
def get_marker(mu: np.ndarray):
    """
    Draw a black 'x' marker centered at the given location.

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of marker
    """
    marker_style = dict(color="black", size=10, symbol="x")
    return go.Scatter(x=[mu[0]], y=[mu[1]], mode="markers", marker=marker_style)
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets.

    For each dataset, plots the two models side by side: marker color encodes the
    model's prediction, marker symbol the true class, with each fitted class
    Gaussian drawn as a covariance ellipse plus an 'x' at its mean. Training
    accuracy is reported in each subplot title.
    """
    for n, f in [("Gaussian-1", "../datasets/gaussian1.npy"), ("Gaussian-2", "../datasets/gaussian2.npy")]:
        models = [("Naive Bayes", GaussianNaiveBayes), ("LDA", LDA)]
        model_names = [model[0] for model in models]
        # One subplot per model, side by side.
        fig = make_subplots(rows=1, cols=2, subplot_titles=[f"{m}$" for m in model_names],
                            horizontal_spacing=0.05, vertical_spacing=.03)
        for i, (name, model) in enumerate(models):
            # Load dataset
            # Fit models and predict over training set
            dataset = np.load(f)
            X, y = dataset[:, :-1], dataset[:, -1]
            classifier = model()
            classifier.fit(X, y)
            classes = classifier.predict(X)
            # Color = prediction, symbol = true class, so misclassifications stand out.
            df = pd.DataFrame(np.column_stack((X, y, classes)),
                              columns=["Feature 1", "Feature 2", "class", "prediction"])
            fig.add_trace(go.Scatter(x=df["Feature 1"], y=df["Feature 2"], mode="markers", showlegend=False,
                                     marker=dict(color=df["prediction"], symbol=df["class"],
                                                 colorscale=custom[0:3],
                                                 line=dict(color="black", width=1))),
                          col=(i + 1), row=1)
            # Put the training accuracy into the subplot title annotation.
            fig.layout.annotations[i].update(text=f"{name}, accuracy: {accuracy(y, classes).__round__(3)}")
            # Draw each fitted class Gaussian: covariance ellipse + mean marker.
            for j, class_ in enumerate(classifier.classes_):
                if type(classifier) is GaussianNaiveBayes:
                    # Naive Bayes stores per-feature variances; diagonalise into a covariance.
                    fig.add_trace(get_ellipse(classifier.mu_[j, :], np.diag(classifier.vars_[j, :])), col=(i + 1),
                                  row=1)
                elif type(classifier) is LDA:
                    fig.add_trace(get_ellipse(classifier.mu_[j, :], classifier.cov_), col=(i + 1), row=1)
                fig.add_trace(get_marker(classifier.mu_[j, :]), col=(i + 1), row=1)
        # Add traces for data-points setting symbols and colors
        # raise NotImplementedError()
        # Add `X` dots specifying fitted Gaussians' means
        # raise NotImplementedError()
        # Add ellipses depicting the covariances of the fitted Gaussians
        # raise NotImplementedError()
        fig.update_layout(title=fr"<b>Classification: Performance of probabilistic classifiers on {n} data<b>",
                          margin=dict(t=100),
                          title_x=0.5,
                          title_font_size=25,
                          width=1000,
                          height=600,
                          showlegend=False)
        fig.show()
if __name__ == '__main__':
    # Fixed seed so dataset loading order and model behaviour are reproducible.
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
| [
"numpy.load",
"numpy.random.seed",
"IMLearn.learners.classifiers.Perceptron",
"math.atan2",
"numpy.column_stack",
"numpy.linalg.eigvalsh",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"plotly.subplots.make_subplots",
"numpy.diag",
"IMLearn.metrics.accuracy"
] | [((885, 902), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (892, 902), True, 'import numpy as np\n'), ((3117, 3144), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * pi)', '(100)'], {}), '(0, 2 * pi, 100)\n', (3128, 3144), True, 'import numpy as np\n'), ((6635, 6652), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6649, 6652), True, 'import numpy as np\n'), ((1760, 1770), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1767, 1770), True, 'import numpy as np\n'), ((1923, 1963), 'IMLearn.learners.classifiers.Perceptron', 'Perceptron', ([], {'callback': 'perceptron_callback'}), '(callback=perceptron_callback)\n', (1933, 1963), False, 'from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes\n'), ((3009, 3041), 'math.atan2', 'atan2', (['(l1 - cov[0, 0])', 'cov[0, 1]'], {}), '(l1 - cov[0, 0], cov[0, 1])\n', (3014, 3041), False, 'from math import atan2, pi\n'), ((4223, 4351), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'subplot_titles': "[f'{m}$' for m in model_names]", 'horizontal_spacing': '(0.05)', 'vertical_spacing': '(0.03)'}), "(rows=1, cols=2, subplot_titles=[f'{m}$' for m in model_names],\n horizontal_spacing=0.05, vertical_spacing=0.03)\n", (4236, 4351), False, 'from plotly.subplots import make_subplots\n'), ((2966, 2989), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['cov'], {}), '(cov)\n', (2984, 2989), True, 'import numpy as np\n'), ((3176, 3185), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (3182, 3185), True, 'import numpy as np\n'), ((3211, 3220), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3217, 3220), True, 'import numpy as np\n'), ((3253, 3262), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (3259, 3262), True, 'import numpy as np\n'), ((3288, 3297), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3294, 3297), True, 'import numpy as np\n'), ((4530, 4540), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (4537, 4540), True, 'import numpy as np\n'), ((3160, 
3173), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3166, 3173), True, 'import numpy as np\n'), ((3195, 3208), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3201, 3208), True, 'import numpy as np\n'), ((3237, 3250), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3243, 3250), True, 'import numpy as np\n'), ((3272, 3285), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3278, 3285), True, 'import numpy as np\n'), ((4733, 4765), 'numpy.column_stack', 'np.column_stack', (['(X, y, classes)'], {}), '((X, y, classes))\n', (4748, 4765), True, 'import numpy as np\n'), ((5561, 5592), 'numpy.diag', 'np.diag', (['classifier.vars_[j, :]'], {}), '(classifier.vars_[j, :])\n', (5568, 5592), True, 'import numpy as np\n'), ((5335, 5355), 'IMLearn.metrics.accuracy', 'accuracy', (['y', 'classes'], {}), '(y, classes)\n', (5343, 5355), False, 'from IMLearn.metrics import accuracy\n')] |
#Copyright (c) 2020 Ocado. All Rights Reserved.
import sys, os, pygame, argparse
from PIL import Image
import numpy as np
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
from amrrt.space import StateSpace
from amrrt.diffusion_map import DiffusionMap, GridGraph
from amrrt.metrics import EuclideanMetric, DiffusionMetric, GeodesicMetric
from amrrt.planners import AMRRTPlanner, RTRRTPlanner
def display(screen, planner, agent_pos, path, image, scale):
    """Redraw the full scene: map image, search tree, current path, dynamic
    obstacles, the agent (red) and, if set, the goal (green)."""
    def to_px(point):
        # World coordinates -> integer pixel coordinates.
        return (int(point[0] * scale), int(point[1] * scale))

    screen.blit(image, (0, 0))
    # Tree edges in thin black lines.
    for parent in planner.tree.edges:
        for child in planner.tree.edges[parent]:
            pygame.draw.line(screen, (0, 0, 0), to_px(parent.pos), to_px(child.pos), 1)
    # Planned path (agent prepended) in thicker blue lines.
    waypoints = [planner.space.create_state(agent_pos)] + path
    for start, end in zip(waypoints, waypoints[1:]):
        pygame.draw.line(screen, (30, 30, 255), to_px(start.pos), to_px(end.pos), 3)
    # Dynamic obstacles as filled black circles.
    for obstacle in planner.space.dynamic_obstacles:
        pygame.draw.circle(screen, (0, 0, 0), tuple((obstacle.pos * scale).astype(int)), int(obstacle.radius * scale))
    pygame.draw.circle(screen, (255, 30, 30), tuple((agent_pos * scale).astype(int)), 4)
    if planner.goal is not None:
        pygame.draw.circle(screen, (30, 255, 30), tuple((planner.goal.pos * scale).astype(int)), 4)
    pygame.display.update()
def visualiser(space, planner_type, metric_type):
    """Interactive pygame visualiser for RT-RRT* / AM-RRT* planning.

    The first click places the agent and creates the planner; afterwards a
    click sets a new goal, and a click while the 'd' key is held drops a
    dynamic obstacle.

    Parameters
    ----------
    space : StateSpace
        Environment the planner operates in.
    planner_type : str
        "rtrrt" selects RTRRTPlanner; anything else selects AMRRTPlanner.
    metric_type : str
        "euclidean", "diffusion", or any other value for geodesic.
    """
    pygame.init()
    grid_graph = GridGraph(space)
    # Build the assisting distance metric requested on the command line.
    if metric_type == "euclidean" : assisting_metric = EuclideanMetric()
    elif metric_type == "diffusion" : assisting_metric = DiffusionMetric(DiffusionMap(space, grid_graph=grid_graph))
    else : assisting_metric = GeodesicMetric(grid_graph)
    planner = None
    agent_pos = None
    # Scale the environment image so its smaller side is 800 pixels.
    image = space.display_image()
    image_size = 800
    scale = image_size/min(image.size[0],image.size[1])
    width, height = int(image.size[0]*scale), int(image.size[1]*scale)
    screen = pygame.display.set_mode((width, height))
    image = image.resize((width, height), resample=Image.NEAREST)
    image = pygame.image.frombuffer(image.tobytes(), image.size, image.mode)
    # Phase 1: wait for the first click, which places the agent and builds the planner.
    while planner is None:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Convert the click from screen pixels back to world coordinates.
                pos = np.array(pygame.mouse.get_pos()) / scale
                if planner_type == "rtrrt" : planner = RTRRTPlanner(space, pos, assisting_metric=assisting_metric)
                else : planner = AMRRTPlanner(space, pos, assisting_metric=assisting_metric)
                agent_pos = pos
        screen.blit(image, (0, 0))
        pygame.display.update()
    # Phase 2: main loop — handle input, plan, step the agent, redraw.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = np.array(pygame.mouse.get_pos()) / scale
                if pygame.key.get_pressed()[pygame.K_d]:
                    # Click with 'd' held: drop a dynamic obstacle of radius 3 (world units).
                    planner.add_dynamic_obstacle(pos, 3)
                else:
                    planner.set_goal(pos)
        path = []
        waypoint = planner.plan(agent_pos).pos.copy()
        path = planner.goal_path()
        # Step towards the next waypoint, capped at 0.7 world units per frame.
        agent_pos += min(1, 0.7/np.linalg.norm(waypoint-agent_pos)) * (waypoint-agent_pos)
        display(screen, planner, agent_pos, path, image, scale)
if __name__ == "__main__":
    # CLI entry point: environment image + planner choice + assisting metric.
    parser = argparse.ArgumentParser(description='AM-RRT* & RT-RRT* graphical visualiser')
    parser.add_argument('image', type=str, help='Filename of an image containing the environment')
    parser.add_argument('planner', choices=['rtrrt', 'amrrt'], help='Name of planner (choices: "rtrrt", "amrrt")')
    parser.add_argument('metric_type', choices=['euclidean', 'diffusion', 'geodesic'], help='Name of assisting metric (choices: "euclidean", "diffusion", "geodesic")')
    args = parser.parse_args()
    visualiser(StateSpace.from_image(args.image), args.planner, args.metric_type)
| [
"amrrt.metrics.GeodesicMetric",
"amrrt.diffusion_map.DiffusionMap",
"amrrt.diffusion_map.GridGraph",
"pygame.draw.line",
"argparse.ArgumentParser",
"pygame.event.get",
"pygame.display.set_mode",
"os.path.realpath",
"amrrt.planners.AMRRTPlanner",
"pygame.init",
"amrrt.space.StateSpace.from_image"... | [((1503, 1526), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (1524, 1526), False, 'import sys, os, pygame, argparse\n'), ((1583, 1596), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1594, 1596), False, 'import sys, os, pygame, argparse\n'), ((1614, 1630), 'amrrt.diffusion_map.GridGraph', 'GridGraph', (['space'], {}), '(space)\n', (1623, 1630), False, 'from amrrt.diffusion_map import DiffusionMap, GridGraph\n'), ((2113, 2153), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)'], {}), '((width, height))\n', (2136, 2153), False, 'import sys, os, pygame, argparse\n'), ((3583, 3660), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""AM-RRT* & RT-RRT* graphical visualiser"""'}), "(description='AM-RRT* & RT-RRT* graphical visualiser')\n", (3606, 3660), False, 'import sys, os, pygame, argparse\n'), ((1072, 1120), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(30, 30, 255)', 'a', 'b', '(3)'], {}), '(screen, (30, 30, 255), a, b, 3)\n', (1088, 1120), False, 'import sys, os, pygame, argparse\n'), ((1686, 1703), 'amrrt.metrics.EuclideanMetric', 'EuclideanMetric', ([], {}), '()\n', (1701, 1703), False, 'from amrrt.metrics import EuclideanMetric, DiffusionMetric, GeodesicMetric\n'), ((2346, 2364), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2362, 2364), False, 'import sys, os, pygame, argparse\n'), ((2834, 2857), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (2855, 2857), False, 'import sys, os, pygame, argparse\n'), ((2896, 2914), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2912, 2914), False, 'import sys, os, pygame, argparse\n'), ((4089, 4122), 'amrrt.space.StateSpace.from_image', 'StateSpace.from_image', (['args.image'], {}), '(args.image)\n', (4110, 4122), False, 'from amrrt.space import StateSpace\n'), ((744, 788), 'pygame.draw.line', 'pygame.draw.line', (['screen', '(0, 0, 0)', 'a', 'b', '(1)'], 
{}), '(screen, (0, 0, 0), a, b, 1)\n', (760, 788), False, 'import sys, os, pygame, argparse\n'), ((1851, 1877), 'amrrt.metrics.GeodesicMetric', 'GeodesicMetric', (['grid_graph'], {}), '(grid_graph)\n', (1865, 1877), False, 'from amrrt.metrics import EuclideanMetric, DiffusionMetric, GeodesicMetric\n'), ((185, 211), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (201, 211), False, 'import sys, os, pygame, argparse\n'), ((1777, 1819), 'amrrt.diffusion_map.DiffusionMap', 'DiffusionMap', (['space'], {'grid_graph': 'grid_graph'}), '(space, grid_graph=grid_graph)\n', (1789, 1819), False, 'from amrrt.diffusion_map import DiffusionMap, GridGraph\n'), ((2424, 2434), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2432, 2434), False, 'import sys, os, pygame, argparse\n'), ((2974, 2984), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2982, 2984), False, 'import sys, os, pygame, argparse\n'), ((2606, 2665), 'amrrt.planners.RTRRTPlanner', 'RTRRTPlanner', (['space', 'pos'], {'assisting_metric': 'assisting_metric'}), '(space, pos, assisting_metric=assisting_metric)\n', (2618, 2665), False, 'from amrrt.planners import AMRRTPlanner, RTRRTPlanner\n'), ((2699, 2758), 'amrrt.planners.AMRRTPlanner', 'AMRRTPlanner', (['space', 'pos'], {'assisting_metric': 'assisting_metric'}), '(space, pos, assisting_metric=assisting_metric)\n', (2711, 2758), False, 'from amrrt.planners import AMRRTPlanner, RTRRTPlanner\n'), ((3120, 3144), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (3142, 3144), False, 'import sys, os, pygame, argparse\n'), ((3418, 3454), 'numpy.linalg.norm', 'np.linalg.norm', (['(waypoint - agent_pos)'], {}), '(waypoint - agent_pos)\n', (3432, 3454), True, 'import numpy as np\n'), ((2519, 2541), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2539, 2541), False, 'import sys, os, pygame, argparse\n'), ((3069, 3091), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (3089, 3091), False, 'import sys, os, 
pygame, argparse\n')] |
import numpy as np
import copy
from pprint import pprint
from fractions import Fraction
frac = True  # when True, try to render floats as exact fractions / multiples of pi and e
denom_lim = 100000  # largest denominator accepted when approximating with a Fraction
num_dec = 12  # decimal places used when falling back to plain rounding
def toFrac(arg, max_denominator=None):
    """Approximate *arg* by the closest Fraction with a bounded denominator.

    Parameters
    ----------
    arg : float
        Value to approximate.
    max_denominator : int, optional
        Largest denominator allowed. Defaults to the module-level
        ``denom_lim`` (generalized from the previously hard-coded limit;
        existing single-argument calls behave identically).

    Returns
    -------
    Fraction
        Closest fraction to ``arg`` with denominator <= ``max_denominator``.
    """
    if max_denominator is None:
        max_denominator = denom_lim
    return Fraction(arg).limit_denominator(max_denominator)
def chkFrac(fra, arg):
    """Return True when the fraction *fra* reproduces *arg* to within 1e-14."""
    tolerance = 10 ** (-14)
    difference = float(fra) - arg
    return abs(difference) < tolerance
def floatformat(arg):
    """Render a float symbolically when possible.

    Tries, in order: an exact fraction, a fraction times pi, a fraction times
    e, and the square root of a fraction. Symbolic forms are only attempted
    when the module flag ``frac`` is set; otherwise (or when nothing matches)
    the value is returned rounded to ``num_dec`` places (as a float, not a
    string).
    """
    if frac:
        # Plain fraction first, then fraction * pi, then fraction * e.
        for suffix, base in (("", 1.0), (" π", np.pi), (" e", np.exp(1))):
            scaled = arg / base
            candidate = toFrac(scaled)
            if chkFrac(candidate, scaled):
                return str(candidate) + suffix
        # Last resort: is arg the square root of a simple fraction?
        squared = arg**2
        candidate = toFrac(squared)
        if chkFrac(float(candidate), squared):
            return '√( ' + str(candidate) + ' )'
    return round(arg, num_dec)
def listformat(arg, prevpoints=[]):
    """Recursively apply floatformat to every float inside a nested
    list/tuple/dict/ndarray, returning a same-shaped copy.

    prevpoints tracks the containers already visited on the current recursion
    path (by identity), so self-referential structures render as '[...]'
    instead of recursing forever.

    NOTE(review): the mutable default argument is only safe here because the
    function copies it on entry; the usual None-default idiom would be clearer.
    """
    # Copy so appends on this path never leak into the caller's list.
    prevpoints = copy.copy(prevpoints)
    isnparray = isinstance(arg, np.ndarray)
    if isnparray:
        # Work on a plain list; convert back to ndarray before returning.
        arg = list(np.asarray(arg))
    if isinstance(arg, (list, tuple, dict)):
        prevpoints.append(arg)
        # Remember the original container kind to rebuild it at the end.
        istup = isinstance(arg, tuple)
        isdict = isinstance(arg, dict)
        ret = list(arg.items()) if isdict else list(copy.copy(arg))
        if isdict:
            # Dicts are processed as (key, value) pairs.
            arg = list(arg.items())
        for i in range(len(arg)):
            seen_before = False
            # Identity check against every container on the current path.
            for j in prevpoints:
                if id(arg[i]) == id(j):
                    ret[i] = '[...]'
                    seen_before = True
                    break
            if not seen_before:
                if isinstance(arg[i], float):
                    ret[i] = floatformat(arg[i])
                elif isinstance(arg[i], (list, tuple, np.ndarray)):
                    ret[i] = listformat(arg[i], prevpoints)
        if isnparray:
            return np.array(ret)
        elif istup:
            return tuple(ret)
        elif isdict:
            return dict(ret)
        else:
            return ret
    # Scalars and unsupported types pass through unchanged.
    return arg
def pfprint(arg):
    """Pretty-print *arg*, rendering contained floats via floatformat
    (fractions, multiples of pi/e, square roots)."""
    if isinstance(arg, float):
        print(floatformat(arg))
        return
    if not isinstance(arg, (list, tuple, dict, np.ndarray)):
        pprint(arg)
        return
    formatted = listformat(arg, [])
    # ndarrays already print compactly; other containers benefit from pprint.
    if isinstance(arg, np.ndarray):
        print(formatted)
    else:
        pprint(formatted)
| [
"numpy.asarray",
"copy.copy",
"numpy.array",
"numpy.exp",
"pprint.pprint",
"fractions.Fraction"
] | [((740, 761), 'copy.copy', 'copy.copy', (['prevpoints'], {}), '(prevpoints)\n', (749, 761), False, 'import copy\n'), ((160, 173), 'fractions.Fraction', 'Fraction', (['arg'], {}), '(arg)\n', (168, 173), False, 'from fractions import Fraction\n'), ((475, 484), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (481, 484), True, 'import numpy as np\n'), ((831, 846), 'numpy.asarray', 'np.asarray', (['arg'], {}), '(arg)\n', (841, 846), True, 'import numpy as np\n'), ((1461, 1474), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (1469, 1474), True, 'import numpy as np\n'), ((1820, 1831), 'pprint.pprint', 'pprint', (['arg'], {}), '(arg)\n', (1826, 1831), False, 'from pprint import pprint\n'), ((1027, 1041), 'copy.copy', 'copy.copy', (['arg'], {}), '(arg)\n', (1036, 1041), False, 'import copy\n'), ((1798, 1810), 'pprint.pprint', 'pprint', (['data'], {}), '(data)\n', (1804, 1810), False, 'from pprint import pprint\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = """Prof. <NAME>, Ph.D. <<EMAIL>>"""
import os
os.system('clear')
print('.-------------------------------.')
print('| |#')
print('| By.: Prof. <NAME> |#')
print('| |#')
print('| 2020 |#')
print('\'-------------------------------\'#')
print(' ################################')
print('')
print('Importing Libraries:')
# -*- coding: utf-8 -*-
#!/usr/bin/env python2.7
__author__ = """Prof. <NAME>, Ph.D. <<EMAIL>>"""
import os
os.system('clear')
import numpy as np
import matplotlib.pyplot as pl
#############################################
# COLOCA GRAO DE AREIA EM UMA POSICAO #
#############################################
def place(array, x, y, L):
    """Drop one grain at (x, y) on an L x L sandpile and resolve topplings.

    A cell holding 4 or more grains topples: it loses 4 grains, one to each
    of its four neighbours (grains falling off the edge are lost). Toppling
    recurses until the pile is stable.

    Returns the (mutated) grid and the number of topplings triggered.
    Out-of-bounds drops are ignored and count zero topplings.
    """
    toppled = 0
    if (0 <= x < L) and (0 <= y < L):
        array[x][y] += 1
        if array[x][y] >= 4:
            # Toppling: redistribute 4 grains to the von Neumann neighbours.
            toppled = 1
            array[x][y] -= 4
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                array, extra = place(array, x + dx, y + dy, L)
                toppled += extra
    return array, toppled
#############################################
# INTERACAO #
#############################################
def dist(L):
    """Simulate an L x L abelian sandpile and collect statistics.

    Drops 500*L + 100 grains at binomially distributed positions, recording
    the mean grid mass after every drop and a histogram of avalanche sizes.

    Returns
    -------
    (ndarray, list)
        Mass history (one entry per drop) and a 50000-bin avalanche-size
        histogram (index = number of topplings, value = occurrence count).
    """
    sand = [[0] * L for _ in range(L)]
    mass_history = []
    # 50000 bins are assumed to exceed any avalanche size for these L.
    tops = [0] * 50000
    total_drops = 500 * L + 100
    for _ in range(total_drops):
        # Binomial(L, 0.5) concentrates drops near the centre of the grid.
        x = np.random.binomial(L, 0.5)
        y = np.random.binomial(L, 0.5)
        sand, size = place(sand, x, y, L)
        mass_history.append(np.mean(sand))
        tops[size] += 1
    return np.asarray(mass_history), tops
#############################################
#                 MAIN                      #
#############################################
# Run the sandpile simulation and plot the avalanche-size distribution on
# log-log axes (self-organised criticality produces a power law).
N = 80
a, t = dist(N)
# Histogram axes: index n = avalanche size, t[n] = number of avalanches of
# that size. Built vectorised instead of the former O(n^2) np.append loop;
# the dead commented-out log-log regression scaffolding was removed.
x = np.arange(len(t))
y = np.asarray(t)
pl.loglog(x, y, '.', color='gray')
pl.xlabel('Size of avalanche')  # fixed typo: was 'avalance'
pl.ylabel('Number of avalanches')
pl.axis([1E-1, 1E3, 1E1, 1E4])
pl.savefig('../chapters/Chapter_6/figs/src/pilehist.svg')
pl.show()
quit()
| [
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"numpy.random.binomial",
"matplotlib.pyplot.axis",
"os.system",
"numpy.append",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((108, 126), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (117, 126), False, 'import os\n'), ((577, 595), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (586, 595), False, 'import os\n'), ((2047, 2081), 'matplotlib.pyplot.loglog', 'pl.loglog', (['x', 'y', '"""."""'], {'color': '"""gray"""'}), "(x, y, '.', color='gray')\n", (2056, 2081), True, 'import matplotlib.pyplot as pl\n'), ((2080, 2109), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Size of avalance"""'], {}), "('Size of avalance')\n", (2089, 2109), True, 'import matplotlib.pyplot as pl\n'), ((2110, 2143), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Number of avalanches"""'], {}), "('Number of avalanches')\n", (2119, 2143), True, 'import matplotlib.pyplot as pl\n'), ((2144, 2181), 'matplotlib.pyplot.axis', 'pl.axis', (['[0.1, 1000.0, 10.0, 10000.0]'], {}), '([0.1, 1000.0, 10.0, 10000.0])\n', (2151, 2181), True, 'import matplotlib.pyplot as pl\n'), ((2172, 2229), 'matplotlib.pyplot.savefig', 'pl.savefig', (['"""../chapters/Chapter_6/figs/src/pilehist.svg"""'], {}), "('../chapters/Chapter_6/figs/src/pilehist.svg')\n", (2182, 2229), True, 'import matplotlib.pyplot as pl\n'), ((2230, 2239), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2237, 2239), True, 'import matplotlib.pyplot as pl\n'), ((1961, 1976), 'numpy.append', 'np.append', (['x', 'n'], {}), '(x, n)\n', (1970, 1976), True, 'import numpy as np\n'), ((1984, 2002), 'numpy.append', 'np.append', (['y', 't[n]'], {}), '(y, t[n])\n', (1993, 2002), True, 'import numpy as np\n'), ((1482, 1508), 'numpy.random.binomial', 'np.random.binomial', (['L', '(0.5)'], {}), '(L, 0.5)\n', (1500, 1508), True, 'import numpy as np\n'), ((1514, 1540), 'numpy.random.binomial', 'np.random.binomial', (['L', '(0.5)'], {}), '(L, 0.5)\n', (1532, 1540), True, 'import numpy as np\n'), ((1602, 1615), 'numpy.mean', 'np.mean', (['sand'], {}), '(sand)\n', (1609, 1615), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Training routine for 3D object detection with SUN RGB-D or ScanNet.
Sample usage:
python train.py --dataset sunrgbd --log_dir log_sunrgbd
To use Tensorboard:
At server:
python -m tensorboard.main --logdir=<log_dir_name> --port=6006
At local machine:
ssh -L 1237:localhost:6006 <server_name>
Then go to local browser and type:
localhost:1237
"""
import os
import sys
import numpy as np
from datetime import datetime
import argparse
import importlib
import logging
from omegaconf import OmegaConf
from models.loss_helper import get_loss as criterion
from tensorboardX import SummaryWriter
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler
from models.dump_helper import dump_results
from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths
def get_current_lr(epoch, config):
    """Return the step-decayed learning rate for *epoch*.

    Starts from config.optimizer.learning_rate and multiplies in the decay
    rate of every schedule boundary that *epoch* has already reached.
    """
    current = config.optimizer.learning_rate
    boundaries = config.optimizer.lr_decay_steps
    rates = config.optimizer.lr_decay_rates
    for boundary, rate in zip(boundaries, rates):
        if epoch >= boundary:
            current *= rate
    return current
def adjust_learning_rate(optimizer, epoch, config):
    """Overwrite every param group's learning rate with the scheduled value
    for *epoch* (computed by get_current_lr)."""
    scheduled_lr = get_current_lr(epoch, config)
    for group in optimizer.param_groups:
        group['lr'] = scheduled_lr
def train_one_epoch(net, train_dataloader, optimizer, bnm_scheduler, epoch_cnt, dataset_config, writer, config):
    """Run one training epoch of the 3D detection network.

    Applies the per-epoch LR and BN-momentum schedules, then for every batch:
    moves all tensors to GPU, runs the forward pass, computes the detection
    loss, backpropagates and steps the optimizer. Running loss/accuracy
    statistics are written to TensorBoard and the console every 10 batches.

    Parameters
    ----------
    net : torch.nn.Module called with a dict of input tensors.
    train_dataloader : iterable yielding dicts of batch tensors
        (must contain 'point_clouds'; voxel keys are optional).
    optimizer : torch.optim optimizer.
    bnm_scheduler : BNMomentumScheduler stepped once per epoch.
    epoch_cnt : int, zero-based epoch index (used for schedules/log steps).
    dataset_config : dataset configuration forwarded to the loss.
    writer : tensorboardX SummaryWriter.
    config : config object with optimizer and data sections.
    """
    stat_dict = {} # collect statistics
    adjust_learning_rate(optimizer, epoch_cnt, config)
    bnm_scheduler.step() # decay BN momentum
    net.train() # set model to training mode
    for batch_idx, batch_data_label in enumerate(train_dataloader):
        # Move the whole batch to GPU in place.
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].cuda()
        # Forward pass
        optimizer.zero_grad()
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        # Voxel inputs are optional (only present for voxel-based backbones).
        if 'voxel_coords' in batch_data_label:
            inputs.update({
                'voxel_coords': batch_data_label['voxel_coords'],
                'voxel_inds': batch_data_label['voxel_inds'],
                'voxel_feats': batch_data_label['voxel_feats']})
        end_points = net(inputs)
        # Compute loss and gradients, update parameters.
        # Ground-truth labels are merged into end_points because the loss
        # reads predictions and targets from one dict; the assert guards
        # against a label key clobbering a network output.
        for key in batch_data_label:
            assert(key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, dataset_config)
        loss.backward()
        optimizer.step()
        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        batch_interval = 10
        if (batch_idx+1) % batch_interval == 0:
            logging.info(' ---- batch: %03d ----' % (batch_idx+1))
            # Log running means over the last interval, then reset accumulators.
            for key in stat_dict:
                writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval,
                    (epoch_cnt*len(train_dataloader)+batch_idx)*config.data.batch_size)
            for key in sorted(stat_dict.keys()):
                logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))
                stat_dict[key] = 0
def evaluate_one_epoch(net, train_dataloader, test_dataloader, config, epoch_cnt, CONFIG_DICT, writer):
    """Evaluate the detection network over the full test set.

    Runs a no-grad forward pass per batch, accumulates loss/accuracy stats,
    feeds predictions and ground truths into an mAP@0.5 calculator, and logs
    everything to TensorBoard and the console.

    Returns
    -------
    float
        Mean loss over the evaluated batches.

    NOTE(review): train_dataloader is only used to scale the TensorBoard
    step; batch_idx leaks out of the loop, so an empty test_dataloader
    would raise NameError.
    """
    stat_dict = {} # collect statistics
    ap_calculator = APCalculator(ap_iou_thresh=0.5, class2type_map=CONFIG_DICT['dataset_config'].class2type)
    net.eval() # set model to eval mode (for bn and dp)
    for batch_idx, batch_data_label in enumerate(test_dataloader):
        if batch_idx % 10 == 0:
            logging.info('Eval batch: %d'%(batch_idx))
        # Move the whole batch to GPU in place.
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].cuda()
        # Forward pass
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        # Voxel inputs are optional (only present for voxel-based backbones).
        if 'voxel_coords' in batch_data_label:
            inputs.update({
                'voxel_coords': batch_data_label['voxel_coords'],
                'voxel_inds': batch_data_label['voxel_inds'],
                'voxel_feats': batch_data_label['voxel_feats']})
        with torch.no_grad():
            end_points = net(inputs)
        # Compute loss
        # Labels are merged into end_points for the loss; assert guards
        # against clobbering a network output.
        for key in batch_data_label:
            assert(key not in end_points)
            end_points[key] = batch_data_label[key]
        loss, end_points = criterion(end_points, CONFIG_DICT['dataset_config'])
        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'ratio' in key:
                if key not in stat_dict: stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        # Convert raw outputs to (class, box, score) lists and update mAP.
        batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
        batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
        ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
        # Dump evaluation results for visualization
        if config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0:
            dump_results(end_points, 'results', CONFIG_DICT['dataset_config'])
    # Log statistics
    for key in sorted(stat_dict.keys()):
        writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),
            (epoch_cnt+1)*len(train_dataloader)*config.data.batch_size)
        logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
    # Evaluate average precision
    metrics_dict = ap_calculator.compute_metrics()
    for key in metrics_dict:
        logging.info('eval %s: %f'%(key, metrics_dict[key]))
    writer.add_scalar('validation/mAP@0.5', metrics_dict['mAP'], (epoch_cnt+1)*len(train_dataloader)*config.data.batch_size)
    mean_loss = stat_dict['loss']/float(batch_idx+1)
    return mean_loss
def train(net, train_dataloader, test_dataloader, dataset_config, config):
    """Train `net`, evaluating every 5 epochs and checkpointing every epoch.

    Args:
        net: detection network (possibly wrapped in nn.DataParallel).
        train_dataloader / test_dataloader: torch dataloaders.
        dataset_config: dataset-specific config forwarded to loss/AP code.
        config: config object with `optimizer` and `data` sub-configs.

    Side effects: writes tensorboard logs under 'tensorboard/', and
    'checkpoint.tar' / 'config.yaml' in the current working directory.
    """
    # Used for AP calculation
    CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,
        'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,
        'per_class_proposal': True, 'conf_thresh':0.05,
        'dataset_config': dataset_config}
    # Load the Adam optimizer
    optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)
    # writer
    writer = SummaryWriter(log_dir='tensorboard')
    # Load checkpoint if there is any
    start_epoch = 0
    CHECKPOINT_PATH = 'checkpoint.tar'  # single-arg os.path.join was a no-op
    if os.path.isfile(CHECKPOINT_PATH):
        checkpoint = torch.load(CHECKPOINT_PATH)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']
        logging.info("-> loaded checkpoint %s (epoch: %d)"%(CHECKPOINT_PATH, start_epoch))
    # Decay Batchnorm momentum from 0.5 to 0.999
    # note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum
    BN_MOMENTUM_INIT = 0.5
    BN_MOMENTUM_MAX = 0.001
    BN_DECAY_STEP = config.optimizer.bn_decay_step
    BN_DECAY_RATE = config.optimizer.bn_decay_rate
    bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)
    bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=start_epoch-1)
    loss = 0
    for epoch in range(start_epoch, config.optimizer.max_epoch):
        logging.info('**** EPOCH %03d ****' % (epoch))
        logging.info('Current learning rate: %f'%(get_current_lr(epoch, config)))
        logging.info('Current BN decay momentum: %f'%(bnm_scheduler.lmbd(bnm_scheduler.last_epoch)))
        logging.info(str(datetime.now()))
        # Reset numpy seed.
        # REF: https://github.com/pytorch/pytorch/issues/5059
        np.random.seed()
        train_one_epoch(net=net, train_dataloader=train_dataloader, optimizer=optimizer,
            bnm_scheduler=bnm_scheduler, epoch_cnt=epoch, dataset_config=dataset_config,
            writer=writer, config=config)
        if epoch == 0 or epoch % 5 == 4: # Eval every 5 epochs
            loss = evaluate_one_epoch(net=net, train_dataloader=train_dataloader, test_dataloader=test_dataloader,
                config=config, epoch_cnt=epoch, CONFIG_DICT=CONFIG_DICT, writer=writer)
        # Save checkpoint
        save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': loss,
                    }
        # With nn.DataParallel the real module lives under net.module.
        # Only AttributeError (no .module attribute) should fall through to
        # the plain net; the previous bare `except:` also masked genuine
        # state_dict failures.
        try:
            save_dict['state_dict'] = net.module.state_dict()
        except AttributeError:
            save_dict['state_dict'] = net.state_dict()
        torch.save(save_dict, 'checkpoint.tar')
    OmegaConf.save(config, 'config.yaml')
| [
"models.loss_helper.get_loss",
"tensorboardX.SummaryWriter",
"omegaconf.OmegaConf.save",
"numpy.random.seed",
"warnings.simplefilter",
"torch.no_grad",
"models.ap_helper.APCalculator",
"torch.load",
"datetime.datetime.now",
"torch.save",
"logging.info",
"os.path.isfile",
"models.dump_helper.... | [((882, 944), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (903, 944), False, 'import warnings\n'), ((3735, 3828), 'models.ap_helper.APCalculator', 'APCalculator', ([], {'ap_iou_thresh': '(0.5)', 'class2type_map': "CONFIG_DICT['dataset_config'].class2type"}), "(ap_iou_thresh=0.5, class2type_map=CONFIG_DICT['dataset_config'\n ].class2type)\n", (3747, 3828), False, 'from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths\n'), ((6708, 6744), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""tensorboard"""'}), "(log_dir='tensorboard')\n", (6721, 6744), False, 'from tensorboardX import SummaryWriter\n'), ((6826, 6856), 'os.path.join', 'os.path.join', (['"""checkpoint.tar"""'], {}), "('checkpoint.tar')\n", (6838, 6856), False, 'import os\n'), ((6864, 6895), 'os.path.isfile', 'os.path.isfile', (['CHECKPOINT_PATH'], {}), '(CHECKPOINT_PATH)\n', (6878, 6895), False, 'import os\n'), ((7615, 7686), 'models.backbone.pointnet2.pytorch_utils.BNMomentumScheduler', 'BNMomentumScheduler', (['net'], {'bn_lambda': 'bn_lbmd', 'last_epoch': '(start_epoch - 1)'}), '(net, bn_lambda=bn_lbmd, last_epoch=start_epoch - 1)\n', (7634, 7686), False, 'from models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler\n'), ((2686, 2723), 'models.loss_helper.get_loss', 'criterion', (['end_points', 'dataset_config'], {}), '(end_points, dataset_config)\n', (2695, 2723), True, 'from models.loss_helper import get_loss as criterion\n'), ((4757, 4809), 'models.loss_helper.get_loss', 'criterion', (['end_points', "CONFIG_DICT['dataset_config']"], {}), "(end_points, CONFIG_DICT['dataset_config'])\n", (4766, 4809), True, 'from models.loss_helper import get_loss as criterion\n'), ((5099, 5141), 'models.ap_helper.parse_predictions', 'parse_predictions', (['end_points', 'CONFIG_DICT'], {}), '(end_points, CONFIG_DICT)\n', 
(5116, 5141), False, 'from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths\n'), ((5170, 5213), 'models.ap_helper.parse_groundtruths', 'parse_groundtruths', (['end_points', 'CONFIG_DICT'], {}), '(end_points, CONFIG_DICT)\n', (5188, 5213), False, 'from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths\n'), ((5937, 5991), 'logging.info', 'logging.info', (["('eval %s: %f' % (key, metrics_dict[key]))"], {}), "('eval %s: %f' % (key, metrics_dict[key]))\n", (5949, 5991), False, 'import logging\n'), ((6918, 6945), 'torch.load', 'torch.load', (['CHECKPOINT_PATH'], {}), '(CHECKPOINT_PATH)\n', (6928, 6945), False, 'import torch\n'), ((7120, 7208), 'logging.info', 'logging.info', (["('-> loaded checkpoint %s (epoch: %d)' % (CHECKPOINT_PATH, start_epoch))"], {}), "('-> loaded checkpoint %s (epoch: %d)' % (CHECKPOINT_PATH,\n start_epoch))\n", (7132, 7208), False, 'import logging\n'), ((7772, 7816), 'logging.info', 'logging.info', (["('**** EPOCH %03d ****' % epoch)"], {}), "('**** EPOCH %03d ****' % epoch)\n", (7784, 7816), False, 'import logging\n'), ((8142, 8158), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (8156, 8158), True, 'import numpy as np\n'), ((9172, 9211), 'torch.save', 'torch.save', (['save_dict', '"""checkpoint.tar"""'], {}), "(save_dict, 'checkpoint.tar')\n", (9182, 9211), False, 'import torch\n'), ((9220, 9257), 'omegaconf.OmegaConf.save', 'OmegaConf.save', (['config', '"""config.yaml"""'], {}), "(config, 'config.yaml')\n", (9234, 9257), False, 'from omegaconf import OmegaConf\n'), ((3121, 3177), 'logging.info', 'logging.info', (["(' ---- batch: %03d ----' % (batch_idx + 1))"], {}), "(' ---- batch: %03d ----' % (batch_idx + 1))\n", (3133, 3177), False, 'import logging\n'), ((3991, 4033), 'logging.info', 'logging.info', (["('Eval batch: %d' % batch_idx)"], {}), "('Eval batch: %d' % batch_idx)\n", (4003, 4033), False, 'import logging\n'), ((4521, 4536), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (4534, 4536), False, 'import torch\n'), ((5424, 5490), 'models.dump_helper.dump_results', 'dump_results', (['end_points', '"""results"""', "CONFIG_DICT['dataset_config']"], {}), "(end_points, 'results', CONFIG_DICT['dataset_config'])\n", (5436, 5490), False, 'from models.dump_helper import dump_results\n'), ((3470, 3538), 'logging.info', 'logging.info', (["('mean %s: %f' % (key, stat_dict[key] / batch_interval))"], {}), "('mean %s: %f' % (key, stat_dict[key] / batch_interval))\n", (3482, 3538), False, 'import logging\n'), ((8027, 8041), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8039, 8041), False, 'from datetime import datetime\n')] |
import collections
import logging
import numpy as np
import gym
import cv2
from core.log import do_logging
from utility.utils import infer_dtype, convert_dtype
from utility.typing import AttrDict
from env.typing import EnvOutput, GymOutput
# stop using GPU
cv2.ocl.setUseOpenCL(False)
# Module-level logger; the wrappers below pass it to core.log.do_logging
logger = logging.getLogger(__name__)
def post_wrap(env, config):
    """ Does some post processing and bookkeeping.
    Does not change anything that will affect the agent's performance
    """
    precision = config.get('precision', 32)
    max_episode_steps = config.get('max_episode_steps', None)
    timeout_done = config.get('timeout_done', False)
    auto_reset = config.get('auto_reset', True)
    env = DataProcess(env, precision)
    env = EnvStats(
        env, max_episode_steps,
        timeout_done=timeout_done,
        auto_reset=auto_reset)
    return env
""" Wrappers from OpenAI's baselines.
Some modifications are done to meet specific requirements """
class LazyFrames:
    def __init__(self, frames):
        """ Hold a list of frames without materializing the stacked array.

        Unlike the official OpenAI baselines implementation, the stacked
        result is not cached, to save memory. No __getitem__ is defined
        either, to avoid unintended per-element overhead; consequently
        something like np.array([LazyFrames(frames) for _ in range(4)])
        is unsupported.
        """
        self._frames = list(frames)
        # 3-D frames (H, W, C) are concatenated along the channel axis;
        # lower-rank frames get a new trailing axis via stacking.
        self._concat = len(frames[0].shape) == 3
    def __array__(self):
        combine = np.concatenate if self._concat else np.stack
        return combine(self._frames, -1)
class MaxAndSkipEnv(gym.Wrapper):
    def __init__(self, env, frame_skip=4):
        """Return only every `frame_skip`-th frame"""
        super().__init__(env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self.frame_skip = frame_skip
    def step(self, action, frame_skip=None, **kwargs):
        """Repeat action, sum reward, and max over last observations.

        `frame_skip` overrides self.frame_skip per call (falsy values fall
        back to the default). info['frame_skip'] records the number of env
        steps actually taken, which is < frame_skip when done occurs early.
        """
        total_reward = 0.0
        done = None
        frame_skip = frame_skip or self.frame_skip
        for i in range(frame_skip):
            obs, reward, done, info = self.env.step(action, **kwargs)
            # only the last two raw frames are kept for the pixel-wise max
            if i == frame_skip - 2: self._obs_buffer[0] = obs
            if i == frame_skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        # NOTE(review): if done fires before the last two repeats, the
        # buffer still holds frames written by an earlier step() call.
        max_frame = self._obs_buffer.max(axis=0)
        info['frame_skip'] = i+1
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        # Plain pass-through; the buffer is intentionally not cleared here.
        return self.env.reset(**kwargs)
""" Custom wrappers """
class NormalizeActions(gym.Wrapper):
    """ Normalize infinite action dimension in range [-1, 1] """
    def __init__(self, env):
        super().__init__(env)
        # dims with finite low AND high bounds get rescaled to [-1, 1];
        # dims with an infinite bound are passed through untouched
        finite_low = np.isfinite(env.action_space.low)
        finite_high = np.isfinite(env.action_space.high)
        self._act_mask = np.logical_and(finite_low, finite_high)
        self._low = np.where(self._act_mask, env.action_space.low, -1)
        self._high = np.where(self._act_mask, env.action_space.high, 1)
        ones = np.ones_like(self._low)
        box_low = np.where(self._act_mask, -ones, self._low)
        box_high = np.where(self._act_mask, ones, self._high)
        self.action_space = gym.spaces.Box(box_low, box_high, dtype=np.float32)

    def step(self, action, **kwargs):
        """ Map an action from [-1, 1] back to the env's native range
        on the masked (finite) dims before stepping. """
        span = self._high - self._low
        rescaled = (action + 1) / 2 * span + self._low
        rescaled = np.where(self._act_mask, rescaled, action)
        return self.env.step(rescaled, **kwargs)
class GrayScale(gym.ObservationWrapper):
    """ Convert uint8 RGB observations to single-channel gray scale. """
    def __init__(self, env):
        super().__init__(env)
        src_space = self.observation_space
        gray_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(*src_space.shape[:2], 1),
            dtype=np.uint8,
        )
        assert src_space.dtype == np.uint8, src_space.dtype
        assert len(src_space.shape) == 3, src_space.shape
        self.observation_space = gray_space

    def observation(self, obs):
        """ Return the gray-scale frame with a trailing channel axis. """
        gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        return np.expand_dims(gray, -1)
class FrameSkip(gym.Wrapper):
    """ Unlike MaxAndSkipEnv defined in baselines
    this wrapper does not max pool observations.
    This is useful for RGB observations
    """
    def __init__(self, env, frame_skip=1):
        super().__init__(env)
        self.frame_skip = frame_skip

    def step(self, action, frame_skip=None, **kwargs):
        """ Repeat `action` up to frame_skip times, summing rewards and
        stopping early on done; info['frame_skip'] records the number of
        env steps actually taken. """
        repeats = frame_skip or self.frame_skip
        total_reward = 0
        for step_count in range(1, repeats+1):
            obs, reward, done, info = self.env.step(action, **kwargs)
            total_reward += reward
            if done:
                break
        info['frame_skip'] = step_count
        return obs, total_reward, done, info
class FrameDiff(gym.Wrapper):
    # Appends the pixel difference between the newest frame and the frame
    # `distance` steps earlier as extra observation channel(s).
    def __init__(self, env, gray_scale, distance=1):
        """
        Args:
            env: wrapped env; must emit uint8 RGB observations (w, h, 3).
            gray_scale: if True the residual is computed on gray-scale
                frames (1 extra channel); otherwise on RGB (3 channels).
            distance: gap, in steps, between the two differenced frames.
        """
        super().__init__(env)
        self._gray_scale = gray_scale
        self._distance = distance
        # number of channels the residual occupies in the output
        self._residual_channel = 1 if self._gray_scale else 3
        w, h, c = self.observation_space.shape
        assert c == 3, self.observation_space.shape
        assert self.observation_space.dtype == np.uint8, self.observation_space.dtype
        assert len(self.observation_space.shape) == 3, self.observation_space.shape
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(w, h, c+self._residual_channel),
            dtype=np.uint8,
        )
        self.observation_space = new_space
        # rolling buffer of the last `distance+1` (possibly gray) frames,
        # laid out along the channel axis, oldest first
        self._buff = np.zeros((w, h, self._residual_channel*(self._distance+1)))
    def _append_residual(self, obs):
        # newest - oldest buffered frame, remapped from [-255, 255]
        # into [0, 255] so it fits the uint8 observation space
        res = (self._buff[..., -self._residual_channel:].astype(np.int16)
            - self._buff[..., :self._residual_channel].astype(np.int16))
        res = (res + 255) // 2
        obs = np.concatenate([obs, res.astype(np.uint8)], axis=-1)
        assert obs.dtype == np.uint8
        return obs
    def _add_obs_to_buff(self, obs):
        # shift the buffer left by one frame and write the new frame at the end
        self._buff = np.roll(self._buff, -self._residual_channel, axis=-1)
        if self._gray_scale:
            self._buff[..., -1] = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        else:
            self._buff[..., -self._residual_channel:] = obs
    def reset(self):
        obs = self.env.reset()
        # fill the buffer with copies of the first frame, so the first
        # residual is zero (i.e. 127 after the [0, 255] remapping)
        buff_obs = np.expand_dims(cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY), -1) \
            if self._gray_scale else obs
        self._buff = np.tile(buff_obs, [1, 1, self._distance+1])
        obs = self._append_residual(obs)
        return obs
    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        self._add_obs_to_buff(obs)
        res_obs = self._append_residual(obs)
        # self._plot(obs, res_obs)
        return res_obs, rew, done, info
    def _plot(self, obs, res_obs):
        # Debugging helper: shows the buffer extremes, the residual and the
        # raw observation side by side, plus value histograms.
        import matplotlib.pyplot as plt
        res_obs = np.squeeze(res_obs[..., -self._residual_channel:])
        fig, axs = plt.subplots(1, 6, figsize=(20, 6))
        fig.suptitle("FrameDifference Plot")
        axs[0].imshow(np.squeeze(self._buff[:, :, :self._residual_channel]))
        axs[0].set_title("oldest frame")
        axs[1].imshow(np.squeeze(self._buff[:, :, -self._residual_channel:]))
        axs[1].set_title("newest frame")
        axs[2].imshow(res_obs)
        axs[2].set_title("frame diff")
        axs[3].imshow(obs)
        axs[3].set_title("newest obs")
        axs[4].hist(res_obs.flatten())
        axs[4].set_title("Frame difference histogram")
        axs[5].hist(obs.flatten())
        axs[5].set_title("Observation histogram")
        print(obs.min())
        print(obs.max())
        print(res_obs.mean())
        print(res_obs.std())
        print()
        plt.show()
class CumulativeRewardObs(gym.Wrapper):
    """Append cumulative reward to observation
    """
    def __init__(self, env, obs_reward_scale):
        super().__init__(env)
        self._cumulative_reward = 0
        self._reward_scale = obs_reward_scale
        # extend the observation space with one extra channel in [0, inf)
        src_low = self.env.observation_space.low
        src_high = self.env.observation_space.high
        extra_low = np.zeros((*src_low.shape[:-1], 1), dtype=np.float32)
        extra_high = np.ones((*src_high.shape[:-1], 1), dtype=np.float32) * np.inf
        full_low = np.concatenate([src_low, extra_low], axis=-1)
        full_high = np.concatenate([src_high, extra_high], axis=-1)
        self.observation_space = gym.spaces.Box(
            low=full_low, high=full_high, dtype=full_low.dtype)

    def _get_ob(self, ob):
        # broadcast the scaled cumulative reward into its own channel
        reward_channel = np.ones((*ob.shape[:-1], 1), dtype=np.float32) \
            * self._reward_scale * self._cumulative_reward
        return np.concatenate([ob, reward_channel], axis=-1)

    def reset(self):
        ob = self.env.reset()
        self._cumulative_reward = 0
        return self._get_ob(ob)

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self._cumulative_reward += reward
        return self._get_ob(ob), reward, done, info
class RewardHack(gym.Wrapper):
    """ Scale and optionally clip the env reward; the raw reward is
    preserved in info['reward']. """
    def __init__(self, env, reward_scale=1, reward_min=None, reward_max=None, **kwargs):
        super().__init__(env)
        self.reward_scale = reward_scale
        self.reward_min = reward_min
        self.reward_max = reward_max

    def step(self, action, **kwargs):
        obs, reward, done, info = self.env.step(action, **kwargs)
        # keep the unscaled reward for stats downstream
        info['reward'] = reward
        scaled = reward * self.reward_scale
        if not (self.reward_min is None and self.reward_max is None):
            scaled = np.clip(scaled, self.reward_min, self.reward_max)
        return obs, scaled, done, info
class FrameStack(gym.Wrapper):
    """ Stack the last k observations along the channel axis.
    Returns a concrete numpy array when np_obs is True, otherwise a
    memory-saving LazyFrames wrapper. """
    def __init__(self, env, k, np_obs):
        super().__init__(env)
        self.k = k
        self.np_obs = np_obs
        self.frames = collections.deque([], maxlen=k)
        shp = env.observation_space.shape
        stacked_shape = shp[:-1] + (shp[-1] * k,)
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=stacked_shape,
            dtype=env.observation_space.dtype)

    def reset(self):
        # seed the deque with k copies of the initial observation
        ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
        return self._get_ob()

    def step(self, action, **kwargs):
        ob, reward, done, info = self.env.step(action, **kwargs)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        if self.np_obs:
            return np.concatenate(self.frames, axis=-1)
        return LazyFrames(list(self.frames))
class DataProcess(gym.Wrapper):
    """ Convert observation to np.float32 or np.float16 """
    def __init__(self, env, precision=32):
        super().__init__(env)
        self.precision = precision
        self.float_dtype = np.float32 if precision == 32 else np.float16
        discrete_fallback = isinstance(self.action_space, gym.spaces.Discrete)
        self.is_action_discrete = getattr(
            self.env, 'is_action_discrete', discrete_fallback)
        # for half precision, rebuild the continuous action space in fp16
        if precision == 16 and not self.is_action_discrete:
            self.action_space = gym.spaces.Box(
                self.action_space.low, self.action_space.high,
                self.action_space.shape, self.float_dtype)
        # cache spec attributes for downstream wrappers
        self.obs_shape = self.observation_space.shape
        self.action_shape = self.action_space.shape
        if self.is_action_discrete:
            self.action_dim = self.action_space.n
            # keep int32 for integer actions
            self.action_dtype = np.int32
        else:
            self.action_dim = self.action_shape[0]
            self.action_dtype = infer_dtype(self.action_space.dtype, self.precision)
        self.obs_dtype = infer_dtype(self.observation_space.dtype, precision)

    def observation(self, observation):
        """ Cast an array observation, or every value of a dict observation,
        to the configured precision. """
        if isinstance(observation, np.ndarray):
            return convert_dtype(observation, self.precision)
        if isinstance(observation, dict):
            for key in observation:
                observation[key] = convert_dtype(observation[key], self.precision)
            return observation

    def reset(self):
        return self.observation(self.env.reset())

    def step(self, action, **kwargs):
        obs, reward, done, info = self.env.step(action, **kwargs)
        return self.observation(obs), reward, done, info
""" Subclasses of EnvStatsBase change the gym API:
Both <reset> and <step> return EnvOutput of form
(obs, reward, discount, reset), where
- obs is a dict regardless of the original form of obs
- reward is the reward from the original env
- discount=1-done is the discount factor
- reset indicates if the environment has been reset.
By default, EnvStats automatically
reset the environment when the environment is done.
Explicitly calling EnvStats.reset turns off auto-reset.
For some environments truncated by max episode steps,
we recommend retrieving the last observation of an
episode using the method "prev_obs".
We distinguish several signals:
done: an episode is done, may due to life loss(Atari)
game over: a game is over, may due to timeout. Life
loss in Atari is not game over. Do store <game_over>
in <info> for multi-agent environments.
reset: a new episode starts after done. In auto-reset
mode, environment resets when the game's over.
Life loss should be automatically handled by
the environment/previous wrapper.
"""
class EnvStatsBase(gym.Wrapper):
    # Base wrapper that tracks per-episode score/length and exposes the
    # environment spec via stats(); subclasses implement reset()/step()
    # returning EnvOutput tuples.
    def __init__(self, env, max_episode_steps=None, timeout_done=False,
            auto_reset=True):
        """ Records environment statistics """
        super().__init__(env)
        # resolve the episode step limit: explicit argument > env attribute
        # > gym spec > effectively unlimited
        if max_episode_steps is None:
            if hasattr(self.env, 'max_episode_steps'):
                max_episode_steps = self.env.max_episode_steps
            elif hasattr(self.env, 'spec'):
                max_episode_steps = self.env.spec.max_episode_steps
            else:
                max_episode_steps = int(1e9)
        self.max_episode_steps = max_episode_steps
        # if we take timeout as done
        self.timeout_done = timeout_done
        self.auto_reset = auto_reset
        # game_over indicates whether an episode is finished,
        # either due to timeout or due to environment done
        self._game_over = True
        self._score = 0      # cumulative reward of the current episode
        self._epslen = 0     # number of env steps in the current episode
        self._info = {}
        self._output = None  # last EnvOutput produced by a subclass
        self.float_dtype = getattr(self.env, 'float_dtype', np.float32)
        # environment spec, filled from attributes of the wrapped env with
        # defaults for envs that don't define them
        self._stats = AttrDict(
            obs_shape=env.obs_shape,
            obs_dtype=env.obs_dtype,
            action_shape=env.action_shape,
            action_dtype=env.action_dtype,
            action_dim=env.action_dim,
            is_action_discrete=env.is_action_discrete,
            n_trainable_agents=getattr(env, 'n_trainable_agents', 1),
            # NOTE(review): reads 'n_trainable_agents' for
            # n_controllable_agents as well -- possibly a copy-paste slip;
            # confirm against the envs that define these attributes.
            n_controllable_agents=getattr(env, 'n_trainable_agents', 1),
            n_agents=getattr(env, 'n_agents', 1),
            global_state_shape=getattr(env, 'global_state_shape', ()),
            global_state_dtype=getattr(env, 'global_state_dtype', None),
            use_life_mask=getattr(env, 'use_life_mask', False),
            use_action_mask=getattr(env, 'use_action_mask', False),
        )
        if timeout_done:
            do_logging('Timeout is treated as done', logger=logger)
        self._reset()
    def observation(self, obs):
        # normalize any observation into a dict with key 'obs'
        if not isinstance(obs, dict):
            obs = dict(obs=obs)
        return obs
    def stats(self):
        # environment spec (shapes/dtypes/agent counts), not episode stats
        return self._stats
    def reset(self):
        raise NotImplementedError
    def _reset(self):
        obs = self.env.reset()
        # sanity check on the reset observation; the reason a length-1 obs
        # is rejected is not evident here -- NOTE(review)
        if len(obs) == 1:
            assert False, obs
        self._score = 0
        self._epslen = 0
        self._game_over = False
        return self.observation(obs)
    def score(self, **kwargs):
        # prefer the value recorded in info at episode end
        return self._info.get('score', self._score)
    def epslen(self, **kwargs):
        return self._info.get('epslen', self._epslen)
    def mask(self, **kwargs):
        # False only for steps taken after the game is over
        return self._info.get('mask', True)
    def game_over(self):
        return self._game_over
    def prev_obs(self):
        # last pre-reset observation, stored by subclasses in step()
        return self._info['prev_env_output'].obs
    def info(self):
        return self._info
    def output(self):
        return self._output
class EnvStats(EnvStatsBase):
    # Single-agent stats wrapper; returns EnvOutput(obs, reward, discount,
    # reset) and auto-resets on game over unless reset() was called manually.
    manual_reset_warning = True  # class-wide flag: warn only once
    def reset(self):
        """ Explicit reset; permanently disables auto-reset (evaluation). """
        if self.auto_reset:
            self.auto_reset = False
            if EnvStats.manual_reset_warning:
                do_logging('Explicitly resetting turns off auto-reset. Maker sure this is done intentionally at evaluation', logger=logger)
                EnvStats.manual_reset_warning = False
        # only actually reset if the last output was not already a reset
        if not self._output.reset:
            return self._reset()
        else:
            if EnvStats.manual_reset_warning:
                logger.debug('Repetitively calling reset results in no environment interaction')
            return self._output
    def _reset(self):
        obs = super()._reset()
        reward = self.float_dtype(0)
        discount = self.float_dtype(1)
        reset = self.float_dtype(True)
        self._output = EnvOutput(obs, reward, discount, reset)
        return self._output
    def step(self, action, **kwargs):
        if self.game_over():
            # stepping after game over without auto-reset: return the last
            # obs with zero reward/discount and mark the step as masked
            assert self.auto_reset == False
            # step after the game is over
            reward = self.float_dtype(0)
            discount = self.float_dtype(0)
            reset = self.float_dtype(0)
            self._output = EnvOutput(self._output.obs, reward, discount, reset)
            self._info['mask'] = False
            return self._output
        assert not np.any(np.isnan(action)), action
        obs, reward, done, info = self.env.step(action, **kwargs)
        # accumulate score/epslen unless the env reports them itself
        if 'score' in info:
            self._score = info['score']
        else:
            self._score += info.get('reward', reward)
        if 'epslen' in info:
            self._epslen = info['epslen']
        else:
            self._epslen += info.get('frame_skip', 1)
        self._game_over = bool(info.get('game_over', done))
        if self._epslen >= self.max_episode_steps:
            # truncation: the episode ends; it counts as done only when
            # timeout_done is set
            self._game_over = True
            done = self.timeout_done
            info['timeout'] = True
        reward = self.float_dtype(reward)
        discount = self.float_dtype(1-done)
        # we expect auto-reset environments, which artificially reset due to life loss,
        # return reset in info when resetting
        reset = self.float_dtype(info.get('reset', False))
        # store previous env output for later retrieval
        info['prev_env_output'] = GymOutput(obs, reward, discount)
        assert isinstance(self._game_over, bool), self._game_over
        # reset env
        if self._game_over:
            info['game_over'] = self._game_over
            info['score'] = self._score
            info['epslen'] = self._epslen
            if self.auto_reset:
                # when resetting, we override the obs and reset but keep the others
                obs, _, _, reset = self._reset()
        obs = self.observation(obs)
        self._info = info
        self._output = EnvOutput(obs, reward, discount, reset)
        return self._output
class MASimEnvStats(EnvStatsBase):
    """ Wrapper for multi-agent simultaneous environments
    <MASimEnvStats> expects agent-wise reward and done signal per step.
    Otherwise, go for <EnvStats>
    """
    manual_reset_warning = True
    def __init__(self,
            env,
            max_episode_steps=None,
            timeout_done=False,
            auto_reset=True):
        super().__init__(
            env,
            max_episode_steps=max_episode_steps,
            timeout_done=timeout_done,
            auto_reset=auto_reset
        )
        # expose the multi-agent specific spec entries alongside the base stats
        self._stats.update({
            'global_state_shape': self.global_state_shape,
            'global_state_dtype': self.global_state_dtype,
            'use_life_mask': self.use_life_mask,
            'use_action_mask': self.use_action_mask,
        })
    def reset(self):
        """ Explicit reset; permanently disables auto-reset (evaluation). """
        if self.auto_reset:
            self.auto_reset = False
            # NOTE(review): checks/clears EnvStats.manual_reset_warning,
            # not this class's own flag -- left unchanged since the two
            # classes currently share the one-time warning.
            if EnvStats.manual_reset_warning:
                do_logging('Explicitly resetting turns off auto-reset. Maker sure this is done intentionally at evaluation', logger=logger)
                EnvStats.manual_reset_warning = False
        if not np.any(self._output.reset):
            return self._reset()
        else:
            logger.debug('Repetitively calling reset results in no environment interaction')
            return self._output
    def _reset(self):
        obs = super()._reset()
        if len(obs) == 1:
            assert False, obs
        # agent-wise reward/discount/reset vectors
        reward = np.zeros(self.n_agents, self.float_dtype)
        discount = np.ones(self.n_agents, self.float_dtype)
        reset = np.ones(self.n_agents, self.float_dtype)
        self._output = EnvOutput(obs, reward, discount, reset)
        return self._output
    def step(self, action, **kwargs):
        """ Step the env; returns EnvOutput with agent-wise fields. """
        if self.game_over():
            assert self.auto_reset == False
            # step after the game is over: re-emit the last obs with
            # zeroed reward/discount/reset and mark all agents masked
            reward = np.zeros_like(self._output.reward, self.float_dtype)
            discount = np.zeros_like(self._output.discount, self.float_dtype)
            reset = np.zeros_like(self._output.reset, self.float_dtype)
            self._output = EnvOutput(self._output.obs, reward, discount, reset)
            # np.bool was a deprecated alias of the builtin bool and was
            # removed in NumPy 1.24; use bool directly.
            self._info['mask'] = np.zeros(self.n_agents, bool)
            return self._output
        # assert not np.any(np.isnan(action)), action
        obs, reward, done, info = self.env.step(action, **kwargs)
        # define score, epslen, and game_over in info as multi-agent environments may vary in metrics
        self._score = info['score']
        self._epslen = info['epslen']
        self._game_over = info['game_over']
        if self._epslen >= self.max_episode_steps:
            self._game_over = True
            if self.timeout_done:
                done = np.ones_like(done)
                info['timeout'] = True
        discount = 1-np.array(done, self.float_dtype)
        # store previous env output for later retrieval
        info['prev_env_output'] = GymOutput(obs, reward, discount)
        # reset env
        if self._game_over and self.auto_reset:
            # when resetting, we override the obs and reset but keep the others
            obs, _, _, reset = self._reset()
        else:
            reset = np.zeros(self.n_agents, self.float_dtype)
        obs = self.observation(obs)
        self._info = info
        self._output = EnvOutput(obs, reward, discount, reset)
        # assert np.all(done) == info.get('game_over', False), (reset, info['game_over'])
        # assert np.all(reset) == info.get('game_over', False), (reset, info['game_over'])
        return self._output
def get_wrapper_by_name(env, classname):
    """ Walk down the wrapper chain via the `env` attribute and return the
    first wrapper whose class name equals `classname`; return None when the
    chain is exhausted without a match (no error is raised). """
    current = env
    while current is not None:
        if current.__class__.__name__ == classname:
            return current
        current = getattr(current, 'env', None)
    return None
if __name__ == '__main__':
    # Manual smoke test: drive a SMAC env with random actions, printing
    # (discount, reset) every step and score/epslen at episode boundaries.
    from env.func import create_env
    env = create_env(dict(
        name='smac_3s5z',
        seed=0
    ))
    for i in range(10000):
        a = env.random_action()
        out = env.step(a)
        print(out[2:])
        if np.all(out.reset):
            info = env.info()
            print(info['score'], info['epslen'])
| [
"utility.utils.convert_dtype",
"numpy.ones",
"numpy.clip",
"numpy.isnan",
"numpy.tile",
"utility.utils.infer_dtype",
"collections.deque",
"core.log.do_logging",
"numpy.zeros_like",
"cv2.cvtColor",
"numpy.isfinite",
"env.typing.GymOutput",
"matplotlib.pyplot.subplots",
"numpy.stack",
"mat... | [((259, 286), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (279, 286), False, 'import cv2\n'), ((296, 323), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (313, 323), False, 'import logging\n'), ((1874, 1934), 'numpy.zeros', 'np.zeros', (['((2,) + env.observation_space.shape)'], {'dtype': 'np.uint8'}), '((2,) + env.observation_space.shape, dtype=np.uint8)\n', (1882, 1934), True, 'import numpy as np\n'), ((3139, 3189), 'numpy.where', 'np.where', (['self._act_mask', 'env.action_space.low', '(-1)'], {}), '(self._act_mask, env.action_space.low, -1)\n', (3147, 3189), True, 'import numpy as np\n'), ((3211, 3261), 'numpy.where', 'np.where', (['self._act_mask', 'env.action_space.high', '(1)'], {}), '(self._act_mask, env.action_space.high, 1)\n', (3219, 3261), True, 'import numpy as np\n'), ((3444, 3487), 'gym.spaces.Box', 'gym.spaces.Box', (['low', 'high'], {'dtype': 'np.float32'}), '(low, high, dtype=np.float32)\n', (3458, 3487), False, 'import gym\n'), ((3621, 3663), 'numpy.where', 'np.where', (['self._act_mask', 'original', 'action'], {}), '(self._act_mask, original, action)\n', (3629, 3663), True, 'import numpy as np\n'), ((3884, 3974), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(*original_space.shape[:2], 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(*original_space.shape[:2], 1), dtype\n =np.uint8)\n', (3898, 3974), False, 'import gym\n'), ((4261, 4298), 'cv2.cvtColor', 'cv2.cvtColor', (['obs', 'cv2.COLOR_RGB2GRAY'], {}), '(obs, cv2.COLOR_RGB2GRAY)\n', (4273, 4298), False, 'import cv2\n'), ((4313, 4336), 'numpy.expand_dims', 'np.expand_dims', (['obs', '(-1)'], {}), '(obs, -1)\n', (4327, 4336), True, 'import numpy as np\n'), ((5590, 5683), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(w, h, c + self._residual_channel)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(w, h, c + self._residual_channel),\n 
dtype=np.uint8)\n', (5604, 5683), False, 'import gym\n'), ((5801, 5864), 'numpy.zeros', 'np.zeros', (['(w, h, self._residual_channel * (self._distance + 1))'], {}), '((w, h, self._residual_channel * (self._distance + 1)))\n', (5809, 5864), True, 'import numpy as np\n'), ((6268, 6321), 'numpy.roll', 'np.roll', (['self._buff', '(-self._residual_channel)'], {'axis': '(-1)'}), '(self._buff, -self._residual_channel, axis=-1)\n', (6275, 6321), True, 'import numpy as np\n'), ((6701, 6746), 'numpy.tile', 'np.tile', (['buff_obs', '[1, 1, self._distance + 1]'], {}), '(buff_obs, [1, 1, self._distance + 1])\n', (6708, 6746), True, 'import numpy as np\n'), ((7146, 7196), 'numpy.squeeze', 'np.squeeze', (['res_obs[..., -self._residual_channel:]'], {}), '(res_obs[..., -self._residual_channel:])\n', (7156, 7196), True, 'import numpy as np\n'), ((7216, 7251), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(6)'], {'figsize': '(20, 6)'}), '(1, 6, figsize=(20, 6))\n', (7228, 7251), True, 'import matplotlib.pyplot as plt\n'), ((7982, 7992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7990, 7992), True, 'import matplotlib.pyplot as plt\n'), ((8371, 8419), 'numpy.zeros', 'np.zeros', (['(*low.shape[:-1], 1)'], {'dtype': 'np.float32'}), '((*low.shape[:-1], 1), dtype=np.float32)\n', (8379, 8419), True, 'import numpy as np\n'), ((8522, 8572), 'numpy.concatenate', 'np.concatenate', (['[low, reward_channel_low]'], {'axis': '(-1)'}), '([low, reward_channel_low], axis=-1)\n', (8536, 8572), True, 'import numpy as np\n'), ((8588, 8640), 'numpy.concatenate', 'np.concatenate', (['[high, reward_channel_high]'], {'axis': '(-1)'}), '([high, reward_channel_high], axis=-1)\n', (8602, 8640), True, 'import numpy as np\n'), ((8674, 8725), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'low.dtype'}), '(low=low, high=high, dtype=low.dtype)\n', (8688, 8725), False, 'import gym\n'), ((8902, 8947), 'numpy.concatenate', 'np.concatenate', (['[ob, 
reward_channel]'], {'axis': '(-1)'}), '([ob, reward_channel], axis=-1)\n', (8916, 8947), True, 'import numpy as np\n'), ((10048, 10079), 'collections.deque', 'collections.deque', (['[]'], {'maxlen': 'k'}), '([], maxlen=k)\n', (10065, 10079), False, 'import collections\n'), ((10155, 10259), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(shp[:-1] + (shp[-1] * k,))', 'dtype': 'env.observation_space.dtype'}), '(low=0, high=255, shape=shp[:-1] + (shp[-1] * k,), dtype=env.\n observation_space.dtype)\n', (10169, 10259), False, 'import gym\n'), ((11649, 11701), 'utility.utils.infer_dtype', 'infer_dtype', (['self.observation_space.dtype', 'precision'], {}), '(self.observation_space.dtype, precision)\n', (11660, 11701), False, 'from utility.utils import infer_dtype, convert_dtype\n'), ((17474, 17513), 'env.typing.EnvOutput', 'EnvOutput', (['obs', 'reward', 'discount', 'reset'], {}), '(obs, reward, discount, reset)\n', (17483, 17513), False, 'from env.typing import EnvOutput, GymOutput\n'), ((18954, 18986), 'env.typing.GymOutput', 'GymOutput', (['obs', 'reward', 'discount'], {}), '(obs, reward, discount)\n', (18963, 18986), False, 'from env.typing import EnvOutput, GymOutput\n'), ((19483, 19522), 'env.typing.EnvOutput', 'EnvOutput', (['obs', 'reward', 'discount', 'reset'], {}), '(obs, reward, discount, reset)\n', (19492, 19522), False, 'from env.typing import EnvOutput, GymOutput\n'), ((21058, 21099), 'numpy.zeros', 'np.zeros', (['self.n_agents', 'self.float_dtype'], {}), '(self.n_agents, self.float_dtype)\n', (21066, 21099), True, 'import numpy as np\n'), ((21119, 21159), 'numpy.ones', 'np.ones', (['self.n_agents', 'self.float_dtype'], {}), '(self.n_agents, self.float_dtype)\n', (21126, 21159), True, 'import numpy as np\n'), ((21176, 21216), 'numpy.ones', 'np.ones', (['self.n_agents', 'self.float_dtype'], {}), '(self.n_agents, self.float_dtype)\n', (21183, 21216), True, 'import numpy as np\n'), ((21240, 21279), 'env.typing.EnvOutput', 
'EnvOutput', (['obs', 'reward', 'discount', 'reset'], {}), '(obs, reward, discount, reset)\n', (21249, 21279), False, 'from env.typing import EnvOutput, GymOutput\n'), ((22549, 22581), 'env.typing.GymOutput', 'GymOutput', (['obs', 'reward', 'discount'], {}), '(obs, reward, discount)\n', (22558, 22581), False, 'from env.typing import EnvOutput, GymOutput\n'), ((22938, 22977), 'env.typing.EnvOutput', 'EnvOutput', (['obs', 'reward', 'discount', 'reset'], {}), '(obs, reward, discount, reset)\n', (22947, 22977), False, 'from env.typing import EnvOutput, GymOutput\n'), ((23786, 23803), 'numpy.all', 'np.all', (['out.reset'], {}), '(out.reset)\n', (23792, 23803), True, 'import numpy as np\n'), ((1497, 1529), 'numpy.concatenate', 'np.concatenate', (['self._frames', '(-1)'], {}), '(self._frames, -1)\n', (1511, 1529), True, 'import numpy as np\n'), ((1562, 1588), 'numpy.stack', 'np.stack', (['self._frames', '(-1)'], {}), '(self._frames, -1)\n', (1570, 1588), True, 'import numpy as np\n'), ((3036, 3069), 'numpy.isfinite', 'np.isfinite', (['env.action_space.low'], {}), '(env.action_space.low)\n', (3047, 3069), True, 'import numpy as np\n'), ((3083, 3117), 'numpy.isfinite', 'np.isfinite', (['env.action_space.high'], {}), '(env.action_space.high)\n', (3094, 3117), True, 'import numpy as np\n'), ((3379, 3402), 'numpy.ones_like', 'np.ones_like', (['self._low'], {}), '(self._low)\n', (3391, 3402), True, 'import numpy as np\n'), ((6386, 6423), 'cv2.cvtColor', 'cv2.cvtColor', (['obs', 'cv2.COLOR_RGB2GRAY'], {}), '(obs, cv2.COLOR_RGB2GRAY)\n', (6398, 6423), False, 'import cv2\n'), ((7319, 7372), 'numpy.squeeze', 'np.squeeze', (['self._buff[:, :, :self._residual_channel]'], {}), '(self._buff[:, :, :self._residual_channel])\n', (7329, 7372), True, 'import numpy as np\n'), ((7437, 7491), 'numpy.squeeze', 'np.squeeze', (['self._buff[:, :, -self._residual_channel:]'], {}), '(self._buff[:, :, -self._residual_channel:])\n', (7447, 7491), True, 'import numpy as np\n'), ((8450, 8498), 
'numpy.ones', 'np.ones', (['(*high.shape[:-1], 1)'], {'dtype': 'np.float32'}), '((*high.shape[:-1], 1), dtype=np.float32)\n', (8457, 8498), True, 'import numpy as np\n'), ((9786, 9835), 'numpy.clip', 'np.clip', (['reward', 'self.reward_min', 'self.reward_max'], {}), '(reward, self.reward_min, self.reward_max)\n', (9793, 9835), True, 'import numpy as np\n'), ((10672, 10708), 'numpy.concatenate', 'np.concatenate', (['self.frames'], {'axis': '(-1)'}), '(self.frames, axis=-1)\n', (10686, 10708), True, 'import numpy as np\n'), ((11279, 11388), 'gym.spaces.Box', 'gym.spaces.Box', (['self.action_space.low', 'self.action_space.high', 'self.action_space.shape', 'self.float_dtype'], {}), '(self.action_space.low, self.action_space.high, self.\n action_space.shape, self.float_dtype)\n', (11293, 11388), False, 'import gym\n'), ((11785, 11837), 'utility.utils.infer_dtype', 'infer_dtype', (['self.action_space.dtype', 'self.precision'], {}), '(self.action_space.dtype, self.precision)\n', (11796, 11837), False, 'from utility.utils import infer_dtype, convert_dtype\n'), ((11946, 11988), 'utility.utils.convert_dtype', 'convert_dtype', (['observation', 'self.precision'], {}), '(observation, self.precision)\n', (11959, 11988), False, 'from utility.utils import infer_dtype, convert_dtype\n'), ((15616, 15671), 'core.log.do_logging', 'do_logging', (['"""Timeout is treated as done"""'], {'logger': 'logger'}), "('Timeout is treated as done', logger=logger)\n", (15626, 15671), False, 'from core.log import do_logging\n'), ((17848, 17900), 'env.typing.EnvOutput', 'EnvOutput', (['self._output.obs', 'reward', 'discount', 'reset'], {}), '(self._output.obs, reward, discount, reset)\n', (17857, 17900), False, 'from env.typing import EnvOutput, GymOutput\n'), ((20731, 20757), 'numpy.any', 'np.any', (['self._output.reset'], {}), '(self._output.reset)\n', (20737, 20757), True, 'import numpy as np\n'), ((21484, 21536), 'numpy.zeros_like', 'np.zeros_like', (['self._output.reward', 'self.float_dtype'], 
{}), '(self._output.reward, self.float_dtype)\n', (21497, 21536), True, 'import numpy as np\n'), ((21560, 21614), 'numpy.zeros_like', 'np.zeros_like', (['self._output.discount', 'self.float_dtype'], {}), '(self._output.discount, self.float_dtype)\n', (21573, 21614), True, 'import numpy as np\n'), ((21635, 21686), 'numpy.zeros_like', 'np.zeros_like', (['self._output.reset', 'self.float_dtype'], {}), '(self._output.reset, self.float_dtype)\n', (21648, 21686), True, 'import numpy as np\n'), ((21714, 21766), 'env.typing.EnvOutput', 'EnvOutput', (['self._output.obs', 'reward', 'discount', 'reset'], {}), '(self._output.obs, reward, discount, reset)\n', (21723, 21766), False, 'from env.typing import EnvOutput, GymOutput\n'), ((21800, 21832), 'numpy.zeros', 'np.zeros', (['self.n_agents', 'np.bool'], {}), '(self.n_agents, np.bool)\n', (21808, 21832), True, 'import numpy as np\n'), ((22425, 22457), 'numpy.array', 'np.array', (['done', 'self.float_dtype'], {}), '(done, self.float_dtype)\n', (22433, 22457), True, 'import numpy as np\n'), ((22810, 22851), 'numpy.zeros', 'np.zeros', (['self.n_agents', 'self.float_dtype'], {}), '(self.n_agents, self.float_dtype)\n', (22818, 22851), True, 'import numpy as np\n'), ((3303, 3326), 'numpy.ones_like', 'np.ones_like', (['self._low'], {}), '(self._low)\n', (3315, 3326), True, 'import numpy as np\n'), ((6594, 6631), 'cv2.cvtColor', 'cv2.cvtColor', (['obs', 'cv2.COLOR_RGB2GRAY'], {}), '(obs, cv2.COLOR_RGB2GRAY)\n', (6606, 6631), False, 'import cv2\n'), ((8779, 8825), 'numpy.ones', 'np.ones', (['(*ob.shape[:-1], 1)'], {'dtype': 'np.float32'}), '((*ob.shape[:-1], 1), dtype=np.float32)\n', (8786, 8825), True, 'import numpy as np\n'), ((16847, 16980), 'core.log.do_logging', 'do_logging', (['"""Explicitly resetting turns off auto-reset. Maker sure this is done intentionally at evaluation"""'], {'logger': 'logger'}), "(\n 'Explicitly resetting turns off auto-reset. 
Maker sure this is done intentionally at evaluation'\n , logger=logger)\n", (16857, 16980), False, 'from core.log import do_logging\n'), ((17999, 18015), 'numpy.isnan', 'np.isnan', (['action'], {}), '(action)\n', (18007, 18015), True, 'import numpy as np\n'), ((20538, 20671), 'core.log.do_logging', 'do_logging', (['"""Explicitly resetting turns off auto-reset. Maker sure this is done intentionally at evaluation"""'], {'logger': 'logger'}), "(\n 'Explicitly resetting turns off auto-reset. Maker sure this is done intentionally at evaluation'\n , logger=logger)\n", (20548, 20671), False, 'from core.log import do_logging\n'), ((22350, 22368), 'numpy.ones_like', 'np.ones_like', (['done'], {}), '(done)\n', (22362, 22368), True, 'import numpy as np\n'), ((12111, 12143), 'utility.utils.convert_dtype', 'convert_dtype', (['v', 'self.precision'], {}), '(v, self.precision)\n', (12124, 12143), False, 'from utility.utils import infer_dtype, convert_dtype\n')] |
import numpy as np
import numba as nb
import mcmc.util as util
import mcmc.util_2D as u2
import mcmc.fourier as fourier
# Pre-declare the numba type of the Fourier helper so it can be stored as a
# typed attribute inside the jit-compiled Lmatrix class below.
fourier_type = nb.deferred_type()
fourier_type.define(fourier.FourierAnalysis.class_type.instance_type)
# Attribute-type specification required by nb.jitclass: two complex,
# C-contiguous matrices plus the Fourier helper and a scalar.
spec = [
    ('fourier',fourier_type),
    ('sqrt_beta',nb.float64),
    ('current_L',nb.complex128[:,::1]),
    ('latest_computed_L',nb.complex128[:,::1]),
]
@nb.jitclass(spec)
class Lmatrix():
    """Jit-compiled holder for the L matrix used by the MCMC code.

    Keeps two matrices side by side: ``current_L`` for the accepted state and
    ``latest_computed_L`` for the most recent proposal, so a rejected proposal
    can be discarded without recomputation — presumably consumed by a
    Metropolis-style accept/reject step (confirm with the sampler code).
    """
    def __init__(self,f,sqrt_beta):
        # f: jit-compiled fourier.FourierAnalysis instance (see `spec`).
        self.fourier = f
        self.sqrt_beta = sqrt_beta
        #initialize self.lastComputedL as zero
        self.current_L = np.zeros((2*self.fourier.basis_number-1,2*self.fourier.basis_number-1),dtype=np.complex128)
        self.latest_computed_L = self.current_L
    def construct_from(self,uHalf):
        """Build L from the half-spectrum `uHalf` using the stored sqrt_beta."""
        assert uHalf.shape[0] == self.fourier.basis_number
        Ku_pow_min_nu = self.fourier.constructMatexplicit(uHalf,util.kappa_pow_min_nu)
        Ku_pow_half = self.fourier.constructMatexplicit(uHalf,util.kappa_pow_half)
        L = ( util.matMulti(Ku_pow_min_nu,self.fourier.Dmatrix) - Ku_pow_half)/self.sqrt_beta
        #set LatestComputedL as L, but dont change currentL
        self.latest_computed_L = L
        return L
    def construct_from_with_sqrt_beta(self,uHalf,sqrt_beta):
        """Same as `construct_from`, but with an explicit sqrt_beta override."""
        assert uHalf.shape[0] == self.fourier.basis_number
        Ku_pow_min_nu = self.fourier.constructMatexplicit(uHalf,util.kappa_pow_min_nu)
        Ku_pow_half = self.fourier.constructMatexplicit(uHalf,util.kappa_pow_half)
        L = ( util.matMulti(Ku_pow_min_nu,self.fourier.Dmatrix) - Ku_pow_half)/sqrt_beta
        #set LatestComputedL as L, but dont change currentL
        self.latest_computed_L = L
        return L
    def logDet(self,new):
        """
        # The determinant of a Hermitian matrix is real;the determinant is the product of the matrix's eigenvalues
        # L^dagger L is Hermitian
        """
        # slogdet returns (sign, log|det|); only the log-magnitude is used here.
        if new:
            return (np.linalg.slogdet(self.latest_computed_L)[1])
        else:
            return (np.linalg.slogdet(self.current_L)[1])
    def set_current_L_to_latest(self):
        # Accept the latest proposal: copy it into the current state.
        self.current_L = self.latest_computed_L.copy()
    def is_current_L_equals_to_the_latest(self):
        # Element-wise comparison; True only if the two matrices match exactly.
        return np.all(self.current_L == self.latest_computed_L)
# fourier_type = nb.deferred_type()
# fourier_type.define(fourier.FourierAnalysis_2D.class_type.instance_type)
# spec = [
# ('fourier',fourier_type),
# ('sqrt_beta',nb.float64),
# ('current_L',nb.complex128[:,::1]),
# ('latest_computed_L',nb.complex128[:,::1]),
# ]
# @nb.jitclass(spec)
# class Lmatrix_2D:
# def __init__(self,f,sqrt_beta):
# self.fourier = f
# self.sqrt_beta = sqrt_beta
# #initialize self.lastComputedL as zero
# self.current_L = np.zeros((self.fourier.basis_number_2D_sym,self.fourier.basis_number_2D_sym),dtype=np.complex128)
# self.latest_computed_L = self.current_L
# def construct_from(self,uHalf):
# assert uHalf.shape[0] == self.fourier.basis_number
# Ku_pow_min_nu = self.fourier.constructMatexplicit(uHalf,u2.kappa_pow_min_nu)
# Ku_pow_half = self.fourier.constructMatexplicit(uHalf,u2.kappa_pow_half)
# L = ( util.matMulti(Ku_pow_min_nu,self.fourier.Dmatrix) - Ku_pow_half)/self.sqrt_beta
# #set LatestComputedL as L, but dont change currentL
# self.latest_computed_L = L
# return L
# def logDet(self,new):
# """
# # The determinant of a Hermitian matrix is real;the determinant is the product of the matrix's eigenvalues
# # L^dagger L is Hermitian
# """
# if new:
# return (np.linalg.slogdet(self.latest_computed_L)[1])
# else:
# return (np.linalg.slogdet(self.current_L)[1])
# def set_current_L_to_latest(self):
# self.current_L = self.latest_computed_L
# def is_current_L_equals_to_the_latest(self):
# return np.all(self.current_L == self.latest_computed_L)
| [
"mcmc.util.matMulti",
"numba.jitclass",
"numpy.zeros",
"numba.deferred_type",
"numpy.linalg.slogdet",
"numpy.all"
] | [((136, 154), 'numba.deferred_type', 'nb.deferred_type', ([], {}), '()\n', (152, 154), True, 'import numba as nb\n'), ((388, 405), 'numba.jitclass', 'nb.jitclass', (['spec'], {}), '(spec)\n', (399, 405), True, 'import numba as nb\n'), ((592, 697), 'numpy.zeros', 'np.zeros', (['(2 * self.fourier.basis_number - 1, 2 * self.fourier.basis_number - 1)'], {'dtype': 'np.complex128'}), '((2 * self.fourier.basis_number - 1, 2 * self.fourier.basis_number -\n 1), dtype=np.complex128)\n', (600, 697), True, 'import numpy as np\n'), ((2255, 2303), 'numpy.all', 'np.all', (['(self.current_L == self.latest_computed_L)'], {}), '(self.current_L == self.latest_computed_L)\n', (2261, 2303), True, 'import numpy as np\n'), ((1021, 1071), 'mcmc.util.matMulti', 'util.matMulti', (['Ku_pow_min_nu', 'self.fourier.Dmatrix'], {}), '(Ku_pow_min_nu, self.fourier.Dmatrix)\n', (1034, 1071), True, 'import mcmc.util as util\n'), ((1527, 1577), 'mcmc.util.matMulti', 'util.matMulti', (['Ku_pow_min_nu', 'self.fourier.Dmatrix'], {}), '(Ku_pow_min_nu, self.fourier.Dmatrix)\n', (1540, 1577), True, 'import mcmc.util as util\n'), ((1959, 2000), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['self.latest_computed_L'], {}), '(self.latest_computed_L)\n', (1976, 2000), True, 'import numpy as np\n'), ((2039, 2072), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['self.current_L'], {}), '(self.current_L)\n', (2056, 2072), True, 'import numpy as np\n')] |
# Copyright (c) Jack.Wang. All rights reserved.
import os
import cv2
import numpy as np
from loguru import logger
from argparse import ArgumentParser
import torch
from mmcls.apis import init_model
from mmcv.parallel import collate, scatter
from mmcls.datasets.pipelines import Compose
from tools.custom_tools.utils import mkdir
def get_results(model, inputs, CLASSES) -> dict:
    """Run the classifier on one image and pick the best class per attribute group.

    Args:
        model: initialized mmcls classifier (provides `cfg` and `CLASSES`).
        inputs: image file path (str) or an already-loaded image array.
        CLASSES: list of per-attribute class-name lists. The flat score
            vector is assumed to be the concatenation of one score block per
            attribute group, in this order — TODO confirm against the model head.

    Returns:
        dict with parallel lists `topk_pred_score`, `topk_pred_index`,
        `topk_pred_class`, one entry per attribute group.

    NOTE(review): this mutates `model.cfg.data.test.pipeline` in place, so
    alternating str/array inputs toggles the pipeline between calls.
    """
    cfg = model.cfg
    device = next(model.parameters()).device # model device
    # build the data pipeline
    if isinstance(inputs, str):
        # Reading from disk: ensure the pipeline starts by loading the file.
        if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
            cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
        data = dict(img_info=dict(filename=inputs), img_prefix=None)
    else:
        # In-memory image: drop the file-loading step if present.
        if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
            cfg.data.test.pipeline.pop(0)
        data = dict(img=inputs)
    test_pipeline = Compose(cfg.data.test.pipeline)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    with torch.no_grad():
        scores = model(return_loss=False, **data)
    topk_pred_score, topk_pred_index, topk_pred_class, cnt = [], [], [], 0
    for c in range(len(CLASSES)):
        # Slice out this attribute group's score block and take its argmax.
        score = scores[0][cnt: cnt+len(CLASSES[c])]
        pred_score = np.max(score)
        pred_index = np.argmax(score) + cnt
        # Low-confidence predictions (<= 0.5) fall back to the label 'others'.
        pred_label = model.CLASSES[pred_index] if pred_score > 0.5 else 'others'
        topk_pred_score.append(pred_score)
        topk_pred_index.append(pred_index)
        topk_pred_class.append(pred_label)
        cnt += len(CLASSES[c])
    results = {
        'topk_pred_score': topk_pred_score,
        'topk_pred_index': topk_pred_index,
        'topk_pred_class': topk_pred_class,
    }
    logger.info(f"Inference results for {inputs} as follow: \n{results}")
    return results
def save_image(inputs, output, results, ATTRIBUTES, is_save=False):
    """Overlay one text line per predicted attribute on the image.

    `inputs` may be a file path (the image is read from disk) or an already
    loaded image array, which is annotated in place. When `is_save` is True
    the annotated image is written into `output` under the original filename.
    """
    if isinstance(inputs, str):
        canvas = cv2.imread(inputs)
    else:
        canvas = inputs
    classes = results['topk_pred_class']
    scores = results['topk_pred_score']
    for idx, attr in enumerate(ATTRIBUTES):
        text = "{}: {}={:.2f}%".format(attr, classes[idx], scores[idx] * 100)
        # Stack the labels vertically, 30 px apart, down the left edge.
        position = (10, idx * 30 + 25)
        cv2.putText(canvas, text, position,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
    if is_save:
        dst = os.path.join(output, os.path.split(inputs)[-1])
        cv2.imwrite(dst, canvas)
        logger.info(f'Successful saved to {dst}')
def save_video(model, inputs, output, CLASSES, ATTRIBUTES):
    """Annotate every frame image under `inputs` and assemble them into an
    mp4 video written to `output`.

    Args:
        model: initialized mmcls classifier.
        inputs (str): directory containing the frame images.
        output (str): directory the resulting `<dirname>.mp4` is written into.
        CLASSES (list[list[str]]): per-attribute class-name lists.
        ATTRIBUTES (list[str]): attribute names used for the overlay text.
    """
    union_sizes = (224, 224)
    fps = 1  # playback rate of the output clip, frames per second
    dst = os.path.join(output, os.path.split(inputs)[-1] + '.mp4')
    video_writer = cv2.VideoWriter(
        dst,
        cv2.VideoWriter_fourcc(*'mp4v'),
        fps,
        union_sizes,
    )
    # Sort filenames so the frame order is deterministic (os.listdir order
    # is arbitrary and filesystem dependent).
    for filename in sorted(os.listdir(inputs)):
        cur_file = os.path.join(inputs, filename)
        img = cv2.imread(cur_file)
        img = cv2.resize(img, union_sizes)
        results = get_results(model, cur_file, CLASSES)
        save_image(img, output, results, ATTRIBUTES, False)
        # Bug fix: the original wrote every frame twice (once while
        # collecting and once more in a replay loop), doubling the clip.
        video_writer.write(img)
    video_writer.release()
    logger.info(f'Successful saved to {dst}')
def main():
    """CLI entry point: annotate a single image file, or every frame in a
    directory, with the predicted vehicle type/color attributes."""
    parser = ArgumentParser()
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('src', default=None, help='Image input path')
    parser.add_argument('dst', default=False, help='Debug')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    args = parser.parse_args()
    attributes = ['types', 'colors']
    classes = [
        ['car', 'suv', 'truck', 'van'],
        ['black', 'blue', 'coffee', 'gray', 'green', 'orange', 'red', 'white', 'yellow']
    ]
    # Ensure the output directory exists without wiping existing files.
    mkdir(args.dst, is_remove=False)
    # Build the classifier from config + checkpoint on the requested device.
    model = init_model(args.config, args.checkpoint, args.device)
    if os.path.isfile(args.src):
        # Single image: run inference, draw the labels, save the result.
        predictions = get_results(model, args.src, classes)
        save_image(args.src, args.dst, predictions, attributes, True)
    elif os.path.isdir(args.src):
        # Directory of frames: assemble an annotated video instead.
        save_video(model, args.src, args.dst, classes, attributes)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"numpy.argmax",
"tools.custom_tools.utils.mkdir",
"os.path.isfile",
"mmcls.apis.init_model",
"torch.no_grad",
"os.path.join",
"cv2.imwrite",
"mmcv.parallel.scatter",
"numpy.max",
"cv2.resize",
"mmcls.datasets.pipelines.Compose",
"mmcv.pa... | [((912, 943), 'mmcls.datasets.pipelines.Compose', 'Compose', (['cfg.data.test.pipeline'], {}), '(cfg.data.test.pipeline)\n', (919, 943), False, 'from mmcls.datasets.pipelines import Compose\n'), ((986, 1020), 'mmcv.parallel.collate', 'collate', (['[data]'], {'samples_per_gpu': '(1)'}), '([data], samples_per_gpu=1)\n', (993, 1020), False, 'from mmcv.parallel import collate, scatter\n'), ((1896, 1968), 'loguru.logger.info', 'logger.info', (['f"""Inference results for {inputs} as follow: \n{results}"""'], {}), '(f"""Inference results for {inputs} as follow: \n{results}""")\n', (1907, 1968), False, 'from loguru import logger\n'), ((2984, 3002), 'os.listdir', 'os.listdir', (['inputs'], {}), '(inputs)\n', (2994, 3002), False, 'import os\n'), ((3418, 3459), 'loguru.logger.info', 'logger.info', (['f"""Successful saved to {dst}"""'], {}), "(f'Successful saved to {dst}')\n", (3429, 3459), False, 'from loguru import logger\n'), ((3487, 3503), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3501, 3503), False, 'from argparse import ArgumentParser\n'), ((4072, 4104), 'tools.custom_tools.utils.mkdir', 'mkdir', (['args.dst'], {'is_remove': '(False)'}), '(args.dst, is_remove=False)\n', (4077, 4104), False, 'from tools.custom_tools.utils import mkdir\n'), ((4182, 4235), 'mmcls.apis.init_model', 'init_model', (['args.config', 'args.checkpoint', 'args.device'], {}), '(args.config, args.checkpoint, args.device)\n', (4192, 4235), False, 'from mmcls.apis import init_model\n'), ((4261, 4285), 'os.path.isfile', 'os.path.isfile', (['args.src'], {}), '(args.src)\n', (4275, 4285), False, 'import os\n'), ((1149, 1164), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1162, 1164), False, 'import torch\n'), ((2066, 2084), 'cv2.imread', 'cv2.imread', (['inputs'], {}), '(inputs)\n', (2076, 2084), False, 'import cv2\n'), ((2341, 2437), 'cv2.putText', 'cv2.putText', (['img', 'label', '(10, i * 30 + 25)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 255)', '(2)'], 
{}), '(img, label, (10, i * 30 + 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (\n 0, 255, 255), 2)\n', (2352, 2437), False, 'import cv2\n'), ((2556, 2577), 'cv2.imwrite', 'cv2.imwrite', (['dst', 'img'], {}), '(dst, img)\n', (2567, 2577), False, 'import cv2\n'), ((2586, 2627), 'loguru.logger.info', 'logger.info', (['f"""Successful saved to {dst}"""'], {}), "(f'Successful saved to {dst}')\n", (2597, 2627), False, 'from loguru import logger\n'), ((2862, 2893), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (2884, 2893), False, 'import cv2\n'), ((3023, 3053), 'os.path.join', 'os.path.join', (['inputs', 'filename'], {}), '(inputs, filename)\n', (3035, 3053), False, 'import os\n'), ((3068, 3088), 'cv2.imread', 'cv2.imread', (['cur_file'], {}), '(cur_file)\n', (3078, 3088), False, 'import cv2\n'), ((3103, 3131), 'cv2.resize', 'cv2.resize', (['img', 'union_sizes'], {}), '(img, union_sizes)\n', (3113, 3131), False, 'import cv2\n'), ((4506, 4529), 'os.path.isdir', 'os.path.isdir', (['args.src'], {}), '(args.src)\n', (4519, 4529), False, 'import os\n'), ((1112, 1135), 'mmcv.parallel.scatter', 'scatter', (['data', '[device]'], {}), '(data, [device])\n', (1119, 1135), False, 'from mmcv.parallel import collate, scatter\n'), ((1415, 1428), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (1421, 1428), True, 'import numpy as np\n'), ((1454, 1470), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (1463, 1470), True, 'import numpy as np\n'), ((2521, 2542), 'os.path.split', 'os.path.split', (['inputs'], {}), '(inputs)\n', (2534, 2542), False, 'import os\n'), ((2770, 2791), 'os.path.split', 'os.path.split', (['inputs'], {}), '(inputs)\n', (2783, 2791), False, 'import os\n')] |
"""This script transforms MNIST dataset provided
available at http://yann.lecun.com/exdb/mnist/
into a pickled version optimised for the neural
network."""
import numpy as np
import pickle
from Dataset import OriginalMNISTDataset, OptimizedDataset
def _as_sample(pixels, label):
    """Turn one raw MNIST row into a (column image scaled to [0, 1],
    one-hot label array, integer label) triple."""
    return (np.array([pixels]).T.astype(dtype="double") / 255, labelAsArray(label), label)


def _save_pickled(path, train, valid, tests):
    """Bundle the given splits into an OptimizedDataset and pickle it to `path`."""
    optim = OptimizedDataset()
    optim.train = train
    optim.valid = valid
    optim.tests = tests
    # Context manager guarantees the handle is closed even if dump fails.
    with open(path, 'bw') as f_write:
        pickle.dump(optim, f_write, protocol=4, fix_imports=False)


def generateOptimizedDataSet():
    """Transform the raw MNIST dataset into pickled train/valid/test splits.

    Flattens each image into a column vector, scales pixels into [0, 1],
    pairs every sample with its one-hot label, and writes three pickles:
    the full dataset plus a small and a medium subset for debugging.
    """
    origin = OriginalMNISTDataset()
    print("Reshaping samples...")
    shape = origin.trainX.shape
    origin.trainX = origin.trainX.reshape(shape[0], shape[1] * shape[2])
    shape = origin.testX.shape
    origin.testX = origin.testX.reshape(shape[0], shape[1] * shape[2])
    print("Samples reshaped.")
    print()
    print("Splitting dataset into train/valid/tests, divide pixel values by 255...")
    ratio = 0.96  # fraction of the training set used for training
    # Same truncating-float arithmetic as before, so split sizes are unchanged.
    n_train = int(len(origin.trainX) * ratio)
    n_valid = int(len(origin.trainX) * (1 - ratio))
    train = [_as_sample(origin.trainX[i], origin.trainY[i]) for i in range(n_train)]
    valid = [_as_sample(origin.trainX[j], origin.trainY[j])
             for j in range(n_train, n_train + n_valid)]
    tests = [_as_sample(origin.testX[i], origin.testY[i])
             for i in range(len(origin.testX))]
    print("Done:")
    print("\ttraining set:", len(train), "samples")
    print("\tvalidation set:", len(valid), "samples")
    print("\ttesting set:", len(tests), "samples")
    print("\nSaving entire dataset...")
    _save_pickled('../data/pickledMNIST/data.pkl', train, valid, tests)
    print("Done")
    print("\nSaving smaller dataset for debug...")
    _save_pickled('../data/pickledSmallMNIST/data.pkl', train[:180], valid[:20], tests)
    print("Done")
    print("\nSaving medium dataset for debug...")
    _save_pickled('../data/pickledMediumMNIST/data.pkl', train[:1800], valid[:200], tests)
    print("Done")
    print("\nSample of the dataset:\n", train[4])
def labelAsArray(label):
    """One-hot encode the digit `label` as a (10, 1) float column vector."""
    column = np.zeros((10, 1))
    column[label, 0] = 1
    return column
if __name__ == '__main__':
generateOptimizedDataSet()
| [
"pickle.dump",
"Dataset.OptimizedDataset",
"numpy.zeros",
"Dataset.OriginalMNISTDataset",
"numpy.array"
] | [((296, 318), 'Dataset.OriginalMNISTDataset', 'OriginalMNISTDataset', ([], {}), '()\n', (316, 318), False, 'from Dataset import OriginalMNISTDataset, OptimizedDataset\n'), ((1656, 1674), 'Dataset.OptimizedDataset', 'OptimizedDataset', ([], {}), '()\n', (1672, 1674), False, 'from Dataset import OriginalMNISTDataset, OptimizedDataset\n'), ((1810, 1868), 'pickle.dump', 'pickle.dump', (['optim', 'f_write'], {'protocol': '(4)', 'fix_imports': '(False)'}), '(optim, f_write, protocol=4, fix_imports=False)\n', (1821, 1868), False, 'import pickle\n'), ((1971, 1989), 'Dataset.OptimizedDataset', 'OptimizedDataset', ([], {}), '()\n', (1987, 1989), False, 'from Dataset import OriginalMNISTDataset, OptimizedDataset\n'), ((2140, 2198), 'pickle.dump', 'pickle.dump', (['optim', 'f_write'], {'protocol': '(4)', 'fix_imports': '(False)'}), '(optim, f_write, protocol=4, fix_imports=False)\n', (2151, 2198), False, 'import pickle\n'), ((2300, 2318), 'Dataset.OptimizedDataset', 'OptimizedDataset', ([], {}), '()\n', (2316, 2318), False, 'from Dataset import OriginalMNISTDataset, OptimizedDataset\n'), ((2472, 2530), 'pickle.dump', 'pickle.dump', (['optim', 'f_write'], {'protocol': '(4)', 'fix_imports': '(False)'}), '(optim, f_write, protocol=4, fix_imports=False)\n', (2483, 2530), False, 'import pickle\n'), ((2659, 2676), 'numpy.zeros', 'np.zeros', (['(10, 1)'], {}), '((10, 1))\n', (2667, 2676), True, 'import numpy as np\n'), ((939, 967), 'numpy.array', 'np.array', (['[origin.trainX[i]]'], {}), '([origin.trainX[i]])\n', (947, 967), True, 'import numpy as np\n'), ((1155, 1183), 'numpy.array', 'np.array', (['[origin.trainX[j]]'], {}), '([origin.trainX[j]])\n', (1163, 1183), True, 'import numpy as np\n'), ((1319, 1346), 'numpy.array', 'np.array', (['[origin.testX[i]]'], {}), '([origin.testX[i]])\n', (1327, 1346), True, 'import numpy as np\n')] |
import starry
import numpy as np
import matplotlib.pyplot as plt
import time
import os
from tqdm import tqdm
starry.config.quiet = True
ntimes = 100  # number of timed flux evaluations per configuration
alpha = 0.05  # opacity of the individual timing traces in the plots
def timeit(ydeg=10, nt=10, nw=100, vsini=50000.0):
    """Time `ntimes` flux evaluations of a starry.DopplerMap built with the
    given settings; return the per-call wall times in milliseconds."""
    wav = np.linspace(642.85, 643.15, nw)
    wav0 = np.linspace(642.00, 644.00, nw)
    dmap = starry.DopplerMap(
        ydeg=ydeg, nt=nt, wav=wav, wav0=wav0, lazy=False, vsini_max=vsini
    )
    # Randomize the spectrum and the map coefficients.
    dmap.spectrum = np.random.random(dmap.nw0)
    dmap[:, :] = np.random.randn(dmap.Ny)
    # Warm-up evaluation before the timed loop.
    dmap.flux()
    elapsed = np.zeros(ntimes)
    for trial in range(ntimes):
        tic = time.time()
        dmap.flux()
        elapsed[trial] = time.time() - tic
    # Seconds -> milliseconds.
    return elapsed * 1e3
# Set up
fig, ax = plt.subplots(2, 2, sharey=True, figsize=(8, 4))
fig.subplots_adjust(hspace=0.4, wspace=0.1)
ax[0, 0].set_ylim(0, 25)
# Versus spherical harmonic degree
ydegs = np.arange(1, 21)
times = np.zeros((len(ydegs), ntimes))
for n, ydeg in tqdm(
    enumerate(ydegs),
    total=len(ydegs),
    disable=os.getenv("CI", "false") == "true",
):
    times[n] = timeit(ydeg=ydeg)
# Individual runs as faint traces; the median as a solid line.
ax[0, 0].plot(ydegs, times, "C0-", lw=1, alpha=alpha)
ax[0, 0].plot(ydegs, np.median(times, axis=1), "C0-", lw=2)
ax[0, 0].set_xlabel("spherical harmonic degree", fontsize=12)
ax[0, 0].set_xticks([0, 5, 10, 15, 20])
ax[0, 0].set_xlim(0, 20)
ax[0, 0].set_yticks([0, 5, 10, 15, 20, 25])
# Versus number of epochs
nts = np.array(np.linspace(1, 51, 20), dtype=int)
times = np.zeros((len(nts), ntimes))
for n, nt in tqdm(
    enumerate(nts),
    total=len(nts),
    disable=os.getenv("CI", "false") == "true",
):
    times[n] = timeit(nt=nt)
ax[0, 1].plot(nts, times, "C0-", lw=1, alpha=alpha)
ax[0, 1].plot(nts, np.median(times, axis=1), "C0-", lw=2)
ax[0, 1].set_xlabel("number of epochs", fontsize=12)
ax[0, 1].set_xticks([0, 10, 20, 30, 40, 50])
ax[0, 1].set_xlim(0, 50)
# Versus number of wavelength bins
nws = np.array(np.logspace(1, 3, 20), dtype=int)
times = np.zeros((len(nws), ntimes))
for n, nw in tqdm(
    enumerate(nws),
    total=len(nws),
    disable=os.getenv("CI", "false") == "true",
):
    times[n] = timeit(nw=nw)
ax[1, 0].plot(nws, times, "C0-", lw=1, alpha=alpha)
ax[1, 0].plot(nws, np.median(times, axis=1), "C0-", lw=2)
ax[1, 0].set_xlabel("number of wavelength bins", fontsize=12)
ax[1, 0].set_xticks([0, 250, 500, 750, 1000])
ax[1, 0].set_xlim(0, 1000)
# Versus vsini
vsinis = np.linspace(1, 100, 20) * 1000
times = np.zeros((len(vsinis), ntimes))
for n, vsini in tqdm(
    enumerate(vsinis),
    total=len(vsinis),
    disable=os.getenv("CI", "false") == "true",
):
    times[n] = timeit(vsini=vsini)
ax[1, 1].plot(vsinis / 1000, times, "C0-", lw=1, alpha=alpha)
ax[1, 1].plot(vsinis / 1000, np.median(times, axis=1), "C0-", lw=2)
ax[1, 1].set_xlabel(r"$v\sin i$ [km/s]", fontsize=12)
ax[1, 1].set_xticks([0, 25, 50, 75, 100])
ax[1, 1].set_xlim(0, 100)
# Tweak appearance
for axis in ax.flatten():
    for tick in axis.get_xticklabels() + axis.get_yticklabels():
        tick.set_fontsize(10)
# Invisible full-figure axis used only to carry a shared y-axis label.
axl = fig.add_subplot(111)
axl.set_zorder(ax[0, 0].zorder - 1)
axl.spines["top"].set_color("none")
axl.spines["bottom"].set_color("none")
axl.spines["left"].set_color("none")
axl.spines["right"].set_color("none")
axl.set_yticks([])
axl.set_xticks([])
axl.set_ylabel("model evaluation time [ms]", labelpad=35, fontsize=14)
# Save
fig.savefig("runtime.pdf", bbox_inches="tight") | [
"numpy.random.randn",
"numpy.median",
"numpy.logspace",
"numpy.zeros",
"time.time",
"numpy.random.random",
"numpy.arange",
"starry.DopplerMap",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"os.getenv"
] | [((712, 759), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharey': '(True)', 'figsize': '(8, 4)'}), '(2, 2, sharey=True, figsize=(8, 4))\n', (724, 759), True, 'import matplotlib.pyplot as plt\n'), ((874, 890), 'numpy.arange', 'np.arange', (['(1)', '(21)'], {}), '(1, 21)\n', (883, 890), True, 'import numpy as np\n'), ((227, 258), 'numpy.linspace', 'np.linspace', (['(642.85)', '(643.15)', 'nw'], {}), '(642.85, 643.15, nw)\n', (238, 258), True, 'import numpy as np\n'), ((270, 299), 'numpy.linspace', 'np.linspace', (['(642.0)', '(644.0)', 'nw'], {}), '(642.0, 644.0, nw)\n', (281, 299), True, 'import numpy as np\n'), ((312, 400), 'starry.DopplerMap', 'starry.DopplerMap', ([], {'ydeg': 'ydeg', 'nt': 'nt', 'wav': 'wav', 'wav0': 'wav0', 'lazy': '(False)', 'vsini_max': 'vsini'}), '(ydeg=ydeg, nt=nt, wav=wav, wav0=wav0, lazy=False,\n vsini_max=vsini)\n', (329, 400), False, 'import starry\n'), ((430, 455), 'numpy.random.random', 'np.random.random', (['map.nw0'], {}), '(map.nw0)\n', (446, 455), True, 'import numpy as np\n'), ((472, 495), 'numpy.random.randn', 'np.random.randn', (['map.Ny'], {}), '(map.Ny)\n', (487, 495), True, 'import numpy as np\n'), ((530, 546), 'numpy.zeros', 'np.zeros', (['ntimes'], {}), '(ntimes)\n', (538, 546), True, 'import numpy as np\n'), ((1154, 1178), 'numpy.median', 'np.median', (['times'], {'axis': '(1)'}), '(times, axis=1)\n', (1163, 1178), True, 'import numpy as np\n'), ((1406, 1428), 'numpy.linspace', 'np.linspace', (['(1)', '(51)', '(20)'], {}), '(1, 51, 20)\n', (1417, 1428), True, 'import numpy as np\n'), ((1688, 1712), 'numpy.median', 'np.median', (['times'], {'axis': '(1)'}), '(times, axis=1)\n', (1697, 1712), True, 'import numpy as np\n'), ((1902, 1923), 'numpy.logspace', 'np.logspace', (['(1)', '(3)', '(20)'], {}), '(1, 3, 20)\n', (1913, 1923), True, 'import numpy as np\n'), ((2183, 2207), 'numpy.median', 'np.median', (['times'], {'axis': '(1)'}), '(times, axis=1)\n', (2192, 2207), True, 'import numpy as np\n'), 
((2383, 2406), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', '(20)'], {}), '(1, 100, 20)\n', (2394, 2406), True, 'import numpy as np\n'), ((2699, 2723), 'numpy.median', 'np.median', (['times'], {'axis': '(1)'}), '(times, axis=1)\n', (2708, 2723), True, 'import numpy as np\n'), ((591, 602), 'time.time', 'time.time', ([], {}), '()\n', (600, 602), False, 'import time\n'), ((648, 659), 'time.time', 'time.time', ([], {}), '()\n', (657, 659), False, 'import time\n'), ((1007, 1031), 'os.getenv', 'os.getenv', (['"""CI"""', '"""false"""'], {}), "('CI', 'false')\n", (1016, 1031), False, 'import os\n'), ((1549, 1573), 'os.getenv', 'os.getenv', (['"""CI"""', '"""false"""'], {}), "('CI', 'false')\n", (1558, 1573), False, 'import os\n'), ((2044, 2068), 'os.getenv', 'os.getenv', (['"""CI"""', '"""false"""'], {}), "('CI', 'false')\n", (2053, 2068), False, 'import os\n'), ((2534, 2558), 'os.getenv', 'os.getenv', (['"""CI"""', '"""false"""'], {}), "('CI', 'false')\n", (2543, 2558), False, 'import os\n')] |
import numpy as np
from src.decision_tree import DecisionTree
class Stacking:
    """Simple model-stacking ensemble.

    `tuples` is a list of (estimator, n_folds) pairs; `fit` trains each
    estimator and collects its test-set predictions, which `predict`
    combines by majority vote.
    """
    # NOTE(review): mutable default argument — the same DecisionTree instance
    # (created once at import time) is shared by every Stacking() created
    # without arguments.
    def __init__(self, tuples = [(DecisionTree(), 2)]):
        self.tuples = tuples
    def cross_validation_split(self, X,y, folds=3):
        """Return the leading (1 - 1/folds) fraction of X and y.

        NOTE(review): the fold-building loop below is dead code — its results
        are overwritten before the return — and it references `randrange`,
        which is never imported in this module, so entering the while-loop
        raises NameError. Verify intent before relying on this method.
        """
        dataset_split = list()
        dataset_splity = list()
        dataset_copy = list(X)
        dataset_copyy = list(y)
        fold_size = int(len(X) / folds)
        for i in range(folds):
            fold = list()
            foldy = list()
            while len(fold) < fold_size:
                index = randrange(len(dataset_copy))
                fold.append(dataset_copy)
                foldy.append(dataset_copyy)
            dataset_split.append(fold)
            dataset_splity.append(foldy)
        # Only this tail actually determines the return value: a pandas-style
        # head slice of X and y (assumes X/y support .iloc — TODO confirm).
        fold_size = len(X) - int(float(len(X))/float(folds))
        X_split,y_split = X.iloc[:fold_size],y.iloc[:fold_size]
        dataset_split = X_split
        dataset_splity = y_split
        #print(len(dataset_split),len(dataset_splity))
        return dataset_split, dataset_splity
    def fit(self,X_train, X_test, y_train):
        """Train every base estimator and return their test-set predictions.

        NOTE(review): `RandomForest`, `Xtrain_list` and `ytrain_list` are not
        defined in this module — the non-DecisionTree branches would raise
        NameError as written. Confirm the intended imports/variables.
        """
        Predictions = []
        for i in range(len(self.tuples)):
            Xtrain, ytrain = self.cross_validation_split(X_train, y_train, folds=self.tuples[i][1])
            #print(np.array(Xtrain).shape)
            for j in range(self.tuples[i][1]):
                if isinstance(self.tuples[i][0], DecisionTree):
                    ytrain_new = ytrain[0]
                    self.tuples[i][0].build_tree(Xtrain , ytrain_new)
                elif isinstance(self.tuples[i][0], RandomForest):
                    preds = self.tuples[i][0].fit_predict(Xtrain , ytrain, X_test)
                else:
                    self.tuples[i][0].fit(Xtrain_list , ytrain_list)
            if isinstance(self.tuples[i][0], DecisionTree):
                preds=self.tuples[i][0].predict_new(X_test)
            elif isinstance(self.tuples[i][0], RandomForest):
                preds=self.tuples[i][0].fit_predict(Xtrain,ytrain,X_test)
            else:
                preds=self.tuples[i][0].predict(X_test)
            Predictions.append(preds)
        return Predictions
    def predict(self,Predictions):
        """Majority-vote across the per-estimator prediction lists.

        NOTE(review): `stats` (presumably scipy.stats) is not imported in
        this module; this call raises NameError as written.
        """
        pred = stats.mode(Predictions)
        pred = np.array(pred.mode[0])
return pred | [
"numpy.array",
"src.decision_tree.DecisionTree"
] | [((1853, 1875), 'numpy.array', 'np.array', (['pred.mode[0]'], {}), '(pred.mode[0])\n', (1861, 1875), True, 'import numpy as np\n'), ((111, 125), 'src.decision_tree.DecisionTree', 'DecisionTree', ([], {}), '()\n', (123, 125), False, 'from src.decision_tree import DecisionTree\n')] |
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from unittest.mock import Mock
import numpy as np
from miplearn.classifiers import Classifier
from miplearn.classifiers.threshold import MinPrecisionThreshold
def test_threshold_dynamic() -> None:
    """MinPrecisionThreshold.fit should pick, per class, the smallest
    predicted-probability threshold that still yields the requested
    precision on the training labels."""
    proba = np.array(
        [
            [0.10, 0.90],
            [0.25, 0.75],
            [0.40, 0.60],
            [0.90, 0.10],
        ]
    )
    clf = Mock(spec=Classifier)
    clf.predict_proba = Mock(return_value=proba)

    x_train = np.array([[0], [1], [2], [3]])
    y_train = np.array(
        [
            [False, True],
            [False, True],
            [True, False],
            [True, False],
        ]
    )

    threshold = MinPrecisionThreshold(min_precision=[1.0, 1.0])
    threshold.fit(clf, x_train, y_train)
    assert threshold.predict(x_train) == [0.40, 0.75]
# threshold = MinPrecisionThreshold(min_precision=0.65)
# threshold.fit(clf, x_train, y_train)
# assert threshold.predict(x_train) == [0.0, 0.80]
# threshold = MinPrecisionThreshold(min_precision=0.50)
# threshold.fit(clf, x_train, y_train)
# assert threshold.predict(x_train) == [0.0, 0.70]
#
# threshold = MinPrecisionThreshold(min_precision=0.00)
# threshold.fit(clf, x_train, y_train)
# assert threshold.predict(x_train) == [0.0, 0.70]
| [
"miplearn.classifiers.threshold.MinPrecisionThreshold",
"unittest.mock.Mock",
"numpy.array"
] | [((444, 465), 'unittest.mock.Mock', 'Mock', ([], {'spec': 'Classifier'}), '(spec=Classifier)\n', (448, 465), False, 'from unittest.mock import Mock\n'), ((705, 735), 'numpy.array', 'np.array', (['[[0], [1], [2], [3]]'], {}), '([[0], [1], [2], [3]])\n', (713, 735), True, 'import numpy as np\n'), ((823, 893), 'numpy.array', 'np.array', (['[[False, True], [False, True], [True, False], [True, False]]'], {}), '([[False, True], [False, True], [True, False], [True, False]])\n', (831, 893), True, 'import numpy as np\n'), ((984, 1031), 'miplearn.classifiers.threshold.MinPrecisionThreshold', 'MinPrecisionThreshold', ([], {'min_precision': '[1.0, 1.0]'}), '(min_precision=[1.0, 1.0])\n', (1005, 1031), False, 'from miplearn.classifiers.threshold import MinPrecisionThreshold\n'), ((517, 577), 'numpy.array', 'np.array', (['[[0.1, 0.9], [0.25, 0.75], [0.4, 0.6], [0.9, 0.1]]'], {}), '([[0.1, 0.9], [0.25, 0.75], [0.4, 0.6], [0.9, 0.1]])\n', (525, 577), True, 'import numpy as np\n')] |
import numpy as np
import math
class Camera:
    """Pinhole camera derived from a Blender camera object.

    The camera matrix and field-of-view angle can be overridden (used for
    viewport rendering); when both overrides are supplied the Blender
    camera object is not touched.
    """

    def __init__(self, blender_cam, width, height, matrix=None, angle=None):
        aspect = width / height
        shutter_open, shutter_close = 0.0, 1.0
        focus_dist = 10.0
        aperture = 0.0
        # Field of view: taken from the Blender camera unless overridden.
        if angle is None:
            theta = blender_cam.data.angle / aspect
        else:
            theta = angle / aspect
        half_height = math.tan(theta / 2.0)
        cam_mat = matrix if matrix is not None else blender_cam.matrix_world
        # Camera basis vectors and position read column-wise from the 4x4 matrix.
        look_from = np.array([cam_mat[0][3], cam_mat[1][3], cam_mat[2][3]])
        self.u = np.array([cam_mat[0][0], cam_mat[1][0], cam_mat[2][0]])
        self.v = np.array([cam_mat[0][1], cam_mat[1][1], cam_mat[2][1]])
        w = np.array([cam_mat[0][2], cam_mat[1][2], cam_mat[2][2]])
        # Viewport extents and derived ray-generation vectors.
        viewport_height = 2.0 * half_height
        viewport_width = aspect * viewport_height
        self.origin = look_from
        self.horizontal = focus_dist * viewport_width * self.u
        self.vertical = focus_dist * viewport_height * self.v
        self.lower_left_corner = (self.origin - self.horizontal / 2.0
                                  - self.vertical / 2.0 - focus_dist * w)
        self.lens_radius = aperture / 2.0
        self.t0, self.t1 = shutter_open, shutter_close

    def get_data(self):
        """Return the vectors needed to generate camera rays."""
        return self.u, self.v, self.origin, self.horizontal, self.vertical, self.lower_left_corner
| [
"math.tan",
"numpy.array"
] | [((499, 520), 'math.tan', 'math.tan', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (507, 520), False, 'import math\n'), ((613, 668), 'numpy.array', 'np.array', (['[cam_mat[0][3], cam_mat[1][3], cam_mat[2][3]]'], {}), '([cam_mat[0][3], cam_mat[1][3], cam_mat[2][3]])\n', (621, 668), True, 'import numpy as np\n'), ((686, 741), 'numpy.array', 'np.array', (['[cam_mat[0][0], cam_mat[1][0], cam_mat[2][0]]'], {}), '([cam_mat[0][0], cam_mat[1][0], cam_mat[2][0]])\n', (694, 741), True, 'import numpy as np\n'), ((759, 814), 'numpy.array', 'np.array', (['[cam_mat[0][1], cam_mat[1][1], cam_mat[2][1]]'], {}), '([cam_mat[0][1], cam_mat[1][1], cam_mat[2][1]])\n', (767, 814), True, 'import numpy as np\n'), ((827, 882), 'numpy.array', 'np.array', (['[cam_mat[0][2], cam_mat[1][2], cam_mat[2][2]]'], {}), '([cam_mat[0][2], cam_mat[1][2], cam_mat[2][2]])\n', (835, 882), True, 'import numpy as np\n')] |
from typing import Sequence, Union, cast
import numpy as np
import pymap3d as pm
import transforms3d
def compute_agent_pose(agent_centroid_m: np.ndarray, agent_yaw_rad: float) -> np.ndarray:
    """Build the 3x3 world_from_agent pose matrix for an agent.

    Args:
        agent_centroid_m (np.ndarray): 2D coordinates of the agent
        agent_yaw_rad (float): yaw of the agent

    Returns:
        np.ndarray: 3x3 world_from_agent matrix (rotation by yaw plus
        translation to the centroid)
    """
    cos_yaw = np.cos(agent_yaw_rad)
    sin_yaw = np.sin(agent_yaw_rad)
    return np.array(
        [
            [cos_yaw, -sin_yaw, agent_centroid_m[0]],
            [sin_yaw, cos_yaw, agent_centroid_m[1]],
            [0, 0, 1],
        ]
    )
def rotation33_as_yaw(rotation: np.ndarray) -> float:
    """Extract the yaw component (rotation about z) of a 3x3 rotation matrix.

    Args:
        rotation (np.ndarray): 3x3 rotation matrix (np.float64 dtype recommended)

    Returns:
        float: yaw rotation in radians
    """
    _, _, yaw = transforms3d.euler.mat2euler(rotation)
    return cast(float, yaw)
def yaw_as_rotation33(yaw: float) -> np.ndarray:
    """Create a 3x3 counter-clockwise rotation matrix from a yaw angle.

    Equivalent to ``transforms3d.euler.euler2mat(0, 0, yaw)``; the matrix is
    built explicitly as:
        [cos(yaw), -sin(yaw), 0.0],
        [sin(yaw),  cos(yaw), 0.0],
        [0.0,       0.0,      1.0]

    Args:
        yaw (float): yaw rotation in radians

    Returns:
        np.ndarray: 3x3 rotation matrix
    """
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array(
        [
            [c, -s, 0.0],
            [s, c, 0.0],
            [0.0, 0.0, 1.0],
        ]
    )
def vertical_flip(tm: np.ndarray, y_dim_size: int) -> np.ndarray:
    """Return a new matrix that additionally flips the y axis.

    Args:
        tm: the original 3x3 matrix
        y_dim_size: this should match the resolution on y. It makes all coordinates positive

    Returns: a new 3x3 matrix.
    """
    # Negate y, then translate by the raster height so y stays positive.
    mirror = np.diag([1.0, -1.0, 1.0])
    flipped = mirror @ tm
    flipped[1, 2] += y_dim_size
    return flipped
def transform_points(points: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:
    """
    Transform 2D/3D points (row major) with the given transformation matrix.

    Three call signatures are supported:
    - points (N, F) with transf_matrix (F+1, F+1): every point is transformed
      by the single matrix; output shape (N, F).
    - points (B, N, F) with transf_matrix (F+1, F+1): the single matrix is
      broadcast over the batch; output shape (B, N, F).
    - points (B, N, F) with transf_matrix (B, F+1, F+1): each sequence uses
      its own matrix; output shape (B, N, F).

    F is 2 for 2D points and 3 for 3D points. Only the top FxF block and the
    translation column of each matrix are used, so the last matrix row does
    not influence the result.

    Args:
        points (np.ndarray): input points of shape (N, F) or (B, N, F)
        transf_matrix (np.ndarray): matrices of shape (F+1, F+1) or (B, F+1, F+1)

    Returns:
        np.ndarray: transformed points with the same shape as the input points
    """
    points_log = f" received points with shape {points.shape} "
    matrix_log = f" received matrices with shape {transf_matrix.shape} "

    assert points.ndim in [2, 3], f"points should have ndim in [2,3],{points_log}"
    assert transf_matrix.ndim in [2, 3], f"matrix should have ndim in [2,3],{matrix_log}"
    assert points.ndim >= transf_matrix.ndim, f"points ndim should be >= than matrix,{points_log},{matrix_log}"

    points_feat = points.shape[-1]
    assert points_feat in [2, 3], f"last points dimension must be 2 or 3,{points_log}"
    assert transf_matrix.shape[-1] == transf_matrix.shape[-2], f"matrix should be a square matrix,{matrix_log}"

    matrix_feat = transf_matrix.shape[-1]
    assert matrix_feat in [3, 4], f"last matrix dimension must be 3 or 4,{matrix_log}"
    assert points_feat == matrix_feat - 1, f"points last dim should be one less than matrix,{points_log},{matrix_log}"

    def _apply_batched(pts: np.ndarray, mats: np.ndarray) -> np.ndarray:
        # Rotate/scale with the transposed FxF block, then add the
        # translation row (last row of the transposed matrix).
        num_dims = mats.shape[-1] - 1
        mats_t = np.transpose(mats, (0, 2, 1))
        return pts @ mats_t[:, :num_dims, :num_dims] + mats_t[:, -1:, :num_dims]

    if points.ndim == 2 and transf_matrix.ndim == 2:
        # Single set of points, single matrix: add and strip a batch axis.
        return _apply_batched(points[np.newaxis, ...], transf_matrix[np.newaxis, ...])[0]
    if points.ndim == 3 and transf_matrix.ndim == 3:
        return _apply_batched(points, transf_matrix)
    if points.ndim == 3 and transf_matrix.ndim == 2:
        # Broadcast the single matrix across the batch dimension.
        return _apply_batched(points, transf_matrix[np.newaxis, ...])
    raise NotImplementedError(f"unsupported case!{points_log},{matrix_log}")
def transform_point(point: np.ndarray, transf_matrix: np.ndarray) -> np.ndarray:
    """Transform a single vector using a transformation matrix.

    Delegates to ``transform_points`` by adding and then removing a
    leading batch axis.

    Args:
        point (np.ndarray): vector of shape (N)
        transf_matrix (np.ndarray): transformation matrix of shape (N+1, N+1)

    Returns:
        np.ndarray: vector of same shape as input point
    """
    batched = np.expand_dims(point, 0)
    transformed = transform_points(batched, transf_matrix)
    return transformed[0]
def ecef_to_geodetic(point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
    """Convert an ECEF coordinate into geodetic latitude, longitude, altitude.

    Args:
        point (Union[np.ndarray, Sequence[float]]): ECEF coordinate vector (x, y, z)

    Returns:
        np.ndarray: the geodetic coordinate as returned by ``pymap3d.ecef2geodetic``
    """
    x, y, z = point[0], point[1], point[2]
    return np.array(pm.ecef2geodetic(x, y, z))
def geodetic_to_ecef(lla_point: Union[np.ndarray, Sequence[float]]) -> np.ndarray:
    """Convert geodetic latitude/longitude (and optional altitude) to ECEF.

    If only latitude and longitude are given, altitude 0 is assumed.

    Args:
        lla_point (Union[np.ndarray, Sequence[float]]): Latitude, Longitude
            and optionally Altitude

    Returns:
        np.ndarray: 3D ECEF coordinate (float64)
    """
    if len(lla_point) == 2:
        ecef = pm.geodetic2ecef(lla_point[0], lla_point[1], 0)
    else:
        ecef = pm.geodetic2ecef(lla_point[0], lla_point[1], lla_point[2])
    return np.array(ecef, dtype=np.float64)
| [
"transforms3d.euler.mat2euler",
"numpy.transpose",
"numpy.expand_dims",
"transforms3d.euler.euler2mat",
"pymap3d.geodetic2ecef",
"numpy.sin",
"numpy.matmul",
"numpy.cos",
"numpy.eye",
"pymap3d.ecef2geodetic"
] | [((1488, 1527), 'transforms3d.euler.euler2mat', 'transforms3d.euler.euler2mat', (['(0)', '(0)', 'yaw'], {}), '(0, 0, yaw)\n', (1516, 1527), False, 'import transforms3d\n'), ((1858, 1867), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1864, 1867), True, 'import numpy as np\n'), ((1899, 1920), 'numpy.matmul', 'np.matmul', (['flip_y', 'tm'], {}), '(flip_y, tm)\n', (1908, 1920), True, 'import numpy as np\n'), ((5531, 5555), 'numpy.expand_dims', 'np.expand_dims', (['point', '(0)'], {}), '(point, 0)\n', (5545, 5555), True, 'import numpy as np\n'), ((4416, 4454), 'numpy.transpose', 'np.transpose', (['transf_matrix', '(0, 2, 1)'], {}), '(transf_matrix, (0, 2, 1))\n', (4428, 4454), True, 'import numpy as np\n'), ((4618, 4643), 'numpy.expand_dims', 'np.expand_dims', (['points', '(0)'], {}), '(points, 0)\n', (4632, 4643), True, 'import numpy as np\n'), ((4668, 4700), 'numpy.expand_dims', 'np.expand_dims', (['transf_matrix', '(0)'], {}), '(transf_matrix, 0)\n', (4682, 4700), True, 'import numpy as np\n'), ((5941, 5987), 'pymap3d.ecef2geodetic', 'pm.ecef2geodetic', (['point[0]', 'point[1]', 'point[2]'], {}), '(point[0], point[1], point[2])\n', (5957, 5987), True, 'import pymap3d as pm\n'), ((1066, 1104), 'transforms3d.euler.mat2euler', 'transforms3d.euler.mat2euler', (['rotation'], {}), '(rotation)\n', (1094, 1104), False, 'import transforms3d\n'), ((6439, 6486), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['lla_point[0]', 'lla_point[1]', '(0)'], {}), '(lla_point[0], lla_point[1], 0)\n', (6455, 6486), True, 'import pymap3d as pm\n'), ((6540, 6598), 'pymap3d.geodetic2ecef', 'pm.geodetic2ecef', (['lla_point[0]', 'lla_point[1]', 'lla_point[2]'], {}), '(lla_point[0], lla_point[1], lla_point[2])\n', (6556, 6598), True, 'import pymap3d as pm\n'), ((581, 602), 'numpy.cos', 'np.cos', (['agent_yaw_rad'], {}), '(agent_yaw_rad)\n', (587, 602), True, 'import numpy as np\n'), ((663, 684), 'numpy.sin', 'np.sin', (['agent_yaw_rad'], {}), '(agent_yaw_rad)\n', (669, 684), True, 'import 
numpy as np\n'), ((686, 707), 'numpy.cos', 'np.cos', (['agent_yaw_rad'], {}), '(agent_yaw_rad)\n', (692, 707), True, 'import numpy as np\n'), ((4932, 4964), 'numpy.expand_dims', 'np.expand_dims', (['transf_matrix', '(0)'], {}), '(transf_matrix, 0)\n', (4946, 4964), True, 'import numpy as np\n'), ((605, 626), 'numpy.sin', 'np.sin', (['agent_yaw_rad'], {}), '(agent_yaw_rad)\n', (611, 626), True, 'import numpy as np\n')] |
# This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
import recolo
import numpy as np
import matplotlib.pyplot as plt
import os
# Directory of this script; the Abaqus example data is located relative to it.
cwd = os.path.dirname(os.path.realpath(__file__))
# Minimal example of pressure load reconstruction based on input from Abaqus. The deflection field from Abaqus is
# used to generate images used for deflectometry. The images used for deflectometry have to be interpolated from the
# displacement fields to produce a high resolution grid image. Minor deviations from the correct pressure field
# occur due to the use of central differences to determine the acceleration fields, introducing significant errors
# when the temporal resolution is low as here.

# plate and model parameters
mat_E = 210.e9  # Young's modulus [Pa]
mat_nu = 0.33  # Poisson's ratio []
density = 7700  # mass density -- presumably [kg/m^3]
plate_thick = 5e-3  # plate thickness [m]
plate = recolo.make_plate(mat_E, mat_nu, density, plate_thick)

# Image noise
noise_std = 0.004  # standard deviation of the artificial grid-image noise

# Reconstruction settings
win_size = 30  # Should be 30 or larger for this noise level

# Deflectometry settings
run_deflectometry = True  # False will bypass deflectometry and use slope fields directly from Abaqus.
abq_to_img_scale = 8  # upscaling factor from the Abaqus displacement grid to the synthetic grid images
mirror_grid_dist = 0.8  # mirror-to-grid distance -- units presumably [m]; confirm
grid_pitch = 5.  # pixels

# Load Abaqus data
abq_sim_fields = recolo.load_abaqus_rpts(os.path.join(cwd, "AbaqusExampleData/"))

# The deflectometry returns the slopes of the plate which have to be integrated in order to determine the deflection
if run_deflectometry:
    pixel_size_on_mirror = abq_sim_fields.pixel_size_x
    slopes_x = []
    slopes_y = []
    # Reference (undeformed) grid image rendered from the first frame, without noise.
    undeformed_grid = recolo.artificial_grid_deformation.deform_grid_from_deflection(
        abq_sim_fields.disp_fields[0, :, :],
        pixel_size=pixel_size_on_mirror,
        mirror_grid_dist=mirror_grid_dist,
        grid_pitch=grid_pitch,
        img_upscale=abq_to_img_scale,
        img_noise_std=0)
    for disp_field in abq_sim_fields.disp_fields:
        # Deformed grid image for this frame, with synthetic noise added.
        deformed_grid = recolo.artificial_grid_deformation.deform_grid_from_deflection(disp_field,
                                                                                       pixel_size=pixel_size_on_mirror,
                                                                                       mirror_grid_dist=mirror_grid_dist,
                                                                                       grid_pitch=grid_pitch,
                                                                                       img_upscale=abq_to_img_scale,
                                                                                       img_noise_std=noise_std)
        # Grid-method displacement between reference and deformed grid images.
        disp_x, disp_y = recolo.deflectomerty.disp_from_grids(undeformed_grid, deformed_grid, grid_pitch)
        # Convert in-plane grid displacement (rescaled to mirror units) to slope angles.
        slope_x = recolo.deflectomerty.angle_from_disp(disp_x * pixel_size_on_mirror/abq_to_img_scale, mirror_grid_dist)
        slope_y = recolo.deflectomerty.angle_from_disp(disp_y * pixel_size_on_mirror/abq_to_img_scale, mirror_grid_dist)
        slopes_x.append(slope_x)
        slopes_y.append(slope_y)
    slopes_x = np.array(slopes_x)
    slopes_y = np.array(slopes_y)
    # The grid images are upscaled, so the effective pixel size shrinks accordingly.
    pixel_size_on_mirror = abq_sim_fields.pixel_size_x / abq_to_img_scale
else:
    pixel_size_on_mirror = abq_sim_fields.pixel_size_x
    # Slopes taken directly as spatial gradients of the Abaqus deflection fields.
    slopes_x, slopes_y = np.gradient(abq_sim_fields.disp_fields, pixel_size_on_mirror, axis=(1, 2))

# Integrate slopes to get deflection fields
disp_fields = recolo.slope_integration.disp_from_slopes(slopes_x, slopes_y, pixel_size_on_mirror,
                                                  zero_at="bottom corners", zero_at_size=5,
                                                  extrapolate_edge=0, downsample=1)

# Kinematic fields from deflection field
kin_fields = recolo.kinematic_fields_from_deflections(disp_fields,
                                                     pixel_size=pixel_size_on_mirror,
                                                     sampling_rate=abq_sim_fields.sampling_rate,
                                                     filter_space_sigma=10)

# Reconstruct pressure using the virtual fields method
virtual_field = recolo.virtual_fields.Hermite16(win_size, pixel_size_on_mirror)
pressure_fields = np.array(
    [recolo.solver_VFM.calc_pressure_thin_elastic_plate(field, plate, virtual_field) for field in kin_fields])

# Plot the results
# Correct pressure history used in the Abaqus simulation
times = np.array([0.0, 0.00005, 0.00010, 0.0003, 0.001]) * 1000  # converted to [ms]
pressures = np.array([0.0, 0.0, 1.0, 0.0, 0.0]) * 1e5  # [Pa]
plt.plot(times, pressures, '-', label="Correct pressure")

# Reconstructed pressure from VFM, sampled at the plate centre.
center = int(pressure_fields.shape[1] / 2)
plt.plot(abq_sim_fields.times * 1000., pressure_fields[:, center, center], "-o", label="Reconstructed pressure")

# NOTE(review): the plotted values appear to be in Pa although the axis
# label reads kPa -- confirm the intended unit.
plt.xlim(left=0.000, right=0.3)
plt.ylim(top=110000, bottom=-15)
plt.xlabel("Time [ms]")
plt.ylabel(r"Overpressure [kPa]")
plt.legend(frameon=False)
plt.tight_layout()
plt.show()
| [
"recolo.solver_VFM.calc_pressure_thin_elastic_plate",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"recolo.make_plate",
"os.path.abspath",
"recolo.deflectomerty.disp_from_grids",
"recolo.virtual_fields.Hermite16",
"recolo.kinematic_fields_from_deflections",
"matplotlib.pyplot.show",
"matplotl... | [((922, 976), 'recolo.make_plate', 'recolo.make_plate', (['mat_E', 'mat_nu', 'density', 'plate_thick'], {}), '(mat_E, mat_nu, density, plate_thick)\n', (939, 976), False, 'import recolo\n'), ((3445, 3612), 'recolo.slope_integration.disp_from_slopes', 'recolo.slope_integration.disp_from_slopes', (['slopes_x', 'slopes_y', 'pixel_size_on_mirror'], {'zero_at': '"""bottom corners"""', 'zero_at_size': '(5)', 'extrapolate_edge': '(0)', 'downsample': '(1)'}), "(slopes_x, slopes_y,\n pixel_size_on_mirror, zero_at='bottom corners', zero_at_size=5,\n extrapolate_edge=0, downsample=1)\n", (3486, 3612), False, 'import recolo\n'), ((3772, 3934), 'recolo.kinematic_fields_from_deflections', 'recolo.kinematic_fields_from_deflections', (['disp_fields'], {'pixel_size': 'pixel_size_on_mirror', 'sampling_rate': 'abq_sim_fields.sampling_rate', 'filter_space_sigma': '(10)'}), '(disp_fields, pixel_size=\n pixel_size_on_mirror, sampling_rate=abq_sim_fields.sampling_rate,\n filter_space_sigma=10)\n', (3812, 3934), False, 'import recolo\n'), ((4160, 4223), 'recolo.virtual_fields.Hermite16', 'recolo.virtual_fields.Hermite16', (['win_size', 'pixel_size_on_mirror'], {}), '(win_size, pixel_size_on_mirror)\n', (4191, 4223), False, 'import recolo\n'), ((4558, 4615), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'pressures', '"""-"""'], {'label': '"""Correct pressure"""'}), "(times, pressures, '-', label='Correct pressure')\n", (4566, 4615), True, 'import matplotlib.pyplot as plt\n'), ((4694, 4811), 'matplotlib.pyplot.plot', 'plt.plot', (['(abq_sim_fields.times * 1000.0)', 'pressure_fields[:, center, center]', '"""-o"""'], {'label': '"""Reconstructed pressure"""'}), "(abq_sim_fields.times * 1000.0, pressure_fields[:, center, center],\n '-o', label='Reconstructed pressure')\n", (4702, 4811), True, 'import matplotlib.pyplot as plt\n'), ((4808, 4837), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(0.0)', 'right': '(0.3)'}), '(left=0.0, right=0.3)\n', (4816, 4837), True, 
'import matplotlib.pyplot as plt\n'), ((4840, 4872), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(110000)', 'bottom': '(-15)'}), '(top=110000, bottom=-15)\n', (4848, 4872), True, 'import matplotlib.pyplot as plt\n'), ((4873, 4896), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ms]"""'], {}), "('Time [ms]')\n", (4883, 4896), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4929), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Overpressure [kPa]"""'], {}), "('Overpressure [kPa]')\n", (4907, 4929), True, 'import matplotlib.pyplot as plt\n'), ((4932, 4957), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (4942, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4958, 4976), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4974, 4976), True, 'import matplotlib.pyplot as plt\n'), ((4977, 4987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4985, 4987), True, 'import matplotlib.pyplot as plt\n'), ((239, 265), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (255, 265), False, 'import os\n'), ((1358, 1397), 'os.path.join', 'os.path.join', (['cwd', '"""AbaqusExampleData/"""'], {}), "(cwd, 'AbaqusExampleData/')\n", (1370, 1397), False, 'import os\n'), ((1650, 1901), 'recolo.artificial_grid_deformation.deform_grid_from_deflection', 'recolo.artificial_grid_deformation.deform_grid_from_deflection', (['abq_sim_fields.disp_fields[0, :, :]'], {'pixel_size': 'pixel_size_on_mirror', 'mirror_grid_dist': 'mirror_grid_dist', 'grid_pitch': 'grid_pitch', 'img_upscale': 'abq_to_img_scale', 'img_noise_std': '(0)'}), '(abq_sim_fields\n .disp_fields[0, :, :], pixel_size=pixel_size_on_mirror,\n mirror_grid_dist=mirror_grid_dist, grid_pitch=grid_pitch, img_upscale=\n abq_to_img_scale, img_noise_std=0)\n', (1712, 1901), False, 'import recolo\n'), ((3098, 3116), 'numpy.array', 'np.array', (['slopes_x'], {}), '(slopes_x)\n', (3106, 3116), True, 'import numpy as np\n'), 
((3132, 3150), 'numpy.array', 'np.array', (['slopes_y'], {}), '(slopes_y)\n', (3140, 3150), True, 'import numpy as np\n'), ((3311, 3385), 'numpy.gradient', 'np.gradient', (['abq_sim_fields.disp_fields', 'pixel_size_on_mirror'], {'axis': '(1, 2)'}), '(abq_sim_fields.disp_fields, pixel_size_on_mirror, axis=(1, 2))\n', (3322, 3385), True, 'import numpy as np\n'), ((4448, 4493), 'numpy.array', 'np.array', (['[0.0, 5e-05, 0.0001, 0.0003, 0.001]'], {}), '([0.0, 5e-05, 0.0001, 0.0003, 0.001])\n', (4456, 4493), True, 'import numpy as np\n'), ((4516, 4551), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 1.0, 0.0, 0.0])\n', (4524, 4551), True, 'import numpy as np\n'), ((125, 137), 'os.path.abspath', 'abspath', (['"""."""'], {}), "('.')\n", (132, 137), False, 'from os.path import abspath\n'), ((2011, 2244), 'recolo.artificial_grid_deformation.deform_grid_from_deflection', 'recolo.artificial_grid_deformation.deform_grid_from_deflection', (['disp_field'], {'pixel_size': 'pixel_size_on_mirror', 'mirror_grid_dist': 'mirror_grid_dist', 'grid_pitch': 'grid_pitch', 'img_upscale': 'abq_to_img_scale', 'img_noise_std': 'noise_std'}), '(disp_field,\n pixel_size=pixel_size_on_mirror, mirror_grid_dist=mirror_grid_dist,\n grid_pitch=grid_pitch, img_upscale=abq_to_img_scale, img_noise_std=\n noise_std)\n', (2073, 2244), False, 'import recolo\n'), ((2693, 2778), 'recolo.deflectomerty.disp_from_grids', 'recolo.deflectomerty.disp_from_grids', (['undeformed_grid', 'deformed_grid', 'grid_pitch'], {}), '(undeformed_grid, deformed_grid, grid_pitch\n )\n', (2729, 2778), False, 'import recolo\n'), ((2792, 2900), 'recolo.deflectomerty.angle_from_disp', 'recolo.deflectomerty.angle_from_disp', (['(disp_x * pixel_size_on_mirror / abq_to_img_scale)', 'mirror_grid_dist'], {}), '(disp_x * pixel_size_on_mirror /\n abq_to_img_scale, mirror_grid_dist)\n', (2828, 2900), False, 'import recolo\n'), ((2913, 3021), 'recolo.deflectomerty.angle_from_disp', 
'recolo.deflectomerty.angle_from_disp', (['(disp_y * pixel_size_on_mirror / abq_to_img_scale)', 'mirror_grid_dist'], {}), '(disp_y * pixel_size_on_mirror /\n abq_to_img_scale, mirror_grid_dist)\n', (2949, 3021), False, 'import recolo\n'), ((4257, 4336), 'recolo.solver_VFM.calc_pressure_thin_elastic_plate', 'recolo.solver_VFM.calc_pressure_thin_elastic_plate', (['field', 'plate', 'virtual_field'], {}), '(field, plate, virtual_field)\n', (4307, 4336), False, 'import recolo\n')] |
"""Code common to all toolkits"""
from collections import deque
import numpy as np
from .spatial import dihedral, distance
def detect_secondary_structure(res_dict):
    """Detect alpha helices and beta sheets in res_dict by phi and psi angles.

    Candidate residues are first selected by Ramachandran-style windows on
    the backbone dihedrals (phi, psi) of consecutive residue pairs, then
    validated geometrically (N-O hydrogen-bond distances for helices,
    N-O/CA-CA distances for sheets); runs shorter than 3 residues are
    discarded at both stages.

    Parameters
    ----------
    res_dict : numpy structured array
        Must provide fields 'N', 'CA', 'C', 'O' (atom coordinates used by
        ``dihedral``/``distance``), 'id', 'resnum', and writable boolean
        fields 'isalpha' and 'isbeta'.

    Returns
    -------
    res_dict with 'isalpha' and 'isbeta' filled in (modified in place).
    """
    first = res_dict[:-1]
    second = res_dict[1:]
    psi = dihedral(first['N'], first['CA'], first['C'], second['N'])
    phi = dihedral(first['C'], second['N'], second['CA'], second['C'])
    # Gap between residue ids; only directly consecutive pairs (d == 1) count.
    d = second['id'] - first['id']
    # Alpha helices
    res_mask_alpha = (((phi > -145) & (phi < -35) &
                       (psi > -70) & (psi < 50) & (d == 1)))  # alpha
    res_mask_alpha = np.union1d(np.argwhere(res_mask_alpha),
                               np.argwhere(res_mask_alpha))
    # Ignore groups smaller than 3
    # (np.split breaks the sorted index list wherever consecutive indices differ by more than 1)
    for mask_group in np.split(res_mask_alpha, np.argwhere(np.diff(res_mask_alpha) != 1).flatten() + 1):
        if len(mask_group) >= 3:
            res_dict['isalpha'][mask_group] = True
    # Alpha helices have to form H-Bonds:
    # N-O distance < 3.5 between residues at least 3 apart in sequence
    # (distance cutoffs are presumably in angstroms -- confirm)
    hbond_dist_mask = np.abs(res_dict[res_dict['isalpha']]['resnum'] -
                           res_dict[res_dict['isalpha']]['resnum'][:, np.newaxis]) >= 3
    hbond_mask = distance(res_dict[res_dict['isalpha']]['N'],
                         res_dict[res_dict['isalpha']]['O']) < 3.5
    p_mask = ((hbond_mask & hbond_dist_mask).any(axis=0) |
              (hbond_mask & hbond_dist_mask).any(axis=1))
    # Drop alpha residues without any qualifying H-bond partner.
    res_dict['isalpha'][np.argwhere(res_dict['isalpha']).flatten()[
        ~p_mask]] = False
    # Ignore groups smaller than 3
    res_mask_alpha = np.argwhere(res_dict['isalpha']).flatten()
    for mask_group in np.split(res_mask_alpha, np.argwhere(np.diff(res_mask_alpha) != 1).flatten() + 1):
        if 0 < len(mask_group) < 3:
            res_dict['isalpha'][mask_group] = False
    # Beta sheets
    res_mask_beta = (((phi >= -180) & (phi < -40) &
                      (psi <= 180) & (psi > 90) & (d == 1)) |
                     ((phi >= -180) & (phi < -70) &
                      (psi <= -165) & (d == 1)))  # beta
    res_mask_beta = np.union1d(np.argwhere(res_mask_beta),
                              np.argwhere(res_mask_beta))
    # Ignore groups smaller than 3
    for mask_group in np.split(res_mask_beta, np.argwhere(np.diff(res_mask_beta) != 1).flatten() + 1):
        if len(mask_group) >= 3:
            res_dict['isbeta'][mask_group] = True
    # Beta strands have to be alongside eachother:
    # paired residues must be >= 4 apart in sequence and close in space
    # (either an N-O H-bond < 3.5 or CA-CA proximity < 4.5).
    res_dist_mask = np.abs(res_dict[res_dict['isbeta']]['resnum'] -
                          res_dict[res_dict['isbeta']]['resnum'][:, np.newaxis]) >= 4
    hbond_mask = distance(res_dict[res_dict['isbeta']]['N'],
                         res_dict[res_dict['isbeta']]['O']) < 3.5
    ca_mask = distance(res_dict[res_dict['isbeta']]['CA'],
                       res_dict[res_dict['isbeta']]['CA']) < 4.5
    p_mask = ((hbond_mask & res_dist_mask).any(axis=0) |
              (hbond_mask & res_dist_mask).any(axis=1) |
              (ca_mask & res_dist_mask).any(axis=0))
    # Drop beta residues without any qualifying partner strand residue.
    res_dict['isbeta'][np.argwhere(res_dict['isbeta']).flatten()[
        ~p_mask]] = False
    # Ignore groups smaller than 3
    res_mask_beta = np.argwhere(res_dict['isbeta']).flatten()
    for mask_group in np.split(res_mask_beta, np.argwhere(np.diff(res_mask_beta) != 1).flatten() + 1):
        if 0 < len(mask_group) < 3:
            res_dict['isbeta'][mask_group] = False
    return res_dict
def canonize_ring_path(path):
    """Return the canonical ordering of a ring path.

    A ring path is a list of consecutive, bonded atom indices. The canonical
    form is obtained by:
    1) rotating the smallest index to position 0,
    2) comparing the two possible walking directions by their first step
       (delta IDX) from that minimum,
    3) reversing (and re-rotating the minimum to the front) if the backward
       direction has the smaller first step.

    Parameters
    ----------
    path : list or deque of integers
        Consecutive atom indices in a ring. A deque argument is rotated
        in place (as in the original implementation).

    Returns
    -------
    canonic_path : list of integers
        Sorted list of atoms.
    """
    if isinstance(path, deque):
        ring = path
        members = list(path)
    elif isinstance(path, list):
        members = path
        ring = deque(path)
    else:
        raise ValueError('Path must be a list or deque.')
    # FIXME: Py2 deque does not have deque.index()
    ring.rotate(-members.index(min(members)))
    forward_step = ring[1] - ring[0]
    backward_step = ring[-1] - ring[0]
    if forward_step > backward_step:
        ring.reverse()
        ring.rotate(1)
    return list(ring)
| [
"numpy.argwhere",
"numpy.diff",
"numpy.abs",
"collections.deque"
] | [((651, 678), 'numpy.argwhere', 'np.argwhere', (['res_mask_alpha'], {}), '(res_mask_alpha)\n', (662, 678), True, 'import numpy as np\n'), ((712, 739), 'numpy.argwhere', 'np.argwhere', (['res_mask_alpha'], {}), '(res_mask_alpha)\n', (723, 739), True, 'import numpy as np\n'), ((1030, 1139), 'numpy.abs', 'np.abs', (["(res_dict[res_dict['isalpha']]['resnum'] - res_dict[res_dict['isalpha']][\n 'resnum'][:, np.newaxis])"], {}), "(res_dict[res_dict['isalpha']]['resnum'] - res_dict[res_dict[\n 'isalpha']]['resnum'][:, np.newaxis])\n", (1036, 1139), True, 'import numpy as np\n'), ((2076, 2102), 'numpy.argwhere', 'np.argwhere', (['res_mask_beta'], {}), '(res_mask_beta)\n', (2087, 2102), True, 'import numpy as np\n'), ((2135, 2161), 'numpy.argwhere', 'np.argwhere', (['res_mask_beta'], {}), '(res_mask_beta)\n', (2146, 2161), True, 'import numpy as np\n'), ((2456, 2563), 'numpy.abs', 'np.abs', (["(res_dict[res_dict['isbeta']]['resnum'] - res_dict[res_dict['isbeta']][\n 'resnum'][:, np.newaxis])"], {}), "(res_dict[res_dict['isbeta']]['resnum'] - res_dict[res_dict['isbeta']\n ]['resnum'][:, np.newaxis])\n", (2462, 2563), True, 'import numpy as np\n'), ((1567, 1599), 'numpy.argwhere', 'np.argwhere', (["res_dict['isalpha']"], {}), "(res_dict['isalpha'])\n", (1578, 1599), True, 'import numpy as np\n'), ((3158, 3189), 'numpy.argwhere', 'np.argwhere', (["res_dict['isbeta']"], {}), "(res_dict['isbeta'])\n", (3169, 3189), True, 'import numpy as np\n'), ((4083, 4094), 'collections.deque', 'deque', (['path'], {}), '(path)\n', (4088, 4094), False, 'from collections import deque\n'), ((1440, 1472), 'numpy.argwhere', 'np.argwhere', (["res_dict['isalpha']"], {}), "(res_dict['isalpha'])\n", (1451, 1472), True, 'import numpy as np\n'), ((3033, 3064), 'numpy.argwhere', 'np.argwhere', (["res_dict['isbeta']"], {}), "(res_dict['isbeta'])\n", (3044, 3064), True, 'import numpy as np\n'), ((836, 859), 'numpy.diff', 'np.diff', (['res_mask_alpha'], {}), '(res_mask_alpha)\n', (843, 859), True, 
'import numpy as np\n'), ((1669, 1692), 'numpy.diff', 'np.diff', (['res_mask_alpha'], {}), '(res_mask_alpha)\n', (1676, 1692), True, 'import numpy as np\n'), ((2257, 2279), 'numpy.diff', 'np.diff', (['res_mask_beta'], {}), '(res_mask_beta)\n', (2264, 2279), True, 'import numpy as np\n'), ((3258, 3280), 'numpy.diff', 'np.diff', (['res_mask_beta'], {}), '(res_mask_beta)\n', (3265, 3280), True, 'import numpy as np\n')] |
import os
import numpy as np
from preprocess import preprocess
def load_data():
    """Read every raw e-mail file from ``../raw_data`` and return the
    pre-processed corpus.

    Each subdirectory of ``raw_data`` is one mailbox: a directory whose
    name contains ``'nospam'`` is treated as ham, any other directory as
    spam. ``.DS_Store`` entries (macOS metadata) are skipped.

    Returns
    -------
    np.ndarray
        Array of dicts with keys ``'contents'`` (the output of
        ``preprocess``) and ``'is_spam'`` (bool).
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    file_path = os.path.abspath(os.path.join(dir_path, '..', 'raw_data'))
    raw_training_set = []
    for directory in os.listdir(file_path):
        if directory == '.DS_Store':
            continue
        is_spam = 'nospam' not in directory
        sub_dir = os.path.join(file_path, directory)
        for file_name in os.listdir(sub_dir):
            if file_name == '.DS_Store':
                continue
            # Bug fix: the original never closed the file handle; use a
            # context manager so it is released promptly.
            with open(os.path.join(sub_dir, file_name), encoding='ISO-8859-1') as email_file:
                raw_contents = email_file.read()
            processed_email = preprocess(file_name, raw_contents)
            raw_training_set.append({
                'contents': processed_email,
                'is_spam': is_spam,
            })
    return np.array(raw_training_set)
| [
"os.path.join",
"os.path.realpath",
"numpy.array",
"os.listdir"
] | [((234, 255), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (244, 255), False, 'import os\n'), ((950, 976), 'numpy.array', 'np.array', (['raw_training_set'], {}), '(raw_training_set)\n', (958, 976), True, 'import numpy as np\n'), ((113, 139), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os\n'), ((173, 213), 'os.path.join', 'os.path.join', (['dir_path', '""".."""', '"""raw_data"""'], {}), "(dir_path, '..', 'raw_data')\n", (185, 213), False, 'import os\n'), ((489, 523), 'os.path.join', 'os.path.join', (['file_path', 'directory'], {}), '(file_path, directory)\n', (501, 523), False, 'import os\n'), ((567, 587), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (577, 587), False, 'import os\n'), ((679, 712), 'os.path.join', 'os.path.join', (['dir_path', 'file_name'], {}), '(dir_path, file_name)\n', (691, 712), False, 'import os\n')] |
'''
Created on May 20, 2019
@author: <NAME>, <NAME>
'''
from ScopeFoundry.data_browser import DataBrowser, DataBrowserView
from qtpy import QtWidgets, QtCore, QtGui
import numpy as np
import h5py
from ScopeFoundry.widgets import RegionSlicer
from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit, MonoExponentialFitter, BiExponentialFitter, TauXFitter, SemiLogYPolyFitter
class PicoquantHistogramH5View(DataBrowserView):
    """DataBrowser view for PicoQuant (PicoHarp/HydraHarp) histogram HDF5 files.

    Shows the photon arrival-time histogram with interactive region slicing,
    optional background subtraction, binning, and lifetime fitting.
    """

    name = 'picoquant_histogram_h5'

    def is_file_supported(self, fname):
        # The filename determines which HDF5 group paths hold the data.
        if "picoharp_histogram.h5" in fname:
            self.m_base = 'measurement/{}/'.format('picoharp_histogram')
            self.h_base = 'hardware/{}/'.format('picoharp')
            return True
        elif "hydraharp_histogram.h5" in fname:
            self.m_base = 'measurement/{}/'.format('hydraharp_histogram')
            self.h_base = 'hardware/{}/'.format('hydraharp')
            return True
        else:
            return False

    def setup(self):
        ## ui and graph plot
        self.plot_n_fit = PlotNFit(
            fitters=[
                SemiLogYPolyFitter(),
                MonoExponentialFitter(),
                BiExponentialFitter(),
                TauXFitter(),
            ],
        )
        self.ui = self.dockarea = self.plot_n_fit.get_docks_as_dockarea()
        # Lifetime data is viewed on a semi-log y scale.
        self.plot_n_fit.plot.setLogMode(False, True)

        # data slicers: x_slicer selects the fit window, bg_slicer an
        # optional background window.
        plot_data = self.plot_n_fit.data_lines[0]
        self.x_slicer = RegionSlicer(plot_data,
                                     brush=QtGui.QColor(0, 255, 0, 50),
                                     name='x_slicer', initial=[10, 20], activated=True)
        self.x_slicer.region_changed_signal.connect(self.update_display)
        self.bg_slicer = RegionSlicer(plot_data,
                                      brush=QtGui.QColor(255, 255, 255, 50),
                                      name='bg_subtract', initial=[0, 10], activated=False)
        self.bg_slicer.region_changed_signal.connect(self.update_display)

        self.plot_n_fit.settings_layout.insertWidget(0, self.x_slicer.New_UI())
        self.plot_n_fit.settings_layout.insertWidget(1, self.bg_slicer.New_UI())

        ## settings dock
        self.settings.New('chan', dtype=int, initial=0)
        self.settings.New('binning', dtype=int, initial=1, vmin=1)
        self.settings.New('time_unit', dtype=str, initial='ns')
        self.settings.New('norm_data', bool, initial=False)
        self.settings.New('roll_data', int, initial=0)
        for lqname in ['chan', 'binning', 'roll_data', 'norm_data']:
            getattr(self.settings, lqname).add_listener(self.update_display)
        self.setdock = self.dockarea.addDock(name='Data Settings', position='below',
                                             relativeTo=self.plot_n_fit.settings_dock,
                                             widget=self.settings.New_UI())

        # Metadata from file: settings attributes displayed when present.
        self.posible_meta_data = ['ElapsedMeasTime', 'Tacq', 'Resolution', 'CountRate0', 'CountRate1',
                                  'Binning', 'SyncRate', 'SyncDivider', 'count_rate0', 'count_rate1',
                                  'elapsed_meas_time', 'sample']
        self.setdock.layout.addWidget(QtWidgets.QLabel('<h3>Meta data </h3>'))
        self.meta_data_label = QtWidgets.QLabel()
        self.setdock.layout.addWidget(self.meta_data_label)

        # Just for appearance
        VSpacerItem = QtWidgets.QSpacerItem(0, 0, QtWidgets.QSizePolicy.Minimum,
                                            QtWidgets.QSizePolicy.Expanding)
        self.setdock.layout.addItem(VSpacerItem)
        self.setdock.setStretch(1, 1)
        self.plot_n_fit.settings_dock.setStretch(1, 1)
        self.plot_n_fit.settings_dock.raiseDock()

    @QtCore.Slot()
    def update_display(self):
        # Full trace in plot 0; the x-sliced region feeds the fitter.
        x, y = self.get_xy(apply_use_x_slice=False)
        self.plot_n_fit.update_data(x, y, n_plot=0, is_fit_data=False)

        x_fit_data, y_fit_data = self.get_xy(apply_use_x_slice=True)
        self.plot_n_fit.update_fit_data(x_fit_data, y_fit_data)

        text = self.plot_n_fit.result_message
        title = self.plot_n_fit.fit_options.val
        self.x_slicer.set_label(text, title)

    def on_change_data_filename(self, fname):
        try:
            self.dat = h5py.File(fname, 'r')
            self.meas = H = self.dat[self.m_base]
            self.time_array = H['time_array'][:] * 1e-3  # ns
            self.histograms = H['time_histogram'][:].reshape(-1, len(self.time_array))  # force shape (Nchan, Nbins)
            n_chan = self.histograms.shape[0]
            self.settings.chan.change_min_max(0, n_chan - 1)
            self.update_metadata()
            self.dat.close()
            self.update_display()
        except Exception as err:
            self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" % (fname, err))
            # FIX: bare `raise` re-raises with the original traceback intact
            # (was `raise(err)`).
            raise

    def update_metadata(self):
        app_set = self.dat['app/settings']
        hw_set = self.dat[self.h_base + '/settings']
        meas_set = self.dat[self.m_base + '/settings']

        # Collect (name, value, unit) rows for every known attribute present.
        data_table = []
        for settings in [app_set, hw_set, meas_set]:
            for lqname in self.posible_meta_data:
                if lqname in settings.attrs.keys():
                    val = settings.attrs[lqname]
                    unit = ''
                    if lqname in settings['units'].attrs.keys():
                        unit = settings['units'].attrs[lqname]
                    data_table.append([lqname, val, unit])

        html_table = _table2html(data_table, False)
        self.meta_data_label.setText(html_table)

    def get_xy(self, apply_use_x_slice=True):
        '''
        returns data as configured: selected channel, rolled, optionally
        background-subtracted, binned, x-sliced and normalized.
        '''
        try:
            y = 1.0 * self.histograms[self.settings['chan']]
            x = self.time_array

            R = self.settings['roll_data']
            if R != 0:
                y = np.roll(y, R, -1)

            if self.bg_slicer.activated.val:
                bg = y[self.bg_slicer.s_].mean()
                y -= bg

            binning = self.settings['binning']
            if binning > 1:
                x, y = bin_y_average_x(x, y, binning, -1, datapoints_lost_warning=False)

            if apply_use_x_slice:
                x = x[self.x_slicer.s_]
                y = y[self.x_slicer.s_]

            if self.settings['norm_data']:
                y = norm(y)
        except Exception:
            # FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.  Fall back to synthetic demo
            # data when no file is loaded yet.
            x = np.arange(120) / 12
            y = np.exp(-x / 10.0) + 0.001 * np.random.rand(len(x))
        return (x, y)
def norm(x):
    """Scale *x* so that its maximum becomes 1; an all-zero input maps to zeros."""
    peak = x.max()
    return x * 0.0 if peak == 0 else x * 1.0 / peak
def bin_y_average_x(x, y, binning=2, axis=-1, datapoints_lost_warning=True):
    '''
    Downsample by summing groups of `binning` consecutive samples along `axis`
    of *y* (and averaging the matching *x* positions).

    y can be a n-dim array with length on axis `axis` equal to len(x).
    Trailing samples that do not fill a whole bin are dropped.
    '''
    new_len = (len(x) // binning) * binning
    data_loss = len(x) - new_len
    # FIX: was `data_loss is not 0` -- identity comparison with an int
    # literal (SyntaxWarning on Python >= 3.8); value comparison intended.
    if data_loss != 0 and datapoints_lost_warning:
        print('bin_y_average_x() warining: lost final', data_loss, 'datapoints')

    def bin_1Darray(arr, binning=binning, new_len=new_len):
        # Sum each group of `binning` consecutive samples.
        return arr[:new_len].reshape((-1, binning)).sum(1)

    x_ = bin_1Darray(x) / binning  # average x positions per bin
    y_ = np.apply_along_axis(bin_1Darray, axis, y)
    return x_, y_
def _table2html(data_table, strip_latex = True, header=[]):
text = '<table border="0" alignment="center" >'
if len(header) == len(data_table[0]):
text += '<tr>'
for element in header:
text += '<th>{} </th>'.format(element)
text += '</tr>'
for line in data_table:
text += '<tr>'
for element in line:
text += '<td>{} </td>'.format(element)
text += '</tr>'
text += '</table>'
if strip_latex:
text = text.replace('\\','').replace('$','').replace('_','')
return text
if __name__ == '__main__':
    import sys
    # Standalone entry point: open a DataBrowser window with this viewer
    # registered, forwarding command-line arguments to Qt.
    app = DataBrowser(sys.argv)
    app.load_view(PicoquantHistogramH5View(app))
sys.exit(app.exec_()) | [
"FoundryDataBrowser.viewers.plot_n_fit.SemiLogYPolyFitter",
"h5py.File",
"numpy.roll",
"qtpy.QtWidgets.QLabel",
"ScopeFoundry.data_browser.DataBrowser",
"qtpy.QtWidgets.QSpacerItem",
"FoundryDataBrowser.viewers.plot_n_fit.MonoExponentialFitter",
"numpy.apply_along_axis",
"qtpy.QtGui.QColor",
"nump... | [((4079, 4092), 'qtpy.QtCore.Slot', 'QtCore.Slot', ([], {}), '()\n', (4090, 4092), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((7778, 7819), 'numpy.apply_along_axis', 'np.apply_along_axis', (['bin_1Darray', 'axis', 'y'], {}), '(bin_1Darray, axis, y)\n', (7797, 7819), True, 'import numpy as np\n'), ((8473, 8494), 'ScopeFoundry.data_browser.DataBrowser', 'DataBrowser', (['sys.argv'], {}), '(sys.argv)\n', (8484, 8494), False, 'from ScopeFoundry.data_browser import DataBrowser, DataBrowserView\n'), ((3615, 3633), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (3631, 3633), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((3748, 3844), 'qtpy.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', (['(0)', '(0)', 'QtWidgets.QSizePolicy.Minimum', 'QtWidgets.QSizePolicy.Expanding'], {}), '(0, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.\n QSizePolicy.Expanding)\n', (3769, 3844), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((3543, 3582), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""<h3>Meta data </h3>"""'], {}), "('<h3>Meta data </h3>')\n", (3559, 3582), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((4635, 4656), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (4644, 4656), False, 'import h5py\n'), ((1751, 1778), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['(0)', '(255)', '(0)', '(50)'], {}), '(0, 255, 0, 50)\n', (1763, 1778), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((2034, 2065), 'qtpy.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)', '(50)'], {}), '(255, 255, 255, 50)\n', (2046, 2065), False, 'from qtpy import QtWidgets, QtCore, QtGui\n'), ((6386, 6403), 'numpy.roll', 'np.roll', (['y', 'R', '(-1)'], {}), '(y, R, -1)\n', (6393, 6403), True, 'import numpy as np\n'), ((1161, 1181), 'FoundryDataBrowser.viewers.plot_n_fit.SemiLogYPolyFitter', 'SemiLogYPolyFitter', ([], {}), '()\n', (1179, 1181), False, 'from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit, 
MonoExponentialFitter, BiExponentialFitter, TauXFitter, SemiLogYPolyFitter\n'), ((1221, 1244), 'FoundryDataBrowser.viewers.plot_n_fit.MonoExponentialFitter', 'MonoExponentialFitter', ([], {}), '()\n', (1242, 1244), False, 'from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit, MonoExponentialFitter, BiExponentialFitter, TauXFitter, SemiLogYPolyFitter\n'), ((1284, 1305), 'FoundryDataBrowser.viewers.plot_n_fit.BiExponentialFitter', 'BiExponentialFitter', ([], {}), '()\n', (1303, 1305), False, 'from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit, MonoExponentialFitter, BiExponentialFitter, TauXFitter, SemiLogYPolyFitter\n'), ((1345, 1357), 'FoundryDataBrowser.viewers.plot_n_fit.TauXFitter', 'TauXFitter', ([], {}), '()\n', (1355, 1357), False, 'from FoundryDataBrowser.viewers.plot_n_fit import PlotNFit, MonoExponentialFitter, BiExponentialFitter, TauXFitter, SemiLogYPolyFitter\n'), ((6968, 6982), 'numpy.arange', 'np.arange', (['(120)'], {}), '(120)\n', (6977, 6982), True, 'import numpy as np\n'), ((7002, 7019), 'numpy.exp', 'np.exp', (['(-x / 10.0)'], {}), '(-x / 10.0)\n', (7008, 7019), True, 'import numpy as np\n')] |
try:
# Try to use setuptools so as to enable support of the special
# "Microsoft Visual C++ Compiler for Python 2.7" (http://aka.ms/vcpython27)
# for building under Windows.
# Note setuptools >= 6.0 is required for this.
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
import sys
import os
import numpy
import numpy.distutils.misc_util as np_misc
import versioneer
# --- versioneer configuration: version string is derived from git tags ---
versioneer.VCS = 'git'
versioneer.versionfile_source = 'numba/_version.py'
versioneer.versionfile_build = 'numba/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'numba-'

cmdclass = versioneer.get_cmdclass()

# FIX: close README.rst explicitly instead of leaking the file handle.
with open('README.rst') as readme_file:
    setup_args = {
        'long_description': readme_file.read(),
    }

GCCFLAGS = ["-std=c89", "-Wdeclaration-after-statement", "-Werror"]

if os.environ.get("NUMBA_GCC_FLAGS"):
    CFLAGS = GCCFLAGS
else:
    CFLAGS = []

# Python 2.6 on OS X needs an explicit libstdc++ link for the C++ extensions.
if sys.platform == 'darwin' and sys.version_info[:2] == (2, 6):
    cpp_link_args = ['-lstdc++']
else:
    cpp_link_args = []

# Reserve header space so install_name_tool can rewrite library paths on OS X.
install_name_tool_fixer = []
if sys.platform == 'darwin':
    install_name_tool_fixer += ['-headerpad_max_install_names']

# Compile/link info for numpy's npymath static library.
npymath_info = np_misc.get_info('npymath')
# --- C extension modules (CFLAGS / cpp_link_args / npymath_info defined above) ---

# Lifecycle support for dynamically created functions and closures.
ext_dynfunc = Extension(name='numba._dynfunc', sources=['numba/_dynfunc.c'],
                        extra_compile_args=CFLAGS,
                        depends=["numba/_pymodule.h"])

# Re-exports of numpy's npymath symbols.
ext_npymath_exports = Extension(name='numba._npymath_exports',
                                sources=['numba/_npymath_exports.c'],
                                include_dirs=npymath_info['include_dirs'],
                                libraries=npymath_info['libraries'],
                                library_dirs=npymath_info['library_dirs'],
                                define_macros=npymath_info['define_macros'])

# Call dispatcher; mixes C and C++ sources, hence cpp_link_args.
ext_dispatcher = Extension(name="numba._dispatcher",
                           include_dirs=[numpy.get_include()],
                           sources=['numba/_dispatcher.c',
                                    'numba/_typeof.c',
                                    'numba/_hashtable.c',
                                    'numba/_dispatcherimpl.cpp',
                                    'numba/typeconv/typeconv.cpp'],
                           depends=["numba/_pymodule.h",
                                    "numba/_dispatcher.h",
                                    "numba/_typeof.h",
                                    "numba/_hashtable.h"],
                           extra_link_args=cpp_link_args)

# Runtime helper functions (incl. a C99 math shim).
ext_helperlib = Extension(name="numba._helperlib",
                          include_dirs=[numpy.get_include()],
                          sources=["numba/_helperlib.c", "numba/_math_c99.c"],
                          extra_compile_args=CFLAGS,
                          extra_link_args=install_name_tool_fixer,
                          depends=["numba/_pymodule.h",
                                   "numba/_math_c99.h",
                                   "numba/mathnames.inc"])

# Type conversion machinery (C++).
ext_typeconv = Extension(name="numba.typeconv._typeconv",
                         sources=["numba/typeconv/typeconv.cpp",
                                  "numba/typeconv/_typeconv.cpp"],
                         depends=["numba/_pymodule.h"],
                         extra_link_args=cpp_link_args)

# Internal support for numpy ufunc creation.
ext_npyufunc_ufunc = Extension(name="numba.npyufunc._internal",
                              sources=["numba/npyufunc/_internal.c"],
                              include_dirs=[numpy.get_include()],
                              depends=["numba/npyufunc/_ufunc.c",
                                       "numba/npyufunc/_internal.h",
                                       "numba/_pymodule.h"])

# Memoryview buffer utilities.
ext_mviewbuf = Extension(name='numba.mviewbuf',
                         sources=['numba/mviewbuf.c'])

# Numba runtime (NRT): reference-counted memory management.
ext_nrt_python = Extension(name='numba.runtime._nrt_python',
                           sources=['numba/runtime/_nrt_python.c',
                                    'numba/runtime/nrt.c'],
                           depends=['numba/runtime/nrt.h',
                                    'numba/_pymodule.h'],
                           include_dirs=["numba"] + npymath_info['include_dirs'])

ext_modules = [ext_dynfunc, ext_npymath_exports, ext_dispatcher,
               ext_helperlib, ext_typeconv, ext_npyufunc_ufunc, ext_mviewbuf,
               ext_nrt_python]
def find_packages(root_dir, root_name):
    """
    Recursively find packages in *root_dir*.

    A directory counts as a package when it contains an ``__init__.py``;
    returned names are dotted paths rooted at *root_name*.
    """
    found = []

    def _walk(path, pkg_name):
        found.append(pkg_name)
        for entry in sorted(os.listdir(path)):
            child = os.path.join(path, entry)
            if os.path.exists(os.path.join(child, "__init__.py")):
                _walk(child, "%s.%s" % (pkg_name, entry))

    _walk(root_dir, root_name)
    return found
# Discover all sub-packages under the top-level "numba" package.
packages = find_packages("numba", "numba")

# Runtime dependencies; stdlib backports are needed on older interpreters.
install_requires = ['llvmlite', 'numpy']
if sys.version_info < (3, 4):
    # enum and singledispatch entered the stdlib in 3.4.
    install_requires.extend(['enum34', 'singledispatch'])
if sys.version_info < (3, 3):
    # inspect.signature entered the stdlib in 3.3.
    install_requires.append('funcsigs')

setup(name='numba',
      description="compiling Python code using LLVM",
      version=versioneer.get_version(),

      classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Software Development :: Compilers",
      ],
      package_data={
        "numba.cuda.tests.cudadrv.data": ["*.ptx"],
        "numba.annotations": ["*.html"],
        "numba.hsa.tests.hsadrv": ["*.brig"],
      },
      scripts=["numba/pycc/pycc", "bin/numba"],
      author="Continuum Analytics, Inc.",
      author_email="<EMAIL>",
      url="http://numba.github.com",
      ext_modules=ext_modules,
      packages=packages,
      install_requires=install_requires,
      license="BSD",
      cmdclass=cmdclass,
      **setup_args)
| [
"versioneer.get_version",
"numpy.distutils.misc_util.get_info",
"versioneer.get_cmdclass",
"os.environ.get",
"distutils.core.Extension",
"numpy.get_include",
"os.path.join",
"os.listdir"
] | [((651, 676), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (674, 676), False, 'import versioneer\n'), ((819, 852), 'os.environ.get', 'os.environ.get', (['"""NUMBA_GCC_FLAGS"""'], {}), "('NUMBA_GCC_FLAGS')\n", (833, 852), False, 'import os\n'), ((1168, 1195), 'numpy.distutils.misc_util.get_info', 'np_misc.get_info', (['"""npymath"""'], {}), "('npymath')\n", (1184, 1195), True, 'import numpy.distutils.misc_util as np_misc\n'), ((1211, 1335), 'distutils.core.Extension', 'Extension', ([], {'name': '"""numba._dynfunc"""', 'sources': "['numba/_dynfunc.c']", 'extra_compile_args': 'CFLAGS', 'depends': "['numba/_pymodule.h']"}), "(name='numba._dynfunc', sources=['numba/_dynfunc.c'],\n extra_compile_args=CFLAGS, depends=['numba/_pymodule.h'])\n", (1220, 1335), False, 'from distutils.core import setup, Extension\n'), ((1403, 1663), 'distutils.core.Extension', 'Extension', ([], {'name': '"""numba._npymath_exports"""', 'sources': "['numba/_npymath_exports.c']", 'include_dirs': "npymath_info['include_dirs']", 'libraries': "npymath_info['libraries']", 'library_dirs': "npymath_info['library_dirs']", 'define_macros': "npymath_info['define_macros']"}), "(name='numba._npymath_exports', sources=[\n 'numba/_npymath_exports.c'], include_dirs=npymath_info['include_dirs'],\n libraries=npymath_info['libraries'], library_dirs=npymath_info[\n 'library_dirs'], define_macros=npymath_info['define_macros'])\n", (1412, 1663), False, 'from distutils.core import setup, Extension\n'), ((3021, 3208), 'distutils.core.Extension', 'Extension', ([], {'name': '"""numba.typeconv._typeconv"""', 'sources': "['numba/typeconv/typeconv.cpp', 'numba/typeconv/_typeconv.cpp']", 'depends': "['numba/_pymodule.h']", 'extra_link_args': 'cpp_link_args'}), "(name='numba.typeconv._typeconv', sources=[\n 'numba/typeconv/typeconv.cpp', 'numba/typeconv/_typeconv.cpp'], depends\n =['numba/_pymodule.h'], extra_link_args=cpp_link_args)\n", (3030, 3208), False, 'from distutils.core import setup, 
Extension\n'), ((3726, 3788), 'distutils.core.Extension', 'Extension', ([], {'name': '"""numba.mviewbuf"""', 'sources': "['numba/mviewbuf.c']"}), "(name='numba.mviewbuf', sources=['numba/mviewbuf.c'])\n", (3735, 3788), False, 'from distutils.core import setup, Extension\n'), ((3832, 4062), 'distutils.core.Extension', 'Extension', ([], {'name': '"""numba.runtime._nrt_python"""', 'sources': "['numba/runtime/_nrt_python.c', 'numba/runtime/nrt.c']", 'depends': "['numba/runtime/nrt.h', 'numba/_pymodule.h']", 'include_dirs': "(['numba'] + npymath_info['include_dirs'])"}), "(name='numba.runtime._nrt_python', sources=[\n 'numba/runtime/_nrt_python.c', 'numba/runtime/nrt.c'], depends=[\n 'numba/runtime/nrt.h', 'numba/_pymodule.h'], include_dirs=['numba'] +\n npymath_info['include_dirs'])\n", (3841, 4062), False, 'from distutils.core import setup, Extension\n'), ((5191, 5215), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (5213, 5215), False, 'import versioneer\n'), ((1906, 1925), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1923, 1925), False, 'import numpy\n'), ((2613, 2632), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2630, 2632), False, 'import numpy\n'), ((3489, 3508), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3506, 3508), False, 'import numpy\n'), ((4586, 4602), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4596, 4602), False, 'import os\n'), ((4627, 4649), 'os.path.join', 'os.path.join', (['path', 'fn'], {}), '(path, fn)\n', (4639, 4649), False, 'import os\n'), ((4680, 4716), 'os.path.join', 'os.path.join', (['subpath', '"""__init__.py"""'], {}), "(subpath, '__init__.py')\n", (4692, 4716), False, 'import os\n')] |
import numpy as np
from mxnet.gluon.loss import Loss
class Tanimoto(Loss):
    """Volume-weighted Tanimoto overlap between predictions and labels.

    Per-class weights are 1/V^2 (V = mean label volume over the batch);
    infinite weights from empty classes are replaced with the largest
    finite weight.  Returns one Tanimoto value per batch element.
    """

    def __init__(self, _smooth=1.0e-5, _axis=[2, 3], _weight=None, _batch_axis=0, **kwards):
        Loss.__init__(self, weight=_weight, batch_axis=_batch_axis, **kwards)
        self.axis = _axis      # spatial axes summed over
        self.smooth = _smooth  # numerical stabilizer

    def hybrid_forward(self, F, _preds, _label):
        # Evaluate the mean volume of class per batch
        Vli = F.mean(F.sum(_label, axis=self.axis), axis=0)
        # wli = 1.0/Vli**2 # weighting scheme
        wli = F.reciprocal(Vli**2)  # weighting scheme

        # ---------------------This line is taken from niftyNet package --------------
        # ref: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py, lines:170 -- 172
        # First turn inf elements to zero, then replace that with the maximum weight value
        # FIX: was np.float('inf'); the np.float alias was removed in
        # NumPy 1.24 and raises AttributeError -- use the builtin float.
        new_weights = F.where(wli == float('inf'), F.zeros_like(wli), wli)
        wli = F.where(wli == float('inf'), F.broadcast_mul(F.ones_like(wli), F.max(new_weights)), wli)
        # ************************************************************************************************

        rl_x_pl = F.sum(F.broadcast_mul(_label, _preds), axis=self.axis)
        # This is sum of squares
        l = F.sum(F.broadcast_mul(_label, _label), axis=self.axis)
        r = F.sum(F.broadcast_mul(_preds, _preds), axis=self.axis)

        rl_p_pl = l + r - rl_x_pl

        tnmt = (F.sum(F.broadcast_mul(wli, rl_x_pl), axis=1) + self.smooth) / (F.sum(F.broadcast_mul(wli, rl_p_pl), axis=1) + self.smooth)
        return tnmt  # one Tanimoto value per data point (vector of batch size)
# This is the loss used in the manuscript of resuneta
class Tanimoto_wth_dual(Loss):
    """
    Tanimoto coefficient with dual from: Diakogiannis et al 2019 (https://arxiv.org/abs/1904.00592)
    Note: to use it in deep learning training use: return 1. - 0.5*(loss1+loss2)
    """

    def __init__(self, _smooth=1.0e-5, _axis=[2, 3], _weight=None, _batch_axis=0, **kwards):
        Loss.__init__(self, weight=_weight, batch_axis=_batch_axis, **kwards)
        with self.name_scope():
            # Underlying single-sided Tanimoto measure.
            self.Loss = Tanimoto(_smooth=_smooth, _axis=_axis)

    def hybrid_forward(self, F, _preds, _label):
        # Average the overlap of the masks with the overlap of their complements.
        direct = self.Loss(_preds, _label)
        complement = self.Loss(1.0 - _preds, 1.0 - _label)
        return 0.5 * (direct + complement)
| [
"numpy.float",
"mxnet.gluon.loss.Loss.__init__"
] | [((182, 251), 'mxnet.gluon.loss.Loss.__init__', 'Loss.__init__', (['self'], {'weight': '_weight', 'batch_axis': '_batch_axis'}), '(self, weight=_weight, batch_axis=_batch_axis, **kwards)\n', (195, 251), False, 'from mxnet.gluon.loss import Loss\n'), ((2489, 2558), 'mxnet.gluon.loss.Loss.__init__', 'Loss.__init__', (['self'], {'weight': '_weight', 'batch_axis': '_batch_axis'}), '(self, weight=_weight, batch_axis=_batch_axis, **kwards)\n', (2502, 2558), False, 'from mxnet.gluon.loss import Loss\n'), ((1288, 1303), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (1296, 1303), True, 'import numpy as np\n'), ((1360, 1375), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (1368, 1375), True, 'import numpy as np\n')] |
import numpy as np
from common.utils import *
class StringSimilaritySorter:
    """Sort entity candidates by string distance between *surface* and the
    candidate's label / URI tail.

    Candidates are (uri, label, ...) tuples; the first 28 characters of the
    URI are skipped -- presumably a fixed namespace prefix, TODO confirm
    against the caller.
    """

    def __init__(self, metric, metric_range_percentage=False, return_similarity=False):
        self.metric = metric  # callable(str, str) -> distance
        self.metric_range_percentage = metric_range_percentage  # True if metric already lies in [0, 1]
        self.return_similarity = return_similarity

    @profile
    def sort(self, surface, question, candidates):
        """Return candidates sorted by (URI distance, then label distance).

        `question` is unused here but kept for interface compatibility.
        When return_similarity is set, a similarity column is appended.
        """
        if candidates is None or len(candidates) == 0:
            return []
        candidates_distance = np.array(
            [(self.metric(surface, candidate[1].lower()), self.metric(surface, candidate[0][28:].lower()))
             for candidate in candidates], dtype=float)
        exact_match_idx = [idx for idx, candidate in enumerate(candidates) if surface in candidate[1].lower()]
        # FIX: `candidates_distance[exact_match_idx][:, 0] /= 2` modified a
        # fancy-indexed *copy*, so the exact-match boost was silently lost.
        # Index rows and column in one subscript to update in place.
        candidates_distance[exact_match_idx, 0] /= 2
        filtered_candidates = np.array(candidates, dtype=object)
        # lexsort uses the LAST key as primary: URI distance first, then label distance.
        idxs = np.lexsort((candidates_distance[:, 0], candidates_distance[:, 1]))
        if self.return_similarity:
            if self.metric_range_percentage:
                candidates_similarity = 1 - candidates_distance[:, 0]
            else:
                # Normalize by the longer of surface/label length.
                surface_len = len(surface)
                candidates_len = np.array([max(surface_len, len(candidate[1])) for candidate in candidates])
                candidates_similarity = 1 - (candidates_distance[:, 0] / candidates_len)
            output = np.hstack((filtered_candidates[idxs], candidates_similarity[idxs].reshape(-1, 1)))
        else:
            output = filtered_candidates[idxs]
        return output
| [
"numpy.lexsort",
"numpy.array"
] | [((851, 885), 'numpy.array', 'np.array', (['candidates'], {'dtype': 'object'}), '(candidates, dtype=object)\n', (859, 885), True, 'import numpy as np\n'), ((901, 967), 'numpy.lexsort', 'np.lexsort', (['(candidates_distance[:, 0], candidates_distance[:, 1])'], {}), '((candidates_distance[:, 0], candidates_distance[:, 1]))\n', (911, 967), True, 'import numpy as np\n')] |
from bdgtools.bedgraph import BedGraph, BedGraphArray
from bdgtools.regions import Regions
import numpy as np
import pytest
@pytest.fixture
def bedgraph():
    # Step function: value i starts at the i-th position, over a 50-long axis.
    positions = [0, 10, 15, 25, 40]
    values = [0, 1, 2, 3, 4]
    return BedGraph(positions, values, size=50)
@pytest.fixture
def bedgrapharray():
    # Two rows (offsets [0, 2, 5]) covering sizes 15 and 35.
    return BedGraphArray(
        [0, 10, 0, 10, 25],
        [0, 1, 2, 3, 4],
        [15, 35],
        [0, 2, 5],
    )
@pytest.fixture
def regions():
    # Three regions; the last two lie on the reverse strand.
    return Regions([2, 13, 17], [12, 27, 36], [1, -1, -1])
def test_getitem(bedgraph):
    # Point lookups return the value of the interval covering each position.
    queried = bedgraph[[10, 11, 14, 15, 16]]
    assert queried == [1, 1, 1, 2, 2]
def test_getitem_slice(bedgraph):
    # Slicing re-bases interval starts to the slice origin.
    cases = [
        (slice(9, 25), BedGraph([0, 1, 6], [0, 1, 2])),
        (slice(10, 26), BedGraph([0, 5, 15], [1, 2, 3])),
        (slice(0, 9), BedGraph([0], [0])),
        (slice(1, 9), BedGraph([0], [0])),
        (slice(15, 42), BedGraph([0, 10, 25], [2, 3, 4])),
        (slice(16, 41), BedGraph([0, 9, 24], [2, 3, 4])),
    ]
    for window, expected in cases:
        assert bedgraph[window] == expected
def test_reverse(bedgraph):
    # Reversal mirrors interval starts within the size and flips value order.
    # FIX: removed a dead `return BedGraph(...)` after the assert
    # (copy-paste leftover; pytest warns on tests returning non-None).
    assert bedgraph.reverse() == BedGraph([0, 10, 25, 35, 40], [4, 3, 2, 1, 0], size=50)
def test_extract_regions(bedgraph, regions):
    # Reverse-strand regions come out reversed; each keeps its own length.
    expected = [BedGraph([0, 8], [0, 1], 10),
                BedGraph([0, 2, 12], [3, 2, 1], 14),
                BedGraph([0, 11], [3, 2], 19)]
    extracted = list(bedgraph.extract_regions(regions))
    for got, want in zip(extracted, expected):
        assert got == want
def test_concat(bedgraph):
    # Concatenation offsets the second graph by the size of the first.
    doubled = BedGraph.concatenate([bedgraph, bedgraph])
    expected = BedGraph([0, 10, 15, 25, 40, 50, 60, 65, 75, 90],
                        [0, 1, 2, 3, 4] * 2, size=100)
    assert doubled == expected
def test_join_rows(bedgraph, bedgrapharray):
    # Joining both rows of the array reconstructs the single bedgraph.
    joined = list(bedgrapharray.join_rows([0, 2]))[0]
    assert bedgraph == joined
def test_join_rows2(bedgraph, bedgrapharray):
    # Stacking the array with itself and joining row pairs yields two
    # copies of the original bedgraph.
    stacked = BedGraphArray.vstack((bedgrapharray, bedgrapharray))
    for joined in stacked.join_rows([0, 2, 4]):
        assert bedgraph == joined
def test_extract_regions_bga(bedgraph, regions):
    # With every region forced to the forward strand, array extraction
    # matches single-bedgraph extraction.
    regions.directions = np.array([1, 1, 1], dtype="int")
    stacked = BedGraphArray.from_bedgraphs([bedgraph] * 3)
    assert stacked.extract_regions(regions) == bedgraph.extract_regions(regions)
| [
"bdgtools.bedgraph.BedGraphArray",
"bdgtools.bedgraph.BedGraphArray.from_bedgraphs",
"bdgtools.bedgraph.BedGraphArray.vstack",
"numpy.array",
"bdgtools.regions.Regions",
"bdgtools.bedgraph.BedGraph",
"bdgtools.bedgraph.BedGraph.concatenate"
] | [((168, 223), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 10, 15, 25, 40]', '[0, 1, 2, 3, 4]'], {'size': '(50)'}), '([0, 10, 15, 25, 40], [0, 1, 2, 3, 4], size=50)\n', (176, 223), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((273, 344), 'bdgtools.bedgraph.BedGraphArray', 'BedGraphArray', (['[0, 10, 0, 10, 25]', '[0, 1, 2, 3, 4]', '[15, 35]', '[0, 2, 5]'], {}), '([0, 10, 0, 10, 25], [0, 1, 2, 3, 4], [15, 35], [0, 2, 5])\n', (286, 344), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((466, 499), 'bdgtools.regions.Regions', 'Regions', (['starts', 'ends', 'directions'], {}), '(starts, ends, directions)\n', (473, 499), False, 'from bdgtools.regions import Regions\n'), ((1120, 1175), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 10, 15, 25, 40]', '[0, 1, 2, 3, 4]'], {'size': '(50)'}), '([0, 10, 15, 25, 40], [0, 1, 2, 3, 4], size=50)\n', (1128, 1175), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1517, 1559), 'bdgtools.bedgraph.BedGraph.concatenate', 'BedGraph.concatenate', (['[bedgraph, bedgraph]'], {}), '([bedgraph, bedgraph])\n', (1537, 1559), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1571, 1656), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 10, 15, 25, 40, 50, 60, 65, 75, 90]', '([0, 1, 2, 3, 4] * 2)'], {'size': '(100)'}), '([0, 10, 15, 25, 40, 50, 60, 65, 75, 90], [0, 1, 2, 3, 4] * 2, size=100\n )\n', (1579, 1656), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1856, 1908), 'bdgtools.bedgraph.BedGraphArray.vstack', 'BedGraphArray.vstack', (['(bedgrapharray, bedgrapharray)'], {}), '((bedgrapharray, bedgrapharray))\n', (1876, 1908), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((2056, 2088), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': '"""int"""'}), "([1, 1, 1], dtype='int')\n", (2064, 2088), True, 'import numpy as np\n'), ((2097, 2141), 'bdgtools.bedgraph.BedGraphArray.from_bedgraphs', 
'BedGraphArray.from_bedgraphs', (['([bedgraph] * 3)'], {}), '([bedgraph] * 3)\n', (2125, 2141), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((674, 704), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 1, 6]', '[0, 1, 2]'], {}), '([0, 1, 6], [0, 1, 2])\n', (682, 704), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((735, 766), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 5, 15]', '[1, 2, 3]'], {}), '([0, 5, 15], [1, 2, 3])\n', (743, 766), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((795, 813), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0]', '[0]'], {}), '([0], [0])\n', (803, 813), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((842, 860), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0]', '[0]'], {}), '([0], [0])\n', (850, 860), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((891, 923), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 10, 25]', '[2, 3, 4]'], {}), '([0, 10, 25], [2, 3, 4])\n', (899, 923), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((954, 985), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 9, 24]', '[2, 3, 4]'], {}), '([0, 9, 24], [2, 3, 4])\n', (962, 985), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1052, 1107), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 10, 25, 35, 40]', '[4, 3, 2, 1, 0]'], {'size': '(50)'}), '([0, 10, 25, 35, 40], [4, 3, 2, 1, 0], size=50)\n', (1060, 1107), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1288, 1316), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 8]', '[0, 1]', '(10)'], {}), '([0, 8], [0, 1], 10)\n', (1296, 1316), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1330, 1365), 'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 2, 12]', '[3, 2, 1]', '(14)'], {}), '([0, 2, 12], [3, 2, 1], 14)\n', (1338, 1365), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n'), ((1380, 1409), 
'bdgtools.bedgraph.BedGraph', 'BedGraph', (['[0, 11]', '[3, 2]', '(19)'], {}), '([0, 11], [3, 2], 19)\n', (1388, 1409), False, 'from bdgtools.bedgraph import BedGraph, BedGraphArray\n')] |
import os
import sys
import unittest as ut

import numpy as np
# FIX: NumPy's print threshold must be an integer; `threshold=np.nan`
# raises ValueError on modern NumPy (and np.nan was never a valid value).
# sys.maxsize is the documented way to disable array summarization.
np.set_printoptions(threshold=sys.maxsize)
class TestGLSLCPU(ut.TestCase):
    """Placeholder test case for the GLSL CPU backend."""

    def setUp(self):
        # Show full diffs on assertion failures.
        self.maxDiff = None

# todo: add standalone tests
| [
"numpy.set_printoptions"
] | [((52, 89), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (71, 89), True, 'import numpy as np\n')] |
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import random
import numpy as np
from imageio import mimread
import nnabla.logger as logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.image_utils import imread
def read_video(name, frame_shape):
    """
    Load a video as a float array in [0, 1].

    *name* may be a directory of frame images or a .gif/.mp4/.mov file;
    note that this function assumes the data is stored as RGB.
    """
    if os.path.isdir(name):
        frame_files = sorted(os.listdir(name))
        video_array = np.array(
            [imread(os.path.join(name, frame)) / 255. for frame in frame_files])
    elif name.lower().endswith(('.gif', '.mp4', '.mov')):
        video = np.array(mimread(name, memtest=False,
                                  size=tuple(frame_shape[:2])))
        if video.shape[-1] == 4:
            # Drop the alpha channel.
            video = video[..., :3]
        video_array = video / 255.
    else:
        raise Exception("Unknown file extensions %s" % name)
    return video_array
class FramesDataSource(DataSource):
def __init__(self, root_dir, frame_shape=(256, 256, 3),
id_sampling=False, is_train=True,
random_seed=0,
augmentation_params=None,
shuffle=True):
super(FramesDataSource, self).__init__()
self.root_dir = root_dir
self.videos = os.listdir(root_dir)
self.frame_shape = tuple(frame_shape)
self.id_sampling = id_sampling
self._shuffle = shuffle
if os.path.exists(os.path.join(root_dir, 'train')):
assert os.path.exists(os.path.join(root_dir, 'test'))
logger.info("Use predefined train-test split.")
if id_sampling:
train_videos = {os.path.basename(video).split('#')[0] for video in
os.listdir(os.path.join(root_dir, 'train'))}
train_videos = list(train_videos)
else:
train_videos = os.listdir(os.path.join(root_dir, 'train'))
test_videos = os.listdir(os.path.join(root_dir, 'test'))
if is_train:
self.root_dir = os.path.join(self.root_dir, 'train')
else:
self.root_dir = os.path.join(self.root_dir, 'test')
else:
logger.info("Use random train-test split.")
random.shuffle(self.videos)
num_test_samples = int(len(self.videos) * 0.2)
train_videos, test_videos = self.videos[num_test_samples:], self.videos[:num_test_samples]
if is_train:
self.videos = train_videos
else:
self.videos = test_videos
self.is_train = is_train
if self.is_train:
self.transform = True
else:
self.transform = None
logger.info(f'using data in {self.root_dir}')
# requirement
self._size = len(self.videos)
if self.is_train:
self._variables = ('driving', 'source')
else:
self._variables = ('video', 'name')
self.reset()
def _get_data(self, position):
idx = self._indexes[position]
if self.is_train and self.id_sampling:
name = self.videos[idx]
path = np.random.choice(
glob.glob(os.path.join(self.root_dir, name + '*.mp4')))
path = str(path)
else:
name = self.videos[idx]
path = os.path.join(self.root_dir, name)
if self.is_train and os.path.isdir(path):
frames = os.listdir(path)
num_frames = len(frames)
frame_idx = np.sort(np.random.choice(
num_frames, replace=True, size=2))
video_array = [
imread(os.path.join(path, frames[idx])) / 255.0 for idx in frame_idx]
else:
video_array = read_video(path, frame_shape=self.frame_shape)
num_frames = len(video_array)
if self.is_train:
frame_idx = np.sort(np.random.choice(
num_frames, replace=True, size=2))
else:
frame_idx = range(num_frames)
video_array = video_array[frame_idx]
if self.transform is not None:
if random.random() < 0.5:
video_array = video_array[::-1]
if random.random() < 0.5:
video_array = [np.fliplr(img) for img in video_array]
out = {}
if self.is_train:
source = np.array(video_array[0], dtype='float32')
driving = np.array(video_array[1], dtype='float32')
out['driving'] = driving.transpose((2, 0, 1))
out['source'] = source.transpose((2, 0, 1))
else:
video = np.array(video_array, dtype='float32')
out['video'] = video.transpose((3, 0, 1, 2))
if self.is_train:
return out["driving"], out["source"]
else:
return out["video"], out["name"]
def reset(self):
# reset method initialize self._indexes
if self._shuffle:
self._indexes = np.arange(self._size)
np.random.shuffle(self._indexes)
else:
self._indexes = np.arange(self._size)
super(FramesDataSource, self).reset()
def frame_data_iterator(root_dir, frame_shape=(256, 256, 3), id_sampling=False,
is_train=True, random_seed=0,
augmentation_params=None, batch_size=1, shuffle=True,
with_memory_cache=False, with_file_cache=False):
return data_iterator(FramesDataSource(root_dir=root_dir,
frame_shape=frame_shape,
id_sampling=id_sampling,
is_train=is_train,
random_seed=random_seed,
augmentation_params=augmentation_params,
shuffle=shuffle),
batch_size=batch_size,
rng=random_seed,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache)
| [
"os.path.join",
"os.path.basename",
"os.path.isdir",
"random.shuffle",
"random.random",
"numpy.fliplr",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"nnabla.logger.info",
"os.listdir",
"numpy.random.shuffle"
] | [((1032, 1051), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (1045, 1051), False, 'import os\n'), ((2036, 2056), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (2046, 2056), False, 'import os\n'), ((3480, 3525), 'nnabla.logger.info', 'logger.info', (['f"""using data in {self.root_dir}"""'], {}), "(f'using data in {self.root_dir}')\n", (3491, 3525), True, 'import nnabla.logger as logger\n'), ((1077, 1093), 'os.listdir', 'os.listdir', (['name'], {}), '(name)\n', (1087, 1093), False, 'import os\n'), ((2201, 2232), 'os.path.join', 'os.path.join', (['root_dir', '"""train"""'], {}), "(root_dir, 'train')\n", (2213, 2232), False, 'import os\n'), ((2313, 2360), 'nnabla.logger.info', 'logger.info', (['"""Use predefined train-test split."""'], {}), "('Use predefined train-test split.')\n", (2324, 2360), True, 'import nnabla.logger as logger\n'), ((2969, 3012), 'nnabla.logger.info', 'logger.info', (['"""Use random train-test split."""'], {}), "('Use random train-test split.')\n", (2980, 3012), True, 'import nnabla.logger as logger\n'), ((3025, 3052), 'random.shuffle', 'random.shuffle', (['self.videos'], {}), '(self.videos)\n', (3039, 3052), False, 'import random\n'), ((4113, 4146), 'os.path.join', 'os.path.join', (['self.root_dir', 'name'], {}), '(self.root_dir, name)\n', (4125, 4146), False, 'import os\n'), ((4177, 4196), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4190, 4196), False, 'import os\n'), ((4219, 4235), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4229, 4235), False, 'import os\n'), ((5169, 5210), 'numpy.array', 'np.array', (['video_array[0]'], {'dtype': '"""float32"""'}), "(video_array[0], dtype='float32')\n", (5177, 5210), True, 'import numpy as np\n'), ((5233, 5274), 'numpy.array', 'np.array', (['video_array[1]'], {'dtype': '"""float32"""'}), "(video_array[1], dtype='float32')\n", (5241, 5274), True, 'import numpy as np\n'), ((5424, 5462), 'numpy.array', 'np.array', (['video_array'], {'dtype': 
'"""float32"""'}), "(video_array, dtype='float32')\n", (5432, 5462), True, 'import numpy as np\n'), ((5779, 5800), 'numpy.arange', 'np.arange', (['self._size'], {}), '(self._size)\n', (5788, 5800), True, 'import numpy as np\n'), ((5813, 5845), 'numpy.random.shuffle', 'np.random.shuffle', (['self._indexes'], {}), '(self._indexes)\n', (5830, 5845), True, 'import numpy as np\n'), ((5888, 5909), 'numpy.arange', 'np.arange', (['self._size'], {}), '(self._size)\n', (5897, 5909), True, 'import numpy as np\n'), ((2269, 2299), 'os.path.join', 'os.path.join', (['root_dir', '"""test"""'], {}), "(root_dir, 'test')\n", (2281, 2299), False, 'import os\n'), ((2729, 2759), 'os.path.join', 'os.path.join', (['root_dir', '"""test"""'], {}), "(root_dir, 'test')\n", (2741, 2759), False, 'import os\n'), ((2819, 2855), 'os.path.join', 'os.path.join', (['self.root_dir', '"""train"""'], {}), "(self.root_dir, 'train')\n", (2831, 2855), False, 'import os\n'), ((2906, 2941), 'os.path.join', 'os.path.join', (['self.root_dir', '"""test"""'], {}), "(self.root_dir, 'test')\n", (2918, 2941), False, 'import os\n'), ((4305, 4355), 'numpy.random.choice', 'np.random.choice', (['num_frames'], {'replace': '(True)', 'size': '(2)'}), '(num_frames, replace=True, size=2)\n', (4321, 4355), True, 'import numpy as np\n'), ((4925, 4940), 'random.random', 'random.random', ([], {}), '()\n', (4938, 4940), False, 'import random\n'), ((5011, 5026), 'random.random', 'random.random', ([], {}), '()\n', (5024, 5026), False, 'import random\n'), ((2659, 2690), 'os.path.join', 'os.path.join', (['root_dir', '"""train"""'], {}), "(root_dir, 'train')\n", (2671, 2690), False, 'import os\n'), ((3969, 4012), 'os.path.join', 'os.path.join', (['self.root_dir', "(name + '*.mp4')"], {}), "(self.root_dir, name + '*.mp4')\n", (3981, 4012), False, 'import os\n'), ((4684, 4734), 'numpy.random.choice', 'np.random.choice', (['num_frames'], {'replace': '(True)', 'size': '(2)'}), '(num_frames, replace=True, size=2)\n', (4700, 4734), True, 
'import numpy as np\n'), ((5065, 5079), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (5074, 5079), True, 'import numpy as np\n'), ((1181, 1212), 'os.path.join', 'os.path.join', (['name', 'frames[idx]'], {}), '(name, frames[idx])\n', (1193, 1212), False, 'import os\n'), ((4425, 4456), 'os.path.join', 'os.path.join', (['path', 'frames[idx]'], {}), '(path, frames[idx])\n', (4437, 4456), False, 'import os\n'), ((2515, 2546), 'os.path.join', 'os.path.join', (['root_dir', '"""train"""'], {}), "(root_dir, 'train')\n", (2527, 2546), False, 'import os\n'), ((2421, 2444), 'os.path.basename', 'os.path.basename', (['video'], {}), '(video)\n', (2437, 2444), False, 'import os\n')] |
import sys
import pandas as pd
import numpy as np
def topsis(file,weight,impact,output):
## Handling invalid file type exception
if(file.split('.')[-1]!='csv'):
print("[ERROR]File extension not supported! Must be csv flie")
exit(0)
## Handling File not present exception
try:
df=pd.read_csv(f'./{file}')
except :
print(f'[ERROR]{file} does not exist!s')
sys.exit(0)
models=df.iloc[:,0].values.tolist()
data=df.iloc[:,1:].values.tolist()
# print(data)
# print(weight)
## Handling insufficient number of columsn exception
if len(data[0])<3:
print("[Error]Insufficient number of columns must be atleast 3")
sys.exit(0)
## Handling Wrong weights format exception
try:
weights=list(map(int,weight.strip().split(',')))
except:
print("[ERROR] weights must be provided in format '1,0.5,2,1' and seperated by ',' and all weights must be numeric values")
sys.exit(0)
# print(type(weights))
# print(len(data[0]))
## Handling Wrong number of weights exception
if(len(weights)!=len(data[0])):
print(f"[ERROR]Number of weights should be :{len(data[0])}")
sys.exit(0)
## Handling negative weights exception
if any(x <0 for x in weights):
print("[ERROR] Weights Must be positive")
sys.exit(0)
## Handling Wrong Impacts format exception
try:
impacts=list(impact.strip().split(','))
except:
print("[ERROR] impacts must be provided in format '+,-,+,-,+' and seperated by ','")
sys.exit(0)
# print(impacts)
## Handling Wrong number of impacts exception
if(len(impacts)!=len(data[0])):
print(f"[ERROR]Number of impacts should be :{len(data[0])}")
sys.exit(0)
## Handling of impact either + or - exception
signs='+-'
if any(x not in signs for x in impacts):
print("[ERROR] impacts can only be '+' or '-'")
sys.exit(0)
# -------------- 1. Calculating Root Mean Square of each column -------------- #
# print(data)
rms=[0]*(len(data[0]))
for i in range(len(data[0])):
for j in range(len(data)):
rms[i]=rms[i]+data[j][i]**2
rms[i]=(rms[i])**(1/2)
# print(rms)
# ------------------------ 2 . Noramalization of data ------------------------ #
normalised=[]
for i in range(len(data)):
l = list()
for j in range(len(data[0])):
l.append(data[i][j]/rms[j])
normalised.append(l)
# print(noramalised)
# -------------------------- 3. Calculating Weights -------------------------- #
s=sum(weights)
# print(weights)
for i in range(len(weights)):
weights[i]/=s
# print(weights)
# --------------------- 4. Multiplying data with weights --------------------- #
# print(noramalised)
for i in range(len(data[0])):
for j in range(len(data)):
normalised[j][i]*=weights[i]
# print(noramalised)
# ----------------- 5. Calculating Ideal Best and Ideal Worst ---------------- #
idealBest=[]
idealWorst=[]
for i in range(len(normalised[0])):
if(impacts[i]=='+'):
idealBest.append(np.max([ x[i] for x in normalised] ))
idealWorst.append(np.min([ x[i] for x in normalised] ))
if(impacts[i]=='-'):
idealWorst.append(np.max([ x[i] for x in normalised] ))
idealBest.append(np.min([ x[i] for x in normalised] ))
# print(idealBest)
# print(idealWorst)
# for i in range(len(normalised)):
# for j in range(len(normalised[0])):
# print(normalised[i][j],end=" ")
# print()
# --------------------- 6. Calculating Performance Score --------------------- #
performance=[]
for i in range(len(normalised)):
pos=0
neg=0
for j in range(len(normalised[0])):
pos+=(normalised[i][j]-idealBest[j])**2
neg+=(normalised[i][j]-idealWorst[j])**2
pos=pos**(1/2)
neg=neg**(1/2)
performance.append(neg/(neg+pos))
ranks = sorted(list(range(1,len(performance)+1)))
pt=sorted(performance,reverse=True)
data2=[]
for i in range(len(data)):
data[i].append(performance[i])
data[i].append(ranks[pt.index(performance[i])])
l=[]
l.append(models[i])
l.extend(data[i])
data2.append(l)
cols=list(df.columns)
cols.extend(['Topsis Score','Rank'])
final=[]
# print(final)
for i in range(len(data)):
final.append(data2[i])
# print(final)
final=pd.DataFrame(final,columns=cols,index=None)
## Handling wrong output filename exception
if(output.split('.')[-1]!='csv'):
print("[ERROR]File extension for output filename not supported! Must be csv flie")
exit(0)
path=f"101917050-{output}"
final.to_csv(path)
def main(filename,output):
# args=sys.argv
# argLen=len(args)
weight=input("Enter weights (seperated by comma) :")
impact=input("Enter impacts(+/-) seperated by comma :")
topsis(filename,weight,impact,output)
# ## Handling Wrong number of arguments exception
# if(argLen!=5):
# print("[ERROR]Invalid number of arguments")
# sys.exit(0)
# else :
# topsis(args[1],args[2],args[3],args[4])
# if __name__=='__main__':
# main(filename,output)
| [
"pandas.DataFrame",
"pandas.read_csv",
"numpy.max",
"numpy.min",
"sys.exit"
] | [((4680, 4725), 'pandas.DataFrame', 'pd.DataFrame', (['final'], {'columns': 'cols', 'index': 'None'}), '(final, columns=cols, index=None)\n', (4692, 4725), True, 'import pandas as pd\n'), ((326, 350), 'pandas.read_csv', 'pd.read_csv', (['f"""./{file}"""'], {}), "(f'./{file}')\n", (337, 350), True, 'import pandas as pd\n'), ((717, 728), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (725, 728), False, 'import sys\n'), ((1224, 1235), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1232, 1235), False, 'import sys\n'), ((1377, 1388), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1385, 1388), False, 'import sys\n'), ((1808, 1819), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1816, 1819), False, 'import sys\n'), ((1995, 2006), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2003, 2006), False, 'import sys\n'), ((421, 432), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (429, 432), False, 'import sys\n'), ((995, 1006), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1003, 1006), False, 'import sys\n'), ((1611, 1622), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1619, 1622), False, 'import sys\n'), ((3267, 3301), 'numpy.max', 'np.max', (['[x[i] for x in normalised]'], {}), '([x[i] for x in normalised])\n', (3273, 3301), True, 'import numpy as np\n'), ((3335, 3369), 'numpy.min', 'np.min', (['[x[i] for x in normalised]'], {}), '([x[i] for x in normalised])\n', (3341, 3369), True, 'import numpy as np\n'), ((3432, 3466), 'numpy.max', 'np.max', (['[x[i] for x in normalised]'], {}), '([x[i] for x in normalised])\n', (3438, 3466), True, 'import numpy as np\n'), ((3499, 3533), 'numpy.min', 'np.min', (['[x[i] for x in normalised]'], {}), '([x[i] for x in normalised])\n', (3505, 3533), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import torch
from iflow.utils.generic import to_numpy
class TestClass():
def __init__(self, dynamics):
self.dynamics = dynamics
self.N = 100
self.dim = dynamics.dim
def points_evolution(self):
x0 = torch.ones(1, self.dim)
trj_n = self.dynamics.generate_trj(x0, T=self.N, noise=False)
trj_n_np = to_numpy(trj_n)
fig, axs = plt.subplots(self.dim)
for i in range(self.dim):
axs[i].plot(trj_n_np[:, 0, i],'*')
plt.show()
plt.plot(trj_n_np[:, 0, 0], trj_n_np[:, 0, 1])
plt.show()
def noise_forward_evaluation(self):
x0 = torch.ones(100, 3)
trj_n = self.dynamics.generate_trj(x0, T=4*self.N, noise=True)
trj_n_np = to_numpy(trj_n)
fig, axs = plt.subplots(self.dim)
for i in range(self.dim):
for j in range(100):
axs[i].plot(trj_n_np[:, j, i])
plt.show()
for j in range(100):
plt.plot(trj_n_np[:, j, 0], trj_n_np[:, j, 1])
plt.show()
def noise_backward_evaluation(self):
x0 = torch.ones(100, 3)
trj_n = self.dynamics.generate_trj(x0, T=4*self.N, noise=True, reverse=True)
trj_n_np = to_numpy(trj_n)
fig, axs = plt.subplots(self.dim)
for i in range(self.dim):
for j in range(100):
axs[i].plot(trj_n_np[:, j, i])
plt.show()
for j in range(100):
plt.plot(trj_n_np[:, j, 0], trj_n_np[:, j, 1])
plt.show()
def conditional_prob_forward(self):
step = 20
x0 = torch.ones(1, self.dim)
trj_n = self.dynamics.generate_trj(x0, T=self.N, noise=False)
x0 = trj_n[:-step, 0 , :]
x1 = trj_n[step:, 0, :]
log_prob_x0_x1 = self.dynamics.conditional_log_prob(x0,x1, T=step, reverse=False)
print('True Steps prob: {}'.format(torch.mean(log_prob_x0_x1)))
log_prob_x0_x1 = self.dynamics.conditional_log_prob(x0,x1, T=1, reverse=False)
print('Less Steps prob: {}'.format(torch.mean(log_prob_x0_x1)))
log_prob_x0_x1 = self.dynamics.conditional_log_prob(x0,x1, T=50, reverse=False)
print('More Steps prob: {}'.format(torch.mean(log_prob_x0_x1)))
def forward_density(self):
x0 = torch.randn(1, self.dim)
tr_mu, tr_var = self.dynamics.generate_trj_density(x0, self.N, reverse=False)
tr_mu = to_numpy(tr_mu)
tr_var = to_numpy(tr_var)
print(tr_mu.shape)
print(tr_var.shape)
fig, axs = plt.subplots(self.dim)
for i in range(self.dim):
l_trj = tr_mu[:, 0, i] - 3 * np.sqrt(tr_var[:, 0, i, i])
h_trj = tr_mu[:, 0, i] + 3 * np.sqrt(tr_var[:, 0, i, i])
t = np.linspace(0, tr_mu.shape[0], tr_mu.shape[0])
axs[i].plot(t, tr_mu[:, 0, i])
axs[i].fill_between(t, l_trj, h_trj, alpha=0.3)
plt.show()
def backward_density(self):
x0 = torch.randn(1, self.dim)
tr_mu, tr_var = self.dynamics.generate_trj_density(x0, self.N, reverse=True)
tr_mu = to_numpy(tr_mu)
tr_var = to_numpy(tr_var)
print(tr_mu.shape)
print(tr_var.shape)
fig, axs = plt.subplots(self.dim)
for i in range(self.dim):
l_trj = tr_mu[:, 0, i] - 3 * np.sqrt(tr_var[:, 0, i, i])
h_trj = tr_mu[:, 0, i] + 3 * np.sqrt(tr_var[:, 0, i, i])
t = np.linspace(0, tr_mu.shape[0], tr_mu.shape[0])
axs[i].plot(t, tr_mu[:, 0, i])
axs[i].fill_between(t, l_trj, h_trj, alpha=0.3)
plt.show() | [
"torch.mean",
"torch.ones",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"torch.randn",
"iflow.utils.generic.to_numpy",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] | [((292, 315), 'torch.ones', 'torch.ones', (['(1)', 'self.dim'], {}), '(1, self.dim)\n', (302, 315), False, 'import torch\n'), ((405, 420), 'iflow.utils.generic.to_numpy', 'to_numpy', (['trj_n'], {}), '(trj_n)\n', (413, 420), False, 'from iflow.utils.generic import to_numpy\n'), ((441, 463), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.dim'], {}), '(self.dim)\n', (453, 463), True, 'import matplotlib.pyplot as plt\n'), ((553, 563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (561, 563), True, 'import matplotlib.pyplot as plt\n'), ((573, 619), 'matplotlib.pyplot.plot', 'plt.plot', (['trj_n_np[:, 0, 0]', 'trj_n_np[:, 0, 1]'], {}), '(trj_n_np[:, 0, 0], trj_n_np[:, 0, 1])\n', (581, 619), True, 'import matplotlib.pyplot as plt\n'), ((628, 638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (636, 638), True, 'import matplotlib.pyplot as plt\n'), ((693, 711), 'torch.ones', 'torch.ones', (['(100)', '(3)'], {}), '(100, 3)\n', (703, 711), False, 'import torch\n'), ((802, 817), 'iflow.utils.generic.to_numpy', 'to_numpy', (['trj_n'], {}), '(trj_n)\n', (810, 817), False, 'from iflow.utils.generic import to_numpy\n'), ((838, 860), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.dim'], {}), '(self.dim)\n', (850, 860), True, 'import matplotlib.pyplot as plt\n'), ((983, 993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (991, 993), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1175), 'torch.ones', 'torch.ones', (['(100)', '(3)'], {}), '(100, 3)\n', (1167, 1175), False, 'import torch\n'), ((1280, 1295), 'iflow.utils.generic.to_numpy', 'to_numpy', (['trj_n'], {}), '(trj_n)\n', (1288, 1295), False, 'from iflow.utils.generic import to_numpy\n'), ((1316, 1338), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.dim'], {}), '(self.dim)\n', (1328, 1338), True, 'import matplotlib.pyplot as plt\n'), 
((1461, 1471), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1469, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1579), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1577, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1652, 1675), 'torch.ones', 'torch.ones', (['(1)', 'self.dim'], {}), '(1, self.dim)\n', (1662, 1675), False, 'import torch\n'), ((2339, 2363), 'torch.randn', 'torch.randn', (['(1)', 'self.dim'], {}), '(1, self.dim)\n', (2350, 2363), False, 'import torch\n'), ((2466, 2481), 'iflow.utils.generic.to_numpy', 'to_numpy', (['tr_mu'], {}), '(tr_mu)\n', (2474, 2481), False, 'from iflow.utils.generic import to_numpy\n'), ((2499, 2515), 'iflow.utils.generic.to_numpy', 'to_numpy', (['tr_var'], {}), '(tr_var)\n', (2507, 2515), False, 'from iflow.utils.generic import to_numpy\n'), ((2591, 2613), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.dim'], {}), '(self.dim)\n', (2603, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2961, 2971), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2969, 2971), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3042), 'torch.randn', 'torch.randn', (['(1)', 'self.dim'], {}), '(1, self.dim)\n', (3029, 3042), False, 'import torch\n'), ((3144, 3159), 'iflow.utils.generic.to_numpy', 'to_numpy', (['tr_mu'], {}), '(tr_mu)\n', (3152, 3159), False, 'from iflow.utils.generic import to_numpy\n'), ((3177, 3193), 'iflow.utils.generic.to_numpy', 'to_numpy', (['tr_var'], {}), '(tr_var)\n', (3185, 3193), False, 'from iflow.utils.generic import to_numpy\n'), ((3269, 3291), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.dim'], {}), '(self.dim)\n', (3281, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3639, 3649), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3647, 3649), True, 'import matplotlib.pyplot as plt\n'), ((1036, 1082), 'matplotlib.pyplot.plot', 'plt.plot', (['trj_n_np[:, j, 0]', 'trj_n_np[:, j, 1]'], {}), '(trj_n_np[:, j, 0], trj_n_np[:, j, 1])\n', 
(1044, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1560), 'matplotlib.pyplot.plot', 'plt.plot', (['trj_n_np[:, j, 0]', 'trj_n_np[:, j, 1]'], {}), '(trj_n_np[:, j, 0], trj_n_np[:, j, 1])\n', (1522, 1560), True, 'import matplotlib.pyplot as plt\n'), ((2803, 2849), 'numpy.linspace', 'np.linspace', (['(0)', 'tr_mu.shape[0]', 'tr_mu.shape[0]'], {}), '(0, tr_mu.shape[0], tr_mu.shape[0])\n', (2814, 2849), True, 'import numpy as np\n'), ((3481, 3527), 'numpy.linspace', 'np.linspace', (['(0)', 'tr_mu.shape[0]', 'tr_mu.shape[0]'], {}), '(0, tr_mu.shape[0], tr_mu.shape[0])\n', (3492, 3527), True, 'import numpy as np\n'), ((1946, 1972), 'torch.mean', 'torch.mean', (['log_prob_x0_x1'], {}), '(log_prob_x0_x1)\n', (1956, 1972), False, 'import torch\n'), ((2105, 2131), 'torch.mean', 'torch.mean', (['log_prob_x0_x1'], {}), '(log_prob_x0_x1)\n', (2115, 2131), False, 'import torch\n'), ((2265, 2291), 'torch.mean', 'torch.mean', (['log_prob_x0_x1'], {}), '(log_prob_x0_x1)\n', (2275, 2291), False, 'import torch\n'), ((2689, 2716), 'numpy.sqrt', 'np.sqrt', (['tr_var[:, 0, i, i]'], {}), '(tr_var[:, 0, i, i])\n', (2696, 2716), True, 'import numpy as np\n'), ((2758, 2785), 'numpy.sqrt', 'np.sqrt', (['tr_var[:, 0, i, i]'], {}), '(tr_var[:, 0, i, i])\n', (2765, 2785), True, 'import numpy as np\n'), ((3367, 3394), 'numpy.sqrt', 'np.sqrt', (['tr_var[:, 0, i, i]'], {}), '(tr_var[:, 0, i, i])\n', (3374, 3394), True, 'import numpy as np\n'), ((3436, 3463), 'numpy.sqrt', 'np.sqrt', (['tr_var[:, 0, i, i]'], {}), '(tr_var[:, 0, i, i])\n', (3443, 3463), True, 'import numpy as np\n')] |
import numpy as np
import scipy.sparse as sp
import torch
import random
from sklearn.feature_extraction.text import TfidfTransformer
def clean_dblp(path='./data/dblp/',new_path='./data/dblp2/'):
label_file = "author_label"
PA_file = "PA"
PC_file = "PC"
PT_file = "PT"
PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
dtype=np.int32)
PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
dtype=np.int32)
PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
dtype=np.int32)
labels_raw = np.genfromtxt("{}{}.txt".format(path, label_file),
dtype=np.int32)
A = {}
for i,a in enumerate(labels_raw[:,0]):
A[a]=i+1
print(len(A))
PA_new = np.asarray([[PA[i,0],A[PA[i,1]]] for i in range(PA.shape[0]) if PA[i,1] in A])
PC_new = PC
PT_new = PT
labels_new = np.asarray([[A[labels_raw[i,0]],labels_raw[i,1]] for i in range(labels_raw.shape[0]) if labels_raw[i,0] in A])
np.savetxt("{}{}.txt".format(new_path, PA_file),PA_new,fmt='%i')
np.savetxt("{}{}.txt".format(new_path, PC_file),PC_new,fmt='%i')
np.savetxt("{}{}.txt".format(new_path, PT_file),PT_new,fmt='%i')
np.savetxt("{}{}.txt".format(new_path, label_file),labels_new,fmt='%i')
def gen_homograph():
path = "data/dblp2/"
out_file = "homograph"
label_file = "author_label"
PA_file = "PA"
PC_file = "PC"
PT_file = "PT"
APA_file = "APA"
APAPA_file = "APAPA"
APCPA_file = "APCPA"
PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
dtype=np.int32)
PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
dtype=np.int32)
PT = np.genfromtxt("{}{}.txt".format(path, PT_file),
dtype=np.int32)
PA[:, 0] -= 1
PA[:, 1] -= 1
PC[:, 0] -= 1
PC[:, 1] -= 1
PT[:, 0] -= 1
PT[:, 1] -= 1
paper_max = max(PA[:, 0]) + 1
author_max = max(PA[:, 1]) + 1
conf_max = max(PC[:, 1]) + 1
term_max = max(PT[:, 1]) + 1
PA[:, 0] += author_max
PC[:, 0] += author_max
PC[:, 1] += author_max+paper_max
edges = np.concatenate((PA,PC),axis=0)
np.savetxt("{}{}.txt".format(path, out_file),edges,fmt='%u')
def read_embed(path="../../../data/dblp2/",
emb_file="APC",emb_len=16):
with open("{}{}_{}.emb".format(path, emb_file,emb_len)) as f:
n_nodes, n_feature = map(int, f.readline().strip().split())
print("number of nodes:{}, embedding size:{}".format(n_nodes, n_feature))
embedding = np.loadtxt("{}{}_{}.emb".format(path, emb_file,emb_len),
dtype=np.float32, skiprows=1)
emb_index = {}
for i in range(n_nodes):
emb_index[embedding[i, 0]] = i
features = np.asarray([embedding[emb_index[i], 1:] if i in emb_index else embedding[0, 1:] for i in range(18405)])
#assert features.shape[1] == n_feature
#assert features.shape[0] == n_nodes
return features, n_nodes, n_feature
def dump_edge_emb(path='../../../data/dblp2/',emb_len=16):
# dump APA
APA_file = "APA"
APAPA_file = "APAPA"
APCPA_file = "APCPA"
APA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APC',emb_len=emb_len)
APCPA_e,n_nodes,n_emb =read_embed(path=path,emb_file='APC',emb_len=emb_len)
PA_file = "PA"
PC_file = "PC"
PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
dtype=np.int32)
PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
dtype=np.int32)
PA[:, 0] -= 1
PA[:, 1] -= 1
PC[:, 0] -= 1
PC[:, 1] -= 1
PAi={}
APi={}
PCi={}
CPi={}
for i in range(PA.shape[0]):
p=PA[i,0]
a=PA[i,1]
if p not in PAi:
PAi[p]=set()
if a not in APi:
APi[a]=set()
PAi[p].add(a)
APi[a].add(p)
for i in range(PC.shape[0]):
p=PC[i,0]
c=PC[i,1]
if p not in PCi:
PCi[p]=set()
if c not in CPi:
CPi[c]=set()
PCi[p].add(c)
CPi[c].add(p)
APAi={}
APCi={}
CPAi={}
for v in APi:
for p in APi[v]:
if p not in PAi:
continue
for a in PAi[p]:
if a not in APAi:
APAi[a] ={}
if v not in APAi:
APAi[v] ={}
if v not in APAi[a]:
APAi[a][v]=set()
if a not in APAi[v]:
APAi[v][a]=set()
APAi[a][v].add(p)
APAi[v][a].add(p)
for v in APi:
for p in APi[v]:
if p not in PCi:
continue
for c in PCi[p]:
if v not in APCi:
APCi[v] ={}
if c not in CPAi:
CPAi[c] ={}
if c not in APCi[v]:
APCi[v][c]=set()
if v not in CPAi[c]:
CPAi[c][v]=set()
CPAi[c][v].add(p)
APCi[v][c].add(p)
## APAPA; vpa1pa2
#APAPA_emb = []
#for v in APAi:
# result = {}
# count = {}
# for a1 in APAi[v]:
# np1 = len(APAi[v][a1])
# edge1 = [node_emb[p] for p in APAi[v][a1]]
# edge1 = np.sum(np.vstack(edge1), axis=0) # edge1: the emd between v and a1
# for a2 in APAi[a1].keys():
# np2 = len(APAi[a1][a2])
# edge2 = [node_emb[p] for p in APAi[a1][a2]]
# edge2 = np.sum(np.vstack(edge2), axis=0) # edge2: the emd between a1 and a2
# if a2 not in result:
# result[a2] = node_emb[a2] * (np2 * np1)
# else:
# result[a2] += node_emb[a2] * (np2 * np1)
# result[a2] += edge1 * np2
# result[a2] += edge2 * np1
# if a2 not in count:
# count[a2]=0
# count[a2] += np1*np2
# for a2 in result:
# if v <= a2:
# APAPA_emb.append(np.concatenate(([v, a2], result[a2]/count[a2], [count[a2]])))
#APAPA_emb = np.asarray(APAPA_emb)
#m = np.max(APAPA_emb[:, -1])
#APAPA_emb[:, -1] /= m
#print("compute edge embeddings {} complete".format('APAPA'))
APA_ps=sp.load_npz("{}{}".format(path, 'APA_ps.npz')).todense()
APAPA_ps=sp.load_npz("{}{}".format(path, 'APAPA_ps.npz')).todense()
APCPA_ps=sp.load_npz("{}{}".format(path, 'APCPA_ps.npz')).todense()
# APA
APA = APAi
APA_emb = []
for a1 in APA.keys():
for a2 in APA[a1]:
tmp = [APA_e[p] for p in APA[a1][a2]]
tmp = np.sum(tmp, axis=0)/len(APA[a1][a2])
tmp += APA_e[a1]+APA_e[a2]
tmp /= 3
if a1 <= a2:
APA_emb.append(np.concatenate(([a1, a2], tmp,[APA_ps[a1,a2]], [len(APA[a1][a2])])))
APA_emb = np.asarray(APA_emb)
print("compute edge embeddings {} complete".format(APA_file))
# APAPA
APAPA_emb = []
ind1 = APAi
ind2 = APAi
for v in ind1:
result = {}
count = {}
for a1 in ind1[v].keys():
np1 = len(ind1[v][a1])
edge1 = [APA_e[p] for p in ind1[v][a1]]
edge1 = np.sum(np.vstack(edge1), axis=0) # edge1: the emd between v and a1
for a2 in ind2[a1].keys():
np2 = len(ind2[a1][a2])
edge2 = [APA_e[p] for p in ind2[a1][a2]]
edge2 = np.sum(np.vstack(edge2), axis=0) # edge2: the emd between a1 and a2
if a2 not in result:
result[a2] = APA_e[a1] * (np2 * np1)
else:
result[a2] += APA_e[a1] * (np2 * np1)
result[a2] += edge1 * np2
result[a2] += edge2 * np1
if a2 not in count:
count[a2]=0
count[a2] += np1*np2
for a in result:
if v <= a:
APAPA_emb.append(np.concatenate(([v, a], (result[a]/count[a]+APA_e[a]+APA_e[v])/5
,[APAPA_ps[v,a]],[count[a]])))
# f.write('{} {} '.format(v, a))
# f.write(" ".join(map(str, result[a].numpy())))
# f.write('\n')
APAPA_emb = np.asarray(APAPA_emb)
m = np.max(APAPA_emb[:, -1])
APAPA_emb[:, -1] /= m
print("compute edge embeddings {} complete".format(APAPA_file))
#APCPA
ind1 = APCi
ind2 = CPAi
APCPA_emb = []
for v in ind1:
result = {}
count = {}
if len(ind1[v]) == 0:
continue
for a1 in ind1[v].keys():
np1 = len(ind1[v][a1])
edge1 = [APCPA_e[p] for p in ind1[v][a1]]
edge1 = np.sum(np.vstack(edge1), axis=0) # edge1: the emd between v and a1
for a2 in ind2[a1].keys():
np2 = len(ind2[a1][a2])
edge2 = [APCPA_e[p] for p in ind2[a1][a2]]
edge2 = np.sum(np.vstack(edge2), axis=0) # edge2: the emd between a1 and a2
if a2 not in result:
result[a2] = APCPA_e[a1] * (np2 * np1)
else:
result[a2] += APCPA_e[a1] * (np2 * np1)
if a2 not in count:
count[a2]=0
result[a2] += edge1 * np2
result[a2] += edge2 * np1
count[a2] += np1*np2
for a in result:
if v <= a:
if APCPA_ps[v,a]==0: print(v,a)
APCPA_emb.append(np.concatenate(([v, a], (result[a]/count[a]+APCPA_e[a]+APCPA_e[v])/5,
[APCPA_ps[v,a]],
[count[a]])))
# f.write('{} {} '.format(v,a))
# f.write(" ".join(map(str, result[a].numpy())))
# f.write('\n')
APCPA_emb = np.asarray(APCPA_emb)
m = np.max(APCPA_emb[:, -1])
APCPA_emb[:, -1] /= m
print("compute edge embeddings {} complete".format(APCPA_file))
emb_len=APA_emb.shape[1]-2
np.savez("{}edge{}.npz".format(path, emb_len),
APA=APA_emb, APAPA=APAPA_emb, APCPA=APCPA_emb)
print('dump npz file {}edge{}.npz complete'.format(path, emb_len))
pass
def pathsim(A):
value = []
x,y = A.nonzero()
for i,j in zip(x,y):
value.append(2 * A[i, j] / (A[i, i] + A[j, j]))
return sp.coo_matrix((value,(x,y)))
def gen_homoadj():
    """Build and save PathSim-normalized meta-path matrices for DBLP.

    Reads the PA (paper-author), PC (paper-conference) and PT (paper-term)
    edge lists from ``data/dblp2/``, forms the commuting matrices of the
    APA, APAPA and APCPA meta-paths, normalizes each with :func:`pathsim`
    and stores them as sparse ``.npz`` files in the same directory.
    """
    data_dir = "data/dblp2/"

    def _load_edges(stem):
        # Read a whitespace-separated edge list; ids in the files are 1-based.
        edges = np.genfromtxt("{}{}.txt".format(data_dir, stem), dtype=np.int32)
        edges[:, 0] -= 1
        edges[:, 1] -= 1
        return edges

    pa_edges = _load_edges("PA")
    pc_edges = _load_edges("PC")
    # PT is read for parity with the original pipeline but not used below.
    pt_edges = _load_edges("PT")

    n_paper = pa_edges[:, 0].max() + 1
    n_author = pa_edges[:, 1].max() + 1
    n_conf = pc_edges[:, 1].max() + 1

    pa = sp.coo_matrix((np.ones(pa_edges.shape[0]), (pa_edges[:, 0], pa_edges[:, 1])),
                       shape=(n_paper, n_author),
                       dtype=np.float32)
    pc = sp.coo_matrix((np.ones(pc_edges.shape[0]), (pc_edges[:, 0], pc_edges[:, 1])),
                       shape=(n_paper, n_conf),
                       dtype=np.float32)

    # Commuting matrices for each meta-path.
    apa = pa.transpose() * pa
    apapa = apa * apa
    apcpa = pa.transpose() * pc * pc.transpose() * pa

    sp.save_npz("{}{}".format(data_dir, 'APA_ps.npz'), pathsim(apa))
    sp.save_npz("{}{}".format(data_dir, 'APAPA_ps.npz'), pathsim(apapa))
    sp.save_npz("{}{}".format(data_dir, 'APCPA_ps.npz'), pathsim(apcpa))
def gen_walk(path='data/dblp2/'):
    """Generate random-walk corpora over the DBLP heterogeneous graph.

    Reads the PA (paper-author) and PC (paper-conference) edge lists from
    *path*, re-bases all ids into one shared id space (authors first, then
    papers, then conferences), builds adjacency dictionaries, and writes
    two walk files used for embedding training:

    * ``APCPA.walk`` -- walks alternating author / paper / conference,
    * ``APA.walk``   -- walks alternating author / paper.

    Nodes are written with a type prefix: ``a`` author, ``v`` paper,
    ``i`` conference.
    """
    APA_file = "APA"
    APAPA_file = "APAPA"
    APCPA_file = "APCPA"
    PA_file = "PA"
    PC_file = "PC"
    # Load edge lists; ids in the text files are 1-based, shift to 0-based.
    PA = np.genfromtxt("{}{}.txt".format(path, PA_file),
                       dtype=np.int32)
    PC = np.genfromtxt("{}{}.txt".format(path, PC_file),
                       dtype=np.int32)
    PA[:, 0] -= 1
    PA[:, 1] -= 1
    PC[:, 0] -= 1
    PC[:, 1] -= 1
    paper_max = max(PA[:, 0]) + 1
    author_max = max(PA[:, 1]) + 1
    conf_max = max(PC[:, 1]) + 1
    # Shift ids into a single global space: [0, author_max) are authors,
    # [author_max, author_max + paper_max) are papers, conferences after.
    PA[:, 0] += author_max
    PC[:, 0] += author_max
    PC[:, 1] += author_max+paper_max
    # Adjacency dictionaries: paper->authors (PAi), author->papers (APi),
    # paper->conferences (PCi), conference->papers (CPi).
    PAi={}
    APi={}
    PCi={}
    CPi={}
    for i in range(PA.shape[0]):
        p=PA[i,0]
        a=PA[i,1]
        if p not in PAi:
            PAi[p]=set()
        if a not in APi:
            APi[a]=set()
        PAi[p].add(a)
        APi[a].add(p)
    for i in range(PC.shape[0]):
        p=PC[i,0]
        c=PC[i,1]
        if p not in PCi:
            PCi[p]=set()
        if c not in CPi:
            CPi[c]=set()
        PCi[p].add(c)
        CPi[c].add(p)
    # Two-hop dictionaries keyed by endpoint pairs; values are the sets of
    # intermediate papers connecting them.  Built here but not used by the
    # walk generation below.
    APAi={}
    APCi={}
    CPAi={}
    for v in APi:
        for p in APi[v]:
            if p not in PAi:
                continue
            for a in PAi[p]:
                if a not in APAi:
                    APAi[a] ={}
                if v not in APAi:
                    APAi[v] ={}
                if v not in APAi[a]:
                    APAi[a][v]=set()
                if a not in APAi[v]:
                    APAi[v][a]=set()
                APAi[a][v].add(p)
                APAi[v][a].add(p)
    for v in APi:
        for p in APi[v]:
            if p not in PCi:
                continue
            for c in PCi[p]:
                if v not in APCi:
                    APCi[v] ={}
                if c not in CPAi:
                    CPAi[c] ={}
                if c not in APCi[v]:
                    APCi[v][c]=set()
                if v not in CPAi[c]:
                    CPAi[c][v]=set()
                CPAi[c][v].add(p)
                APCi[v][c].add(p)
    #(1) number of walks per node w: 1000; TOO many
    #(2) walk length l: 100;
    #(3) vector dimension d: 128 (LINE: 128 for each order);
    #(4) neighborhood size k: 7; --default is 5
    #(5) size of negative samples: 5
    #mapping of notation: a:author v:paper i:conference
    l = 100
    w = 1000
    import random
    # NOTE(review): random.sample() on a set is deprecated since Python 3.9
    # and removed in 3.11 -- the calls below would need list(...) wrappers
    # on newer interpreters.
    #gen random walk for meta-path APCPA
    with open("{}{}.walk".format(path,APCPA_file),mode='w') as f:
        for _ in range(w):
            for a in APi:
                #print(a)
                result="a{}".format(a)
                for _ in range(int(l/4)):
                    # author -> paper -> conference ...
                    p = random.sample(APi[a],1)[0]
                    c = random.sample(PCi[p],1)[0]
                    result+=" v{} i{}".format(p,c)
                    # ... -> paper (retry until the paper has authors) -> author
                    p = random.sample(CPi[c],1)[0]
                    while p not in PAi:
                        p = random.sample(CPi[c],1)[0]
                    a = random.sample(PAi[p],1)[0]
                    result+=" v{} a{}".format(p,a)
                f.write(result+"\n")
    #gen random walk for meta-path APA
    with open("{}{}.walk".format(path,APA_file),mode='w') as f:
        for _ in range(w):
            for a in APi:
                result="a{}".format(a)
                for _ in range(int(l/2)):
                    p = random.sample(APi[a],1)[0]
                    a = random.sample(PAi[p],1)[0]
                    result+=" v{} a{}".format(p,a)
                f.write(result+"\n")
    ##gen random walk for meta-path APAPA
    #with open("{}{}.walk".format(path,APAPA_file),mode='w') as f:
    #    for _ in range(w):
    #        for a in APi:
    #            result="a{}".format(a)
    #            for _ in range(int(l/2)):
    #                p = random.sample(APi[a],1)[0]
    #                a = random.sample(PAi[p],1)[0]
    #                result+=" v{} a{}".format(p,a)
    #            f.write(result+"\n")
    pass
#clean_dblp()
#gen_homograph()
# Pipeline entry: only the edge-embedding dump step is enabled; the other
# stages are left commented out and run manually as needed.
dump_edge_emb(emb_len=8)
#gen_homoadj()
#gen_walk()
| [
"numpy.sum",
"random.sample",
"numpy.asarray",
"numpy.ones",
"scipy.sparse.coo_matrix",
"numpy.max",
"numpy.vstack",
"numpy.concatenate"
] | [((2188, 2220), 'numpy.concatenate', 'np.concatenate', (['(PA, PC)'], {'axis': '(0)'}), '((PA, PC), axis=0)\n', (2202, 2220), True, 'import numpy as np\n'), ((6997, 7016), 'numpy.asarray', 'np.asarray', (['APA_emb'], {}), '(APA_emb)\n', (7007, 7016), True, 'import numpy as np\n'), ((8385, 8406), 'numpy.asarray', 'np.asarray', (['APAPA_emb'], {}), '(APAPA_emb)\n', (8395, 8406), True, 'import numpy as np\n'), ((8415, 8439), 'numpy.max', 'np.max', (['APAPA_emb[:, -1]'], {}), '(APAPA_emb[:, -1])\n', (8421, 8439), True, 'import numpy as np\n'), ((10003, 10024), 'numpy.asarray', 'np.asarray', (['APCPA_emb'], {}), '(APCPA_emb)\n', (10013, 10024), True, 'import numpy as np\n'), ((10033, 10057), 'numpy.max', 'np.max', (['APCPA_emb[:, -1]'], {}), '(APCPA_emb[:, -1])\n', (10039, 10057), True, 'import numpy as np\n'), ((10520, 10550), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['(value, (x, y))'], {}), '((value, (x, y)))\n', (10533, 10550), True, 'import scipy.sparse as sp\n'), ((11198, 11218), 'numpy.ones', 'np.ones', (['PA.shape[0]'], {}), '(PA.shape[0])\n', (11205, 11218), True, 'import numpy as np\n'), ((11362, 11382), 'numpy.ones', 'np.ones', (['PC.shape[0]'], {}), '(PC.shape[0])\n', (11369, 11382), True, 'import numpy as np\n'), ((6761, 6780), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (6767, 6780), True, 'import numpy as np\n'), ((7354, 7370), 'numpy.vstack', 'np.vstack', (['edge1'], {}), '(edge1)\n', (7363, 7370), True, 'import numpy as np\n'), ((8856, 8872), 'numpy.vstack', 'np.vstack', (['edge1'], {}), '(edge1)\n', (8865, 8872), True, 'import numpy as np\n'), ((7583, 7599), 'numpy.vstack', 'np.vstack', (['edge2'], {}), '(edge2)\n', (7592, 7599), True, 'import numpy as np\n'), ((8090, 8199), 'numpy.concatenate', 'np.concatenate', (['([v, a], (result[a] / count[a] + APA_e[a] + APA_e[v]) / 5, [APAPA_ps[v, a]],\n [count[a]])'], {}), '(([v, a], (result[a] / count[a] + APA_e[a] + APA_e[v]) / 5, [\n APAPA_ps[v, a]], [count[a]]))\n', (8104, 
8199), True, 'import numpy as np\n'), ((9087, 9103), 'numpy.vstack', 'np.vstack', (['edge2'], {}), '(edge2)\n', (9096, 9103), True, 'import numpy as np\n'), ((9655, 9768), 'numpy.concatenate', 'np.concatenate', (['([v, a], (result[a] / count[a] + APCPA_e[a] + APCPA_e[v]) / 5, [APCPA_ps[v,\n a]], [count[a]])'], {}), '(([v, a], (result[a] / count[a] + APCPA_e[a] + APCPA_e[v]) / \n 5, [APCPA_ps[v, a]], [count[a]]))\n', (9669, 9768), True, 'import numpy as np\n'), ((15182, 15206), 'random.sample', 'random.sample', (['APi[a]', '(1)'], {}), '(APi[a], 1)\n', (15195, 15206), False, 'import random\n'), ((15233, 15257), 'random.sample', 'random.sample', (['PCi[p]', '(1)'], {}), '(PCi[p], 1)\n', (15246, 15257), False, 'import random\n'), ((15335, 15359), 'random.sample', 'random.sample', (['CPi[c]', '(1)'], {}), '(CPi[c], 1)\n', (15348, 15359), False, 'import random\n'), ((15481, 15505), 'random.sample', 'random.sample', (['PAi[p]', '(1)'], {}), '(PAi[p], 1)\n', (15494, 15505), False, 'import random\n'), ((15858, 15882), 'random.sample', 'random.sample', (['APi[a]', '(1)'], {}), '(APi[a], 1)\n', (15871, 15882), False, 'import random\n'), ((15909, 15933), 'random.sample', 'random.sample', (['PAi[p]', '(1)'], {}), '(PAi[p], 1)\n', (15922, 15933), False, 'import random\n'), ((15430, 15454), 'random.sample', 'random.sample', (['CPi[c]', '(1)'], {}), '(CPi[c], 1)\n', (15443, 15454), False, 'import random\n')] |
import sys
sys.path.insert(0, '../../../src/')
import random
import numpy as np
import json
import os
import time
from datetime import datetime
from util import *
from nnett import *
from lp import *
def ssc_pair(nnet, I, J, K, test_data, di):
    """Search for a test input covering the SSC condition on the neuron
    pair (J, K) between layers I and I+1 of *nnet*.

    Test inputs are tried in a random order; for each one the LP-based
    check ``rp_ssc`` (from the ``lp`` module) is run.  When it is
    feasible and the labels are consistent, the LP is re-run 99 more
    times to average its solve time and a stats line is appended to
    ``<di>results.txt``.

    Parameters
    ----------
    nnet : NNett
        Network under test.
    I, J, K : int
        Layer index and the two neuron indices of the pair under test.
    test_data : tuple
        (inputs, labels) shared variables; elements are ``.eval()``-ed.
    di : str
        Directory prefix for the results file.

    Returns
    -------
    tuple
        ``(found, tested_index, cex, distance, label, label_, label__)``;
        the last four entries are -1 when no feasible pair was found.
    """
    index = -1
    tot = len(test_data[0].eval())
    ordering = list(range(tot))
    np.random.shuffle(ordering)
    cex = False
    while index < tot - 1:
        index += 1
        X = test_data[0][ordering[index]].eval()
        label = test_data[1][ordering[index]].eval()
        label_, act = nnet.eval(list(X))
        times = []
        start = time.time()
        feasible, new_x, d, s1, s2 = rp_ssc(I, J, K, nnet, X, act)
        times.append(time.time() - start)
        if feasible:
            label__, act = nnet.eval(list(new_x))
            if label == label_ or label == label__:
                if label_ != label__:
                    # The two inputs are classified differently:
                    # adversarial counter-example found.
                    cex = True
                # Re-run the same LP 99 more times to average its solve time.
                for _ in range(99):
                    start = time.time()
                    feasible, new_x, d, s1, s2 = rp_ssc(I, J, K, nnet, X, act)
                    times.append(time.time() - start)
                tot_time = sum(times) / len(times)
                # Fix: the original called .format(label) on a file name with
                # no placeholders (a no-op) and used a manual open/close pair.
                with open(di + 'results.txt', "a") as f:
                    f.write('#vars: {0}, #constraints: {1}, #time: {2}\n'
                            .format(s1, s2, tot_time))
                return True, index, cex, d, label, label_, label__
        if index >= 40:
            break  # cap the search at 41 sampled test inputs
    return False, index, cex, -1, -1, -1, -1
def main():
    """Measure SSC coverage of the random MNIST networks.

    Reads the list of model files from ``../../random-nn/README.txt``
    (skipping the first 7 entries), loads each network's weights and
    biases from JSON, and for every hidden-layer neuron pair calls
    ``ssc_pair`` until a covering pair is found, appending progress
    lines to ``./results.txt``.
    """
    di = '../../random-nn/'
    training_data, validation_data, test_data = mnist_load_data_shared(filename="../../data/mnist.pkl.gz")
    nnindex = -1
    with open(di + 'README.txt') as f:
        lines = f.readlines()
    for line in lines:
        nnindex += 1
        if nnindex < 7:
            continue  # the first 7 README entries are skipped
        fname = line.split()[0]
        with open(di + 'w_' + fname, "r") as infile:
            weights = json.load(infile)
        with open(di + 'b_' + fname, "r") as infile:
            biases = json.load(infile)
        nnet = NNett(weights, biases)
        N = len(nnet.weights)
        with open('./results.txt', "a") as f:
            f.write('Neural net tested: {0}\n'.format(fname))
        ncex = 0
        covered = 0
        not_covered = 0
        i_begin = 1
        j_begin = 0
        k_begin = 0
        flag = False
        for I in range(i_begin, N - 1):  # iterate over each hidden layer
            M = len(nnet.weights[I - 1][0])
            # Fix: this handle was previously opened here and never closed
            # (resource leak); a context manager closes it deterministically.
            with open('./results.txt', "a") as f:
                f.write('L{0}-{1}: '.format(I, I + 1))
            for J in range(j_begin, M):
                for K in range(k_begin, len(nnet.weights[I][0])):
                    flag = True
                    found, tested, cex, d, label, label_, label__ = ssc_pair(nnet, I, J, K, test_data, './')
                    if found:
                        covered += 1
                    else:
                        not_covered += 1
                        flag = False
                    if cex:
                        ncex += 1
                    if flag:
                        break  # first covered pair found; move on
                k_begin = 0
                if flag:
                    break
            j_begin = 0
if __name__=="__main__":
  # Script entry point.
  main()
| [
"json.load",
"sys.path.insert",
"numpy.random.shuffle",
"time.time"
] | [((12, 47), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../src/"""'], {}), "(0, '../../../src/')\n", (27, 47), False, 'import sys\n'), ((322, 349), 'numpy.random.shuffle', 'np.random.shuffle', (['ordering'], {}), '(ordering)\n', (339, 349), True, 'import numpy as np\n'), ((552, 563), 'time.time', 'time.time', ([], {}), '()\n', (561, 563), False, 'import time\n'), ((633, 644), 'time.time', 'time.time', ([], {}), '()\n', (642, 644), False, 'import time\n'), ((1839, 1856), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (1848, 1856), False, 'import json\n'), ((1919, 1936), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (1928, 1936), False, 'import json\n'), ((871, 882), 'time.time', 'time.time', ([], {}), '()\n', (880, 882), False, 'import time\n'), ((964, 975), 'time.time', 'time.time', ([], {}), '()\n', (973, 975), False, 'import time\n')] |
import calendar
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def transform_data(data, months=None):
    """Reshape a wide year-by-month passenger table into a monthly series.

    Parameters
    ----------
    data : pandas.DataFrame
        Wide table with one row per year (newest year first) and one
        column per month name listed in *months*.
    months : list of str, optional
        Names of the twelve month columns in calendar order.  Defaults to
        the Russian month names used by the source dataset.  (Previously
        this function read a module-level global ``month``; the default
        makes it self-contained while staying backward compatible.)

    Returns
    -------
    pandas.DataFrame
        Frame with columns 'date' (datetime.date, first day of each
        month, starting at January 2007) and 'data' (the values), indexed
        by a monthly DatetimeIndex.
    """
    if months is None:
        months = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь',
                  'Июль', 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь']
    # Rows come newest-first, so reverse before stacking into one long series.
    time_data = data[months][::-1].stack().reset_index().iloc[:, 2]
    # The series is assumed to start at January 2007, one value per month.
    dt = [date(2007 + i // 12, i % 12 + 1, 1) for i in range(len(time_data))]
    new_data = pd.DataFrame({'date': dt, 'data': time_data})
    new_data.index = pd.PeriodIndex(dt, freq='M')
    new_data.index = new_data.index.to_timestamp()
    return new_data
def plot_data(data, name):
    """Plot the monthly passenger-count series for one airport.

    Parameters
    ----------
    data : pandas.DataFrame
        Output of :func:`transform_data`; its 'data' column is plotted
        against the DatetimeIndex.
    name : str
        Airport name, interpolated into the chart title.
    """
    chart_title = f'Пассажиропоток за 2007 - 2020 годы по месяцам {name}'
    axes = data['data'].plot(figsize=(13, 8), title=chart_title)
    axes.set_xlabel("Время")
    axes.set_ylabel("Количество пассажиров в месяц")
    plt.grid()
    plt.show()
def plot_intensity(data, name, year):
    """Plot a piecewise-constant daily arrival intensity derived from the
    twelve monthly totals in *data* for the given *year*.

    The x axis enumerates days of the year plus the month-boundary days
    (each boundary appears twice, once for the ending month and once for
    the starting month, producing step discontinuities in the plot).

    NOTE(review): this relies on a module-level global ``flag`` toggled
    inside ``f_cond`` and on evaluating the x values strictly in sorted
    order -- do not reorder calls or reuse ``f_cond`` elsewhere.
    """
    n_year = 366 if calendar.isleap(year) else 365
    # Days in each month and cumulative day index of each month's end.
    m_year = np.array([calendar.monthrange(year, i)[1] for i in range(1, 13)])
    m_year_t = m_year.cumsum()
    # Every day of the year, plus each month boundary duplicated.
    x = np.sort(list(np.arange(0, n_year, 1)) + list(m_year_t))
    global flag
    flag = True
    def f_cond(x):
        # Map day x to its month; at a duplicated boundary the first
        # occurrence belongs to the ending month, the second to the next
        # month (tracked via the global flag).
        global flag
        cond = [False] * 12
        m = np.where(x <= m_year_t)[0][0]
        if flag and any(x == m_year_t):
            flag = False
        elif not flag and any(x == m_year_t):
            m += 1
            flag = True
        cond[m] = True
        # assumes data holds the 12 monthly totals in calendar order --
        # TODO confirm against the caller (``svo.iloc[1, :][month]``).
        return 0.8 * data[cond] / (2 * m_year[m])
    plt.figure(figsize=(13, 8))
    plt.plot(x, [f_cond(xi) for xi in x])
    plt.title(f'Интенсивность входящего потока в узел 1 сети, {name}, {year} год')
    plt.xlabel('t, дни')
    plt.ylabel('$\lambda_1$, пасс./день')
    plt.grid()
    plt.show()
if __name__ == '__main__':
    # Source dataset (Russian federal air transport agency open data):
    # link = 'http://www.favt.ru/opendata/7714549744-statperevaeroportpas/'
    path = 'data-20210201-structure-20181102.csv'
    df = pd.read_csv(path, encoding='windows-1251', sep=';')
    # '***' marks suppressed/unavailable values; treat them as zero.
    df.replace('***', 0, inplace=True)
    month = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль', 'Август',
             'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь']
    # Numbers use spaces as thousands separators; strip them and convert.
    for m in month + ['Январь - Декабрь']:
        df[m] = df[m].apply(lambda x: float(str(x).replace(' ', '')))
    year = 2019
    # Sheremetyevo airport: monthly series plot and intensity plot.
    name_svo = 'Москва(Шереметьево)'
    svo = df[df['Наименование аэропорта РФ'] == name_svo]
    df_svo = transform_data(svo)
    plot_data(df_svo, name_svo)
    plot_intensity(svo.iloc[1, :][month], name_svo, year)
    # Pulkovo airport: same two plots.
    name_spb = 'Санкт-Петербург(Пулково)'
    spb = df[df['Наименование аэропорта РФ'] == name_spb]
    df_spb = transform_data(spb)
    plot_data(df_spb, name_spb)
    plot_intensity(spb.iloc[1, :][month], name_spb, year)
| [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"datetime.date",
"matplotlib.pyplot.figure",
"numpy.where",
"calendar.isleap",
"numpy.arange",
"calendar.monthrange",
"matplotlib.pyplot.ylabel",
"pandas.PeriodIndex",
"matplotlib.pyplot.xlabel",
"m... | [((344, 389), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': dt, 'data': time_data}"], {}), "({'date': dt, 'data': time_data})\n", (356, 389), True, 'import pandas as pd\n'), ((412, 440), 'pandas.PeriodIndex', 'pd.PeriodIndex', (['dt'], {'freq': '"""M"""'}), "(dt, freq='M')\n", (426, 440), True, 'import pandas as pd\n'), ((767, 777), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (775, 777), True, 'import matplotlib.pyplot as plt\n'), ((783, 793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (791, 793), True, 'import matplotlib.pyplot as plt\n'), ((1456, 1483), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 8)'}), '(figsize=(13, 8))\n', (1466, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1610), 'matplotlib.pyplot.title', 'plt.title', (['f"""Интенсивность входящего потока в узел 1 сети, {name}, {year} год"""'], {}), "(f'Интенсивность входящего потока в узел 1 сети, {name}, {year} год')\n", (1541, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t, дни"""'], {}), "('t, дни')\n", (1626, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\lambda_1$, пасс./день"""'], {}), "('$\\\\lambda_1$, пасс./день')\n", (1652, 1680), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1695), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1693, 1695), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1709, 1711), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1933), 'pandas.read_csv', 'pd.read_csv', (['path'], {'encoding': '"""windows-1251"""', 'sep': '""";"""'}), "(path, encoding='windows-1251', sep=';')\n", (1893, 1933), True, 'import pandas as pd\n'), ((858, 879), 'calendar.isleap', 'calendar.isleap', (['year'], {}), '(year)\n', (873, 879), False, 'import calendar\n'), ((289, 324), 'datetime.date', 'date', 
(['(2007 + i // 12)', '(i % 12 + 1)', '(1)'], {}), '(2007 + i // 12, i % 12 + 1, 1)\n', (293, 324), False, 'from datetime import date\n'), ((913, 941), 'calendar.monthrange', 'calendar.monthrange', (['year', 'i'], {}), '(year, i)\n', (932, 941), False, 'import calendar\n'), ((1023, 1046), 'numpy.arange', 'np.arange', (['(0)', 'n_year', '(1)'], {}), '(0, n_year, 1)\n', (1032, 1046), True, 'import numpy as np\n'), ((1185, 1208), 'numpy.where', 'np.where', (['(x <= m_year_t)'], {}), '(x <= m_year_t)\n', (1193, 1208), True, 'import numpy as np\n')] |
from __future__ import print_function
try:
import cPickle as thepickle
except ImportError:
import _pickle as thepickle
from keras.callbacks import LambdaCallback
from new_model import create_model, create_model_2d
import keras.backend.tensorflow_backend as ktf
import tensorflow as tf
import os
from keras.models import Model
from keras.layers import Input, Lambda, Dot
from keras import optimizers
import sys
import numpy as np
import argparse
import random
import pickle
import keras.backend as K
#### Stop the model training when 0.002 to get the best result in the paper!!!!
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
parser = argparse.ArgumentParser()
def get_params():
parser.add_argument ('--tor_len', required=False, default=500)
parser.add_argument ('--exit_len', required=False, default=800)
parser.add_argument ('--win_interval', required=False, default=5)
parser.add_argument ('--num_window', required=False, default=11)
parser.add_argument ('--alpha', required=False, default=0.1)
parser.add_argument ('--input', required=False, default='/data/website-fingerprinting/datasets/new_dcf_data/crawle_new_overlap_interval')
parser.add_argument ('--test', required=False, default='/data/seoh/DeepCCA_model/crawle_overlap_new2021_interal')
parser.add_argument ('--model', required=False, default="/data/seoh/DeepCCA_model/crawle_overlap_new2021_")
args = parser.parse_args ()
return args
def get_session(gpu_fraction=0.85):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction,
allow_growth=True)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def load_whole_seq_new(tor_seq,exit_seq,circuit_labels,test_c,train_c,model_gb):
train_window1=[]
train_window2=[]
test_window1=[]
test_window2=[]
window_tor=[]
window_exit=[]
window_tor_size = []
window_exit_size = []
window_tor_ipd = []
window_exit_ipd = []
print("extract both ipd and size features...")
for i in range(len(tor_seq)):
window_tor_size.append([float(pair["size"])/1000.0 for pair in tor_seq[i]])
window_exit_size.append([float(pair["size"]) / 1000.0 for pair in exit_seq[i]])
window_tor_ipd.append ([float(pair["ipd"])* 1000.0 for pair in tor_seq[i]])
window_exit_ipd.append ([float(pair["ipd"])* 1000.0 for pair in exit_seq[i]])
print('window_tor_size', np.array(window_tor_size).shape)
print('window_exit_size', np.array(window_exit_size).shape)
print('window_tor_ipd', np.array(window_tor_ipd).shape)
print('window_exit_ipd', np.array(window_exit_ipd).shape)
window_tor_ipd = np.array(window_tor_ipd)
window_exit_ipd = np.array(window_exit_ipd)
# Change the first idp to 0 across all windows.
new_window_tor_ipd = []
new_window_exit_ipd = []
for trace in window_tor_ipd:
new_trace = [0]+list(trace[1:])
new_window_tor_ipd.append([ipd for ipd in new_trace])
for trace in window_exit_ipd:
new_trace = [0]+list(trace[1:])
new_window_exit_ipd.append([ipd for ipd in new_trace])
window_tor_ipd = new_window_tor_ipd
window_exit_ipd = new_window_exit_ipd
print('window_tor_ipd',window_tor_ipd[10][:10])
print('window_exit_ipd',window_exit_ipd[10][:10])
for i in range(len(window_tor_ipd)):
window_tor.append(np.concatenate((window_tor_ipd[i], window_tor_size[i]), axis=None))
window_exit.append(np.concatenate((window_exit_ipd[i], window_exit_size[i]), axis=None))
window_tor = np.array(window_tor)
window_exit = np.array(window_exit)
print('window_tor', window_tor.shape)
print('window_exit', window_exit.shape)
for w, c in zip (window_tor, circuit_labels):
if c in train_c:
train_window1.append(w)
elif c in test_c:
test_window1.append(w)
for w, c in zip (window_exit, circuit_labels):
if c in train_c:
train_window2.append(w)
elif c in test_c:
test_window2.append(w)
print ('train_window1', np.array(train_window1).shape)
print ('train_window2', np.array(train_window1).shape)
return np.array(train_window1), np.array(train_window2), np.array(test_window1), np.array(test_window2), np.array(test_window1), np.array(test_window2)
if __name__ == '__main__':
args = get_params()
ktf.set_session(get_session())
model_gb = 'cnn1d'
## Params for time-based window
interval = args.win_interval#5
t_flow_size = int(args.tor_len)#500#400#238 # 238#150#184 # 238#264
e_flow_size = int(args.exit_len)#800#330#140
num_windows = int(args.num_window)#11#21#5
window_index_list = np.arange(num_windows)
pad_t = t_flow_size * 2
pad_e = e_flow_size * 2
alpha_value = float(args.alpha)#0.1
train_windows1 = []
valid_windows1 = []
test_windows1 = []
train_windows2 = []
valid_windows2 = []
test_windows2 = []
train_labels = []
test_labels = []
valid_labels = []
for window_index in window_index_list:
addn = 2
pickle_path = args.input+str(interval)+'_win'+ str(window_index) +'_addn'+ str(addn) +'_w_superpkt.pickle'
with open (pickle_path, 'rb') as handle:
traces = pickle.load (handle)
tor_seq = traces["tor"]
exit_seq = traces["exit"]
labels = traces["label"]
circuit_labels = np.array ([int (labels[i].split ('_')[0]) for i in range (len (labels))])
print (tor_seq[0])
circuit = {}
for i in range(len(labels)):
if labels[i].split ('_')[0] not in circuit.keys ():
circuit[labels[i].split ('_')[0]] = 1
else:
circuit[labels[i].split ('_')[0]] += 1
# No overlapping circuits between training and testing sets
global test_c
global train_c
if window_index == 0:
test_c = []
train_c = []
sum_ins = 2093
keys = list (circuit.keys ())
random.shuffle (keys)
for key in keys:
if sum_ins > 0:
sum_ins -= circuit[key]
test_c.append (key)
else:
train_c.append (key)
test_c = np.array (test_c).astype ('int')
train_c = np.array (train_c).astype ('int')
# print (train_c)
print ('test_c', test_c)
print ('train_c', train_c)
###########
train_set_x1, train_set_x2, test_set_x1, test_set_x2, valid_set_x1, valid_set_x2 = load_whole_seq_new(tor_seq,exit_seq,circuit_labels,test_c,train_c,model_gb)
temp_test1 = []
temp_test2 = []
print(train_set_x1.shape)
print(valid_set_x1.shape)
print(test_set_x1.shape)
print('train_set_x1', train_set_x1.shape)
for x in train_set_x1:
train_windows1.append(np.reshape(np.pad(x[:pad_t], (0, pad_t - len(x[:pad_t])), 'constant'), [-1, 1]))
for x in valid_set_x1:
valid_windows1.append(np.reshape(np.pad(x[:pad_t], (0, pad_t - len(x[:pad_t])), 'constant'), [-1, 1]))
for x in test_set_x1:
temp_test1.append(np.reshape(np.pad(x[:pad_t], (0, pad_t - len(x[:pad_t])), 'constant'), [-1, 1]))
for x in train_set_x2:
train_windows2.append(np.reshape(np.pad(x[:pad_e], (0, pad_e - len(x[:pad_e])), 'constant'), [-1, 1]))
for x in valid_set_x2:
valid_windows2.append(np.reshape(np.pad(x[:pad_e], (0, pad_e - len(x[:pad_e])), 'constant'), [-1, 1]))
for x in test_set_x2:
temp_test2.append(np.reshape(np.pad(x[:pad_e], (0, pad_e - len(x[:pad_e])), 'constant'), [-1, 1]))
print('temp_test1: ', np.array(temp_test1).shape)
print('temp_test2: ', np.array(temp_test2).shape)
test_windows1.append(np.array(temp_test1))
test_windows2.append(np.array(temp_test2))
np.savez_compressed(args.test+str(interval)+'_test' + str(num_windows) + 'addn'+str(addn)+'_w_superpkt.npz',
tor=np.array(test_windows1),
exit=np.array(test_windows2))
train_windows1 = np.array(train_windows1)
valid_windows1 = np.array(valid_windows1)
train_windows2 = np.array(train_windows2)
valid_windows2 = np.array(valid_windows2)
train_labels = np.array(train_labels)
test_labels = np.array(test_labels)
valid_labels = np.array(valid_labels)
print('train_windows1: ', np.array(train_windows1).shape)
print('train_windows2: ', np.array(train_windows2).shape)
print('test_windows1: ', np.array(test_windows1).shape)
print('test_windows2: ', np.array(test_windows2).shape)
input_shape1 = (pad_t, 1)
input_shape2 = (pad_e, 1)
shared_model1 = create_model(input_shape=input_shape1, emb_size=64, model_name='tor') ##
shared_model2 = create_model(input_shape=input_shape2, emb_size=64, model_name='exit') ##
anchor = Input(input_shape1, name='anchor')
positive = Input(input_shape2, name='positive')
negative = Input(input_shape2, name='negative')
a = shared_model1(anchor)
p = shared_model2(positive)
n = shared_model2(negative)
print('a shape', a.shape)
print('p shape', p.shape)
print('n shape', n.shape)
pos_sim = Dot(axes=-1, normalize=True)([a, p])
neg_sim = Dot(axes=-1, normalize=True)([a, n])
print('pos_sim shape', pos_sim.shape)
print('neg_sim shape', neg_sim.shape)
# customized loss
def cosine_triplet_loss(X):
_alpha = alpha_value
positive_sim, negative_sim = X
losses = K.maximum(0.0, negative_sim - positive_sim + _alpha)
# if similarity is based on the distance functions, use below
# losses = K.maximum(0.0, positive_sim - negative_sim + _alpha)
return K.mean(losses)
loss = Lambda(cosine_triplet_loss, output_shape=(1,))([pos_sim, neg_sim])
model_triplet = Model(
inputs=[anchor, positive, negative],
outputs=loss)
print(model_triplet.summary())
opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
def identity_loss(y_true, y_pred):
return K.mean(y_pred - 0 * y_true)
model_triplet.compile(loss=identity_loss, optimizer=opt)
batch_size = 128 # batch_size_value
def intersect(a, b):
return list(set(a) & set(b))
def build_similarities(conv1, conv2, tor_t, exit_t):
tor_embs = conv1.predict(tor_t)
exit_embs = conv2.predict(exit_t)
all_embs = np.concatenate((tor_embs, exit_embs), axis=0)
all_embs = all_embs / np.linalg.norm(all_embs, axis=-1, keepdims=True)
mid = int(len(all_embs) / 2)
all_sims = np.dot(all_embs[:mid], all_embs[mid:].T)
return all_sims
def build_negatives(anc_idxs, pos_idxs, similarities, neg_imgs_idx, num_retries=50):
# If no similarities were computed, return a random negative
if similarities is None:
# print(neg_imgs_idx)
# print(anc_idxs)
anc_idxs = list(anc_idxs)
valid_neg_pool = neg_imgs_idx # .difference(anc_idxs)
print('valid_neg_pool', valid_neg_pool.shape)
return np.random.choice(valid_neg_pool, len(anc_idxs), replace=False)
final_neg = []
# for each positive pair
for (anc_idx, pos_idx) in zip(anc_idxs, pos_idxs):
anchor_class = anc_idx
# print('anchor_class',anchor_class)
valid_neg_pool = neg_imgs_idx # .difference(np.array([anchor_class]))
# positive similarity
sim = similarities[anc_idx, pos_idx]
# find all negatives which are semi(hard)
possible_ids = np.where((similarities[anc_idx] + alpha_value) > sim)[0]
possible_ids = intersect(valid_neg_pool, possible_ids)
appended = False
for iteration in range(num_retries):
if len(possible_ids) == 0:
break
idx_neg = np.random.choice(possible_ids, 1)[0]
if idx_neg != anchor_class:
final_neg.append(idx_neg)
appended = True
break
if not appended:
final_neg.append(np.random.choice(valid_neg_pool, 1)[0])
return final_neg
class SemiHardTripletGenerator():
def __init__(self, Xa_train, Xp_train, batch_size, neg_traces_train_idx, Xa_train_all, Xp_train_all, conv1,
conv2):
self.batch_size = batch_size # 128
self.Xa = Xa_train
self.Xp = Xp_train
self.Xa_all = Xa_train_all
self.Xp_all = Xp_train_all
self.Xp = Xp_train
self.cur_train_index = 0
self.num_samples = Xa_train.shape[0]
self.neg_traces_idx = neg_traces_train_idx
if conv1:
self.similarities = build_similarities(conv1, conv2, self.Xa_all,
self.Xp_all) # compute all similarities including cross pairs
else:
self.similarities = None
def next_train(self):
while 1:
self.cur_train_index += self.batch_size
if self.cur_train_index >= self.num_samples:
self.cur_train_index = 0 # initialize the index for the next epoch
# fill one batch
traces_a = np.array(range(self.cur_train_index,
self.cur_train_index + self.batch_size))
traces_p = np.array(range(self.cur_train_index,
self.cur_train_index + self.batch_size))
traces_n = build_negatives(traces_a, traces_p, self.similarities, self.neg_traces_idx)
yield ([self.Xa[traces_a],
self.Xp[traces_p],
self.Xp_all[traces_n]],
np.zeros(shape=(traces_a.shape[0]))
)
# At first epoch we don't generate hard triplets
all_traces_train_idx = np.array(range(len(train_windows1)))
gen_hard = SemiHardTripletGenerator(train_windows1, train_windows2, batch_size, all_traces_train_idx,
train_windows1, train_windows2, None, None)
nb_epochs = 10000
description = 'coffeh2'
best_loss = sys.float_info.max
def saveModel(epoch, logs):
global best_loss
loss = logs['loss']
if loss < best_loss:
print("loss is improved from {} to {}. save the model".format(str(best_loss),
str(loss)))
best_loss = loss
shared_model1.save_weights(
args.model + str(num_windows) + "_interval"+str(interval)+ '_addn'+str(addn)+"_model1_w_superpkt.h5")
shared_model2.save_weights(
args.model + str(num_windows) + "_interval"+str(interval)+'_addn'+str(addn)+"_model2_w_superpkt.h5")
else:
print("loss is not improved from {}.".format(str(best_loss)))
for epoch in range(nb_epochs):
print("built new hard generator for epoch " + str(epoch))
if epoch % 2 == 0:
if epoch == 0:
model_triplet.fit_generator(generator=gen_hard.next_train(),
steps_per_epoch=train_windows1.shape[0] // batch_size - 1,
epochs=1, verbose=1)
else:
model_triplet.fit_generator(generator=gen_hard_even.next_train(),
steps_per_epoch=(train_windows1.shape[0] // 2) // batch_size - 1,
epochs=1, verbose=1, callbacks=[LambdaCallback(on_epoch_end=saveModel)])
else:
model_triplet.fit_generator(generator=gen_hard_odd.next_train(),
steps_per_epoch=(train_windows1.shape[0] // 2) // batch_size - 1,
epochs=1, verbose=1, callbacks=[LambdaCallback(on_epoch_end=saveModel)])
mid = int(len(train_windows1) / 2)
random_ind = np.array(range(len(train_windows1)))
np.random.shuffle(random_ind)
X1 = np.array(random_ind[:mid])
X2 = np.array(random_ind[mid:])
gen_hard_odd = SemiHardTripletGenerator(train_windows1[X1], train_windows2[X1], batch_size, X2, train_windows1,
train_windows2,
shared_model1, shared_model2)
gen_hard_even = SemiHardTripletGenerator(train_windows1[X2], train_windows2[X2], batch_size,
X1, train_windows1, train_windows2,
shared_model1, shared_model2)
| [
"argparse.ArgumentParser",
"random.shuffle",
"keras.models.Model",
"keras.callbacks.LambdaCallback",
"tensorflow.ConfigProto",
"pickle.load",
"numpy.arange",
"numpy.linalg.norm",
"keras.layers.Input",
"tensorflow.GPUOptions",
"keras.optimizers.SGD",
"numpy.random.choice",
"new_model.create_m... | [((642, 667), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (665, 667), False, 'import argparse\n'), ((1501, 1579), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_fraction', 'allow_growth': '(True)'}), '(per_process_gpu_memory_fraction=gpu_fraction, allow_growth=True)\n', (1514, 1579), True, 'import tensorflow as tf\n'), ((2681, 2705), 'numpy.array', 'np.array', (['window_tor_ipd'], {}), '(window_tor_ipd)\n', (2689, 2705), True, 'import numpy as np\n'), ((2728, 2753), 'numpy.array', 'np.array', (['window_exit_ipd'], {}), '(window_exit_ipd)\n', (2736, 2753), True, 'import numpy as np\n'), ((3576, 3596), 'numpy.array', 'np.array', (['window_tor'], {}), '(window_tor)\n', (3584, 3596), True, 'import numpy as np\n'), ((3615, 3636), 'numpy.array', 'np.array', (['window_exit'], {}), '(window_exit)\n', (3623, 3636), True, 'import numpy as np\n'), ((4724, 4746), 'numpy.arange', 'np.arange', (['num_windows'], {}), '(num_windows)\n', (4733, 4746), True, 'import numpy as np\n'), ((8330, 8354), 'numpy.array', 'np.array', (['train_windows1'], {}), '(train_windows1)\n', (8338, 8354), True, 'import numpy as np\n'), ((8376, 8400), 'numpy.array', 'np.array', (['valid_windows1'], {}), '(valid_windows1)\n', (8384, 8400), True, 'import numpy as np\n'), ((8423, 8447), 'numpy.array', 'np.array', (['train_windows2'], {}), '(train_windows2)\n', (8431, 8447), True, 'import numpy as np\n'), ((8469, 8493), 'numpy.array', 'np.array', (['valid_windows2'], {}), '(valid_windows2)\n', (8477, 8493), True, 'import numpy as np\n'), ((8514, 8536), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (8522, 8536), True, 'import numpy as np\n'), ((8555, 8576), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (8563, 8576), True, 'import numpy as np\n'), ((8596, 8618), 'numpy.array', 'np.array', (['valid_labels'], {}), '(valid_labels)\n', (8604, 8618), True, 'import numpy as 
np\n'), ((8946, 9015), 'new_model.create_model', 'create_model', ([], {'input_shape': 'input_shape1', 'emb_size': '(64)', 'model_name': '"""tor"""'}), "(input_shape=input_shape1, emb_size=64, model_name='tor')\n", (8958, 9015), False, 'from new_model import create_model, create_model_2d\n'), ((9040, 9110), 'new_model.create_model', 'create_model', ([], {'input_shape': 'input_shape2', 'emb_size': '(64)', 'model_name': '"""exit"""'}), "(input_shape=input_shape2, emb_size=64, model_name='exit')\n", (9052, 9110), False, 'from new_model import create_model, create_model_2d\n'), ((9129, 9163), 'keras.layers.Input', 'Input', (['input_shape1'], {'name': '"""anchor"""'}), "(input_shape1, name='anchor')\n", (9134, 9163), False, 'from keras.layers import Input, Lambda, Dot\n'), ((9179, 9215), 'keras.layers.Input', 'Input', (['input_shape2'], {'name': '"""positive"""'}), "(input_shape2, name='positive')\n", (9184, 9215), False, 'from keras.layers import Input, Lambda, Dot\n'), ((9231, 9267), 'keras.layers.Input', 'Input', (['input_shape2'], {'name': '"""negative"""'}), "(input_shape2, name='negative')\n", (9236, 9267), False, 'from keras.layers import Input, Lambda, Dot\n'), ((10108, 10164), 'keras.models.Model', 'Model', ([], {'inputs': '[anchor, positive, negative]', 'outputs': 'loss'}), '(inputs=[anchor, positive, negative], outputs=loss)\n', (10113, 10164), False, 'from keras.models import Model\n'), ((10228, 10294), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.001)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.001, decay=1e-06, momentum=0.9, nesterov=True)\n', (10242, 10294), False, 'from keras import optimizers\n'), ((4201, 4224), 'numpy.array', 'np.array', (['train_window1'], {}), '(train_window1)\n', (4209, 4224), True, 'import numpy as np\n'), ((4226, 4249), 'numpy.array', 'np.array', (['train_window2'], {}), '(train_window2)\n', (4234, 4249), True, 'import numpy as np\n'), ((4251, 4273), 'numpy.array', 'np.array', 
(['test_window1'], {}), '(test_window1)\n', (4259, 4273), True, 'import numpy as np\n'), ((4275, 4297), 'numpy.array', 'np.array', (['test_window2'], {}), '(test_window2)\n', (4283, 4297), True, 'import numpy as np\n'), ((4299, 4321), 'numpy.array', 'np.array', (['test_window1'], {}), '(test_window1)\n', (4307, 4321), True, 'import numpy as np\n'), ((4323, 4345), 'numpy.array', 'np.array', (['test_window2'], {}), '(test_window2)\n', (4331, 4345), True, 'import numpy as np\n'), ((9468, 9496), 'keras.layers.Dot', 'Dot', ([], {'axes': '(-1)', 'normalize': '(True)'}), '(axes=-1, normalize=True)\n', (9471, 9496), False, 'from keras.layers import Input, Lambda, Dot\n'), ((9519, 9547), 'keras.layers.Dot', 'Dot', ([], {'axes': '(-1)', 'normalize': '(True)'}), '(axes=-1, normalize=True)\n', (9522, 9547), False, 'from keras.layers import Input, Lambda, Dot\n'), ((9782, 9834), 'keras.backend.maximum', 'K.maximum', (['(0.0)', '(negative_sim - positive_sim + _alpha)'], {}), '(0.0, negative_sim - positive_sim + _alpha)\n', (9791, 9834), True, 'import keras.backend as K\n'), ((9992, 10006), 'keras.backend.mean', 'K.mean', (['losses'], {}), '(losses)\n', (9998, 10006), True, 'import keras.backend as K\n'), ((10020, 10066), 'keras.layers.Lambda', 'Lambda', (['cosine_triplet_loss'], {'output_shape': '(1,)'}), '(cosine_triplet_loss, output_shape=(1,))\n', (10026, 10066), False, 'from keras.layers import Input, Lambda, Dot\n'), ((10350, 10377), 'keras.backend.mean', 'K.mean', (['(y_pred - 0 * y_true)'], {}), '(y_pred - 0 * y_true)\n', (10356, 10377), True, 'import keras.backend as K\n'), ((10708, 10753), 'numpy.concatenate', 'np.concatenate', (['(tor_embs, exit_embs)'], {'axis': '(0)'}), '((tor_embs, exit_embs), axis=0)\n', (10722, 10753), True, 'import numpy as np\n'), ((10889, 10929), 'numpy.dot', 'np.dot', (['all_embs[:mid]', 'all_embs[mid:].T'], {}), '(all_embs[:mid], all_embs[mid:].T)\n', (10895, 10929), True, 'import numpy as np\n'), ((16522, 16551), 'numpy.random.shuffle', 
'np.random.shuffle', (['random_ind'], {}), '(random_ind)\n', (16539, 16551), True, 'import numpy as np\n'), ((16565, 16591), 'numpy.array', 'np.array', (['random_ind[:mid]'], {}), '(random_ind[:mid])\n', (16573, 16591), True, 'import numpy as np\n'), ((16605, 16631), 'numpy.array', 'np.array', (['random_ind[mid:]'], {}), '(random_ind[mid:])\n', (16613, 16631), True, 'import numpy as np\n'), ((1641, 1680), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1655, 1680), True, 'import tensorflow as tf\n'), ((2441, 2466), 'numpy.array', 'np.array', (['window_tor_size'], {}), '(window_tor_size)\n', (2449, 2466), True, 'import numpy as np\n'), ((2504, 2530), 'numpy.array', 'np.array', (['window_exit_size'], {}), '(window_exit_size)\n', (2512, 2530), True, 'import numpy as np\n'), ((2566, 2590), 'numpy.array', 'np.array', (['window_tor_ipd'], {}), '(window_tor_ipd)\n', (2574, 2590), True, 'import numpy as np\n'), ((2627, 2652), 'numpy.array', 'np.array', (['window_exit_ipd'], {}), '(window_exit_ipd)\n', (2635, 2652), True, 'import numpy as np\n'), ((3393, 3459), 'numpy.concatenate', 'np.concatenate', (['(window_tor_ipd[i], window_tor_size[i])'], {'axis': 'None'}), '((window_tor_ipd[i], window_tor_size[i]), axis=None)\n', (3407, 3459), True, 'import numpy as np\n'), ((3488, 3556), 'numpy.concatenate', 'np.concatenate', (['(window_exit_ipd[i], window_exit_size[i])'], {'axis': 'None'}), '((window_exit_ipd[i], window_exit_size[i]), axis=None)\n', (3502, 3556), True, 'import numpy as np\n'), ((4099, 4122), 'numpy.array', 'np.array', (['train_window1'], {}), '(train_window1)\n', (4107, 4122), True, 'import numpy as np\n'), ((4158, 4181), 'numpy.array', 'np.array', (['train_window1'], {}), '(train_window1)\n', (4166, 4181), True, 'import numpy as np\n'), ((5301, 5320), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (5312, 5320), False, 'import pickle\n'), ((8036, 8056), 'numpy.array', 'np.array', 
(['temp_test1'], {}), '(temp_test1)\n', (8044, 8056), True, 'import numpy as np\n'), ((8087, 8107), 'numpy.array', 'np.array', (['temp_test2'], {}), '(temp_test2)\n', (8095, 8107), True, 'import numpy as np\n'), ((8240, 8263), 'numpy.array', 'np.array', (['test_windows1'], {}), '(test_windows1)\n', (8248, 8263), True, 'import numpy as np\n'), ((8283, 8306), 'numpy.array', 'np.array', (['test_windows2'], {}), '(test_windows2)\n', (8291, 8306), True, 'import numpy as np\n'), ((8650, 8674), 'numpy.array', 'np.array', (['train_windows1'], {}), '(train_windows1)\n', (8658, 8674), True, 'import numpy as np\n'), ((8712, 8736), 'numpy.array', 'np.array', (['train_windows2'], {}), '(train_windows2)\n', (8720, 8736), True, 'import numpy as np\n'), ((8773, 8796), 'numpy.array', 'np.array', (['test_windows1'], {}), '(test_windows1)\n', (8781, 8796), True, 'import numpy as np\n'), ((8833, 8856), 'numpy.array', 'np.array', (['test_windows2'], {}), '(test_windows2)\n', (8841, 8856), True, 'import numpy as np\n'), ((10784, 10832), 'numpy.linalg.norm', 'np.linalg.norm', (['all_embs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(all_embs, axis=-1, keepdims=True)\n', (10798, 10832), True, 'import numpy as np\n'), ((6152, 6172), 'random.shuffle', 'random.shuffle', (['keys'], {}), '(keys)\n', (6166, 6172), False, 'import random\n'), ((7921, 7941), 'numpy.array', 'np.array', (['temp_test1'], {}), '(temp_test1)\n', (7929, 7941), True, 'import numpy as np\n'), ((7979, 7999), 'numpy.array', 'np.array', (['temp_test2'], {}), '(temp_test2)\n', (7987, 7999), True, 'import numpy as np\n'), ((11902, 11953), 'numpy.where', 'np.where', (['(similarities[anc_idx] + alpha_value > sim)'], {}), '(similarities[anc_idx] + alpha_value > sim)\n', (11910, 11953), True, 'import numpy as np\n'), ((12199, 12232), 'numpy.random.choice', 'np.random.choice', (['possible_ids', '(1)'], {}), '(possible_ids, 1)\n', (12215, 12232), True, 'import numpy as np\n'), ((6431, 6447), 'numpy.array', 'np.array', (['test_c'], 
{}), '(test_c)\n', (6439, 6447), True, 'import numpy as np\n'), ((6490, 6507), 'numpy.array', 'np.array', (['train_c'], {}), '(train_c)\n', (6498, 6507), True, 'import numpy as np\n'), ((12450, 12485), 'numpy.random.choice', 'np.random.choice', (['valid_neg_pool', '(1)'], {}), '(valid_neg_pool, 1)\n', (12466, 12485), True, 'import numpy as np\n'), ((14189, 14222), 'numpy.zeros', 'np.zeros', ([], {'shape': 'traces_a.shape[0]'}), '(shape=traces_a.shape[0])\n', (14197, 14222), True, 'import numpy as np\n'), ((16371, 16409), 'keras.callbacks.LambdaCallback', 'LambdaCallback', ([], {'on_epoch_end': 'saveModel'}), '(on_epoch_end=saveModel)\n', (16385, 16409), False, 'from keras.callbacks import LambdaCallback\n'), ((16061, 16099), 'keras.callbacks.LambdaCallback', 'LambdaCallback', ([], {'on_epoch_end': 'saveModel'}), '(on_epoch_end=saveModel)\n', (16075, 16099), False, 'from keras.callbacks import LambdaCallback\n')] |
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import argparse
import keras
from keras.models import Model
from keras.layers import Dense, Dropout, Concatenate, Average
from keras import backend as K
from titer import *
import numpy as np
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from importlib import import_module
import matplotlib.pyplot as plt
# Use the non-interactive Agg backend so figures can be saved without a display
plt.switch_backend('agg')
# Cap TensorFlow GPU memory at 20% of the card so it can be shared
config =tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction=0.2
set_session(tf.Session(config=config))
batch_size = 256
# Command-line options for the ensemble evaluation run
parser=argparse.ArgumentParser()
parser.add_argument("--rat",type=float,default=0.6,help='ratio for weak qg batch')
parser.add_argument("--end",type=float,default=1.,help='end ratio')
parser.add_argument("--epoch",type=int,default=10,help='epoch')
parser.add_argument("--net1",type=str,default="ten100grucnn",help='rch')
parser.add_argument("--net2",type=str,default="ten100grucnn",help='rch')
parser.add_argument("--rc",type=str,default='rc',help='rnn or cnn')
parser.add_argument("--pt",type=int,default=100,help='pt range pt~pt*1.1')
args=parser.parse_args()
# input image dimensions
img_rows, img_cols = 33, 33
# the data, shuffled and split between train and test sets
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
#if K.image_data_format() == 'channels_first':
#    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
#    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
#    input_shape = (1, img_rows, img_cols)
#else:
#x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
#x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# Expected input shapes of the two ensemble members:
# a 9-channel 33x33 image branch and a 20x10 sequence branch.
input_shape1= (9,33,33)
input_shape2= (20,10)
#model.compile(loss=keras.losses.categorical_crossentropy,
#              optimizer=keras.optimizers.Adadelta(),
#              metrics=['accuracy'])
#model=keras.models.load_model('save/fullydijetsame_10')
savename="save/ten"+str(args.pt)+str(args.net1)
# NOTE(review): eval(open(...).readline()) trusts the history file contents and
# leaks the file handle -- consider int() inside a `with` block.
epoch=eval(open(savename+"/history").readline())+1
model1=keras.models.load_model(savename+"/check_"+str(epoch))
# Suffix layer names so the two sub-models do not collide inside the ensemble
for i in range(len(model1.layers)):
	model1.layers[i].name+="_1"
savename="save/ten"+str(args.pt)+str(args.net2)
epoch=eval(open(savename+"/history").readline())+1
model2=keras.models.load_model(savename+"/check_"+str(epoch))
for i in range(len(model2.layers)):
	model2.layers[i].name+="_2"
# Ensemble: element-wise average of the two sub-model outputs
out=Average()([model1.outputs[0],model2.outputs[0]])
model=Model(inputs=[model1.input,model2.input],outputs=out,name='ensemble')
# Build the "rc" string from the ensemble's feed inputs:
# a 4-D input marks a CNN image branch ("c"), a 3-D input an RNN branch ("r")
rc=""
for sha in model._feed_inputs:
	if(len(sha._keras_shape)==4):
		rc+="c"
	if(len(sha._keras_shape)==3):
		rc+="r"
tdata="sdata/dijet_{0}_{1}/dijet_{0}_{1}_test.root".format(args.pt,int(args.pt*1.1))
# wkiter comes from `titer` (star import); presumably yields (inputs, labels)
# batches from the ROOT file -- TODO confirm against titer
test=wkiter([tdata,tdata],batch_size=batch_size,end=args.end*1.,rc=rc)
gen=test.next()
from sklearn.metrics import roc_auc_score, auc,precision_recall_curve,roc_curve,average_precision_score
# Accumulators: x/y = true labels / predicted scores for the ROC curve,
# g/q = per-class score lists (label==1 goes to g, else q) for the histograms
x=[]
y=[]
g=[]
q=[]
entries=300
# NOTE(review): the inner loop assumes every batch has exactly batch_size
# events; a short final batch would index past the end -- confirm that the
# iterator always yields full batches.
batch_num=batch_size
print ("test",test.totalnum())
toten=test.totalnum()
for j in range(entries):
	a,c=next(gen)
	if(j>=toten):
		break
	# column 0 of predictions/labels is used as the score / truth bit
	b=model.predict(a,verbose=0)[:,0]
	x=np.append(x,np.array(c[:,0]))
	y=np.append(y,b)
	for i in range(batch_num):
		if(c[i][0]==1):
			g.append(b[i])
		else:
			q.append(b[i])
# Score distributions for the two classes (unit weights, outline histograms)
plt.figure(1)
plt.hist(q,bins=50,weights=np.ones_like(q),histtype='step',alpha=0.7,label='quark')
plt.hist(g,bins=50,weights=np.ones_like(g),histtype='step',alpha=0.7,label='gluon')
plt.legend(loc="upper center")
savename="ensemble/"+args.net1+args.net2+str(args.pt)
plt.savefig(savename+"out.png")
# Dump the raw per-class scores alongside the plot
f=open(savename+"out.dat",'w')
f.write(str(q)+"\n")
f.write(str(g))
f.close()
# ROC: plot 1 - FPR against TPR, with the AUC in the legend and filename
t_fpr,t_tpr,_=roc_curve(x,y)
t_fnr=1-t_fpr
test_auc=np.around(auc(t_fpr,t_tpr),4)
plt.figure(2)
plt.plot(t_tpr,t_fnr,alpha=0.5,label="AUC={}".format(test_auc),lw=2)
plt.legend(loc='lower left')
# Remove stale ROC images first, since the AUC value is part of the filename
os.system("rm "+savename+"roc*.png")
plt.savefig(savename+"roc"+str(test_auc)+".png")
f=open(savename+"roc.dat",'w')
f.write(str(t_tpr.tolist())+"\n")
f.write(str(t_fnr.tolist()))
f.close()
#print(b,c)
| [
"matplotlib.pyplot.switch_backend",
"numpy.ones_like",
"argparse.ArgumentParser",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.legend",
"tensorflow.Session",
"os.system",
"keras.models.Model",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"numpy.append",
"keras.layers.Average",
"s... | [((668, 693), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (686, 693), True, 'import matplotlib.pyplot as plt\n'), ((702, 718), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (716, 718), True, 'import tensorflow as tf\n'), ((839, 864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (862, 864), False, 'import argparse\n'), ((2706, 2778), 'keras.models.Model', 'Model', ([], {'inputs': '[model1.input, model2.input]', 'outputs': 'out', 'name': '"""ensemble"""'}), "(inputs=[model1.input, model2.input], outputs=out, name='ensemble')\n", (2711, 2778), False, 'from keras.models import Model\n'), ((3542, 3555), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3552, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3754), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""'}), "(loc='upper center')\n", (3734, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3842), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savename + 'out.png')"], {}), "(savename + 'out.png')\n", (3820, 3842), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3948), 'sklearn.metrics.roc_curve', 'roc_curve', (['x', 'y'], {}), '(x, y)\n', (3942, 3948), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, roc_curve, average_precision_score\n'), ((4001, 4014), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4011, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4084, 4112), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (4094, 4112), True, 'import matplotlib.pyplot as plt\n'), ((4113, 4153), 'os.system', 'os.system', (["('rm ' + savename + 'roc*.png')"], {}), "('rm ' + savename + 'roc*.png')\n", (4122, 4153), False, 'import os\n'), ((786, 811), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (796, 811), True, 
'import tensorflow as tf\n'), ((2651, 2660), 'keras.layers.Average', 'Average', ([], {}), '()\n', (2658, 2660), False, 'from keras.layers import Dense, Dropout, Concatenate, Average\n'), ((3426, 3441), 'numpy.append', 'np.append', (['y', 'b'], {}), '(y, b)\n', (3435, 3441), True, 'import numpy as np\n'), ((3981, 3998), 'sklearn.metrics.auc', 'auc', (['t_fpr', 't_tpr'], {}), '(t_fpr, t_tpr)\n', (3984, 3998), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, roc_curve, average_precision_score\n'), ((3404, 3421), 'numpy.array', 'np.array', (['c[:, 0]'], {}), '(c[:, 0])\n', (3412, 3421), True, 'import numpy as np\n'), ((3583, 3598), 'numpy.ones_like', 'np.ones_like', (['q'], {}), '(q)\n', (3595, 3598), True, 'import numpy as np\n'), ((3667, 3682), 'numpy.ones_like', 'np.ones_like', (['g'], {}), '(g)\n', (3679, 3682), True, 'import numpy as np\n')] |
# Import python libraries
import numpy as np
import cv2
# set to 1 to display the intermediate pipeline images (threshold, dilation)
debug = 0
class Segmentation(object):
    """Detect moving objects (cells) in video frames via background
    subtraction, edge detection, and contour analysis."""

    def __init__(self):
        # MOG2 maintains a per-pixel Gaussian-mixture background model
        self.fgbg = cv2.createBackgroundSubtractorMOG2()

    def detect(self, frame):
        '''Detect objects in a video frame using the following pipeline:
        - Convert the captured frame from BGR to GRAY
        - Perform background subtraction
        - Detect edges using Canny edge detection
          http://docs.opencv.org/trunk/da/d22/tutorial_py_canny.html
        - Retain only edges within the threshold
        - Dilate to merge edge fragments into solid blobs
        - Find contours
        - Find the centroid of each valid contour
        Args:
            frame: single BGR image frame (drawn on in place)
        Return:
            centers: list of object centroids found in the frame,
                     each a rounded 2x1 numpy array [[x], [y]]
        '''
        # Convert BGR to GRAY
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Foreground mask: pixels that differ from the background model
        fgmask = self.fgbg.apply(gray)
        # Detect edges on the foreground mask
        edges = cv2.Canny(fgmask, 50, 190, 3)
        # Canny output is binary (0/255); keep only strong edges
        _, thresh = cv2.threshold(edges, 127, 255, 0)
        # Dilate so nearby edge fragments merge into single objects
        kernel = np.ones((5, 5), 'uint8')
        dilated = cv2.dilate(thresh, kernel, iterations=2)
        # BUGFIX: the module-level `debug` flag ("set to 1 for pipeline
        # images") was never consulted -- the pipeline windows were shown
        # unconditionally. Gate them as the flag's comment intends.
        if debug:
            cv2.imshow('thresh', thresh)
            cv2.imshow('dilated', dilated)
        # Find external contours only (OpenCV 4 two-value return)
        contours, hierarchy = cv2.findContours(dilated,
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
        centers = []  # vector of object centroids in a frame
        # Minimum enclosing-circle radius (pixels) for a contour to count
        blob_radius_thresh = 15
        # Find centroid for each valid contour
        for cnt in contours:
            try:
                # Calculate and draw the enclosing circle
                (x, y), radius = cv2.minEnclosingCircle(cnt)
                centeroid = (int(x), int(y))
                radius = int(radius)
                if (radius > blob_radius_thresh):
                    cv2.circle(frame, centeroid, radius, (0, 255, 0), 2)
                    b = np.array([[x], [y]])
                    centers.append(np.round(b))
            except ZeroDivisionError:
                pass
        # Show contours of tracked objects
        cv2.imshow('Track Cells', frame)
        return centers
| [
"cv2.createBackgroundSubtractorMOG2",
"cv2.Canny",
"cv2.minEnclosingCircle",
"cv2.circle",
"cv2.dilate",
"cv2.cvtColor",
"cv2.threshold",
"numpy.ones",
"numpy.array",
"cv2.imshow",
"numpy.round",
"cv2.findContours"
] | [((215, 251), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (249, 251), False, 'import cv2\n'), ((937, 976), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (949, 976), False, 'import cv2\n'), ((1136, 1165), 'cv2.Canny', 'cv2.Canny', (['fgmask', '(50)', '(190)', '(3)'], {}), '(fgmask, 50, 190, 3)\n', (1145, 1165), False, 'import cv2\n'), ((1238, 1271), 'cv2.threshold', 'cv2.threshold', (['edges', '(127)', '(255)', '(0)'], {}), '(edges, 127, 255, 0)\n', (1251, 1271), False, 'import cv2\n'), ((1280, 1308), 'cv2.imshow', 'cv2.imshow', (['"""thresh"""', 'thresh'], {}), "('thresh', thresh)\n", (1290, 1308), False, 'import cv2\n'), ((1359, 1383), 'numpy.ones', 'np.ones', (['(5, 5)', '"""uint8"""'], {}), "((5, 5), 'uint8')\n", (1366, 1383), True, 'import numpy as np\n'), ((1402, 1442), 'cv2.dilate', 'cv2.dilate', (['thresh', 'kernel'], {'iterations': '(2)'}), '(thresh, kernel, iterations=2)\n', (1412, 1442), False, 'import cv2\n'), ((1451, 1481), 'cv2.imshow', 'cv2.imshow', (['"""dilated"""', 'dilated'], {}), "('dilated', dilated)\n", (1461, 1481), False, 'import cv2\n'), ((1536, 1605), 'cv2.findContours', 'cv2.findContours', (['dilated', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1552, 1605), False, 'import cv2\n'), ((2454, 2486), 'cv2.imshow', 'cv2.imshow', (['"""Track Cells"""', 'frame'], {}), "('Track Cells', frame)\n", (2464, 2486), False, 'import cv2\n'), ((2016, 2043), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (2038, 2043), False, 'import cv2\n'), ((2196, 2248), 'cv2.circle', 'cv2.circle', (['frame', 'centeroid', 'radius', '(0, 255, 0)', '(2)'], {}), '(frame, centeroid, radius, (0, 255, 0), 2)\n', (2206, 2248), False, 'import cv2\n'), ((2273, 2293), 'numpy.array', 'np.array', (['[[x], [y]]'], {}), '([[x], [y]])\n', (2281, 2293), True, 'import numpy as 
np\n'), ((2329, 2340), 'numpy.round', 'np.round', (['b'], {}), '(b)\n', (2337, 2340), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
# ---- Command-line hyper-parameters for ACGAN training ----
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--n_classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
parser.add_argument("--version", type=str, default="acgan2")
opt = parser.parse_args()
print(opt)

cuda = True if torch.cuda.is_available() else False

# Output directories for generated samples and checkpoints
os.makedirs("images/%s" % opt.version, exist_ok=True)
os.makedirs("saved_models/%s" % opt.version, exist_ok=True)
def weights_init_normal(m):
    """DCGAN-style weight initializer, applied via ``model.apply(...)``:
    conv-like layers get N(0, 0.02) weights; BatchNorm2d layers get
    N(1, 0.02) weights and zero bias. Other layers are left untouched."""
    layer_name = type(m).__name__
    if "Conv" in layer_name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm2d" in layer_name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
    """ACGAN generator: embeds the class label, concatenates it with the
    latent noise, then upsamples through transposed convolutions to a
    3-channel 32x32 image squashed to [-1, 1] by tanh."""

    def __init__(self):
        super(Generator, self).__init__()
        # label -> dense embedding, later concatenated with the noise
        self.label_emb = nn.Embedding(opt.n_classes, opt.n_classes)
        self.fc1 = nn.Linear(opt.latent_dim + opt.n_classes, 384)
        # 384 x 1x1 -> 192 x 4x4
        self.tconv2 = nn.Sequential(
            nn.ConvTranspose2d(384, 192, 4, 1, 0, bias=False),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        # 192 x 4x4 -> 96 x 8x8
        self.tconv3 = nn.Sequential(
            nn.ConvTranspose2d(192, 96, 4, 2, 1, bias=False),
            nn.BatchNorm2d(96),
            nn.ReLU(True),
        )
        # 96 x 8x8 -> 48 x 16x16
        self.tconv4 = nn.Sequential(
            nn.ConvTranspose2d(96, 48, 4, 2, 1, bias=False),
            nn.BatchNorm2d(48),
            nn.ReLU(True),
        )
        # 48 x 16x16 -> 3 x 32x32, tanh maps to [-1, 1]
        self.tconv5 = nn.Sequential(
            nn.ConvTranspose2d(48, 3, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, noise, labels):
        # Condition the noise vector on the embedded class label
        conditioned = torch.cat((self.label_emb(labels), noise), -1)
        x = self.fc1(conditioned).view(-1, 384, 1, 1)
        # Run the four upsampling stages in order
        for stage in (self.tconv2, self.tconv3, self.tconv4, self.tconv5):
            x = stage(x)
        return x
class Discriminator(nn.Module):
    """ACGAN discriminator: a six-layer conv stack followed by two heads,
    a real/fake validity score (sigmoid) and an auxiliary class
    prediction (softmax over ``opt.n_classes`` labels)."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # Convolution 1: 3 x 32x32 -> 16 x 16x16 (stride 2)
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, 3, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # Convolution 2
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 3, 1, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # Convolution 3: -> 64 x 8x8 (stride 2)
        self.conv3 = nn.Sequential(
            nn.Conv2d(32, 64, 3, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # Convolution 4
        self.conv4 = nn.Sequential(
            nn.Conv2d(64, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # Convolution 5: -> 256 x 4x4 (stride 2)
        self.conv5 = nn.Sequential(
            nn.Conv2d(128, 256, 3, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # Convolution 6
        self.conv6 = nn.Sequential(
            nn.Conv2d(256, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5, inplace=False),
        )
        # discriminator (real/fake) fc head
        self.fc_dis = nn.Linear(4 * 4 * 512, 1)
        # aux-classifier fc head
        # BUGFIX: was `num_classes`, an undefined name -> NameError at
        # construction time; opt.n_classes is the intended value.
        self.fc_aux = nn.Linear(4 * 4 * 512, opt.n_classes)
        # dim=1: softmax over each sample's class logits (explicit dim
        # avoids the deprecated implicit-dim behavior; same result here)
        self.softmax = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, img):
        # BUGFIX: the body previously read the builtin `input` instead of
        # the `img` parameter, so calling the model raised a TypeError.
        conv1 = self.conv1(img)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        conv6 = self.conv6(conv5)
        flat6 = conv6.view(-1, 4 * 4 * 512)
        fc_dis = self.fc_dis(flat6)
        fc_aux = self.fc_aux(flat6)
        # class probabilities and real/fake probability
        label = self.softmax(fc_aux)
        validity = self.sigmoid(fc_dis)
        return validity, label
# Loss functions: BCE for real/fake, cross-entropy for the class head
adversarial_loss = torch.nn.BCELoss()
auxiliary_loss = torch.nn.CrossEntropyLoss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    auxiliary_loss.cuda()

# Initialize weights with the DCGAN scheme
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# CIFAR-10 training images, resized and normalized channel-wise to [-1, 1]
dataloader = torch.utils.data.DataLoader(
    datasets.CIFAR10(
        "/home/danny/work/ICCV_2019/Datasets/CIFAR10",
        train=True,
        download=False,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])]
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

# Tensor type aliases: put new tensors on the GPU when available
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
def sample_image(n_row, batches_done):
    """Save an n_row x n_row grid of generated images; each row repeats
    class labels 0 .. n_row-1 so every column shows one class."""
    # Latent noise for n_row**2 samples
    noise = np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))
    z = Variable(FloatTensor(noise))
    # Row-major label grid: 0..n_row-1 repeated for each of the n_row rows
    label_grid = [num for _ in range(n_row) for num in range(n_row)]
    labels = Variable(LongTensor(np.array(label_grid)))
    gen_imgs = generator(z, labels)
    save_image(gen_imgs.data, "images/%s/%d.png" % (opt.version, batches_done), nrow=n_row, normalize=True)
# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):

        batch_size = imgs.shape[0]

        # Adversarial ground truths (1 = real, 0 = fake)
        valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)

        # Configure input
        real_imgs = Variable(imgs.type(FloatTensor))
        labels = Variable(labels.type(LongTensor))

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()

        # Sample noise and labels as generator input
        z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim))))
        gen_labels = Variable(LongTensor(np.random.randint(0, opt.n_classes, batch_size)))

        # Generate a batch of images
        gen_imgs = generator(z, gen_labels)

        # Loss measures generator's ability to fool the discriminator
        validity, pred_label = discriminator(gen_imgs)
        g_loss = 0.5 * (adversarial_loss(validity, valid) + auxiliary_loss(pred_label, gen_labels))

        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # Loss for real images
        real_pred, real_aux = discriminator(real_imgs)
        d_real_loss = (adversarial_loss(real_pred, valid) + auxiliary_loss(real_aux, labels)) / 2

        # Loss for fake images (detach: no generator gradients in this pass)
        fake_pred, fake_aux = discriminator(gen_imgs.detach())
        d_fake_loss = (adversarial_loss(fake_pred, fake) + auxiliary_loss(fake_aux, gen_labels)) / 2

        # Total discriminator loss
        d_loss = (d_real_loss + d_fake_loss) / 2

        # Calculate discriminator accuracy over real + fake aux predictions
        pred = np.concatenate([real_aux.data.cpu().numpy(), fake_aux.data.cpu().numpy()], axis=0)
        gt = np.concatenate([labels.data.cpu().numpy(), gen_labels.data.cpu().numpy()], axis=0)
        d_acc = np.mean(np.argmax(pred, axis=1) == gt)

        d_loss.backward()
        optimizer_D.step()
        print(
            "[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
            % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), 100 * d_acc, g_loss.item())
        )
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            sample_image(n_row=10, batches_done=batches_done)

    # Checkpoint both networks at the end of every epoch
    torch.save(generator.state_dict(), "saved_models/%s/%d_G.pth" % (opt.version, epoch))
    torch.save(discriminator.state_dict(), "saved_models/%s/%d_D.pth" % (opt.version, epoch))
| [
"torch.nn.Dropout",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.nn.Embedding",
"torch.nn.init.constant_",
"torch.nn.Softmax",
"numpy.random.randint",
"numpy.random.normal",
"torchvision.transforms.Normalize",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.nn.Conv2d",
... | [((331, 356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (354, 356), False, 'import argparse\n'), ((1588, 1641), 'os.makedirs', 'os.makedirs', (["('images/%s' % opt.version)"], {'exist_ok': '(True)'}), "('images/%s' % opt.version, exist_ok=True)\n", (1599, 1641), False, 'import os\n'), ((1642, 1701), 'os.makedirs', 'os.makedirs', (["('saved_models/%s' % opt.version)"], {'exist_ok': '(True)'}), "('saved_models/%s' % opt.version, exist_ok=True)\n", (1653, 1701), False, 'import os\n'), ((5527, 5545), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (5543, 5545), False, 'import torch\n'), ((5563, 5590), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (5588, 5590), False, 'import torch\n'), ((1550, 1575), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1573, 1575), False, 'import torch\n'), ((7068, 7175), 'torchvision.utils.save_image', 'save_image', (['gen_imgs.data', "('images/%s/%d.png' % (opt.version, batches_done))"], {'nrow': 'n_row', 'normalize': '(True)'}), "(gen_imgs.data, 'images/%s/%d.png' % (opt.version, batches_done),\n nrow=n_row, normalize=True)\n", (7078, 7175), False, 'from torchvision.utils import save_image\n'), ((1814, 1861), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (1835, 1861), False, 'import torch\n'), ((2136, 2178), 'torch.nn.Embedding', 'nn.Embedding', (['opt.n_classes', 'opt.n_classes'], {}), '(opt.n_classes, opt.n_classes)\n', (2148, 2178), True, 'import torch.nn as nn\n'), ((2199, 2245), 'torch.nn.Linear', 'nn.Linear', (['(opt.latent_dim + opt.n_classes)', '(384)'], {}), '(opt.latent_dim + opt.n_classes, 384)\n', (2208, 2245), True, 'import torch.nn as nn\n'), ((4828, 4853), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 512)', '(1)'], {}), '(4 * 4 * 512, 1)\n', (4837, 4853), True, 'import torch.nn as nn\n'), ((4900, 4935), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 
512)', 'num_classes'], {}), '(4 * 4 * 512, num_classes)\n', (4909, 4935), True, 'import torch.nn as nn\n'), ((4985, 4997), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (4995, 4997), True, 'import torch.nn as nn\n'), ((5021, 5033), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5031, 5033), True, 'import torch.nn as nn\n'), ((1916, 1963), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['m.weight.data', '(1.0)', '(0.02)'], {}), '(m.weight.data, 1.0, 0.02)\n', (1937, 1963), False, 'import torch\n'), ((1972, 2013), 'torch.nn.init.constant_', 'torch.nn.init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (1995, 2013), False, 'import torch\n'), ((2304, 2353), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(384)', '(192)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(384, 192, 4, 1, 0, bias=False)\n', (2322, 2353), True, 'import torch.nn as nn\n'), ((2367, 2386), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(192)'], {}), '(192)\n', (2381, 2386), True, 'import torch.nn as nn\n'), ((2400, 2413), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2407, 2413), True, 'import torch.nn as nn\n'), ((2474, 2522), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(192)', '(96)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(192, 96, 4, 2, 1, bias=False)\n', (2492, 2522), True, 'import torch.nn as nn\n'), ((2536, 2554), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(96)'], {}), '(96)\n', (2550, 2554), True, 'import torch.nn as nn\n'), ((2568, 2581), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2575, 2581), True, 'import torch.nn as nn\n'), ((2642, 2689), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(96)', '(48)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(96, 48, 4, 2, 1, bias=False)\n', (2660, 2689), True, 'import torch.nn as nn\n'), ((2703, 2721), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(48)'], {}), '(48)\n', (2717, 2721), True, 'import torch.nn as nn\n'), ((2735, 2748), 'torch.nn.ReLU', 'nn.ReLU', 
(['(True)'], {}), '(True)\n', (2742, 2748), True, 'import torch.nn as nn\n'), ((2809, 2855), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(48)', '(3)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(48, 3, 4, 2, 1, bias=False)\n', (2827, 2855), True, 'import torch.nn as nn\n'), ((2869, 2878), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2876, 2878), True, 'import torch.nn as nn\n'), ((3418, 3455), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(3, 16, 3, 2, 1, bias=False)\n', (3427, 3455), True, 'import torch.nn as nn\n'), ((3469, 3500), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3481, 3500), True, 'import torch.nn as nn\n'), ((3514, 3544), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (3524, 3544), True, 'import torch.nn as nn\n'), ((3628, 3666), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(16, 32, 3, 1, 1, bias=False)\n', (3637, 3666), True, 'import torch.nn as nn\n'), ((3680, 3698), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (3694, 3698), True, 'import torch.nn as nn\n'), ((3712, 3743), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3724, 3743), True, 'import torch.nn as nn\n'), ((3757, 3787), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (3767, 3787), True, 'import torch.nn as nn\n'), ((3871, 3909), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(32, 64, 3, 2, 1, bias=False)\n', (3880, 3909), True, 'import torch.nn as nn\n'), ((3923, 3941), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (3937, 3941), True, 'import torch.nn as nn\n'), ((3955, 3986), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3967, 
3986), True, 'import torch.nn as nn\n'), ((4000, 4030), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (4010, 4030), True, 'import torch.nn as nn\n'), ((4114, 4153), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(64, 128, 3, 1, 1, bias=False)\n', (4123, 4153), True, 'import torch.nn as nn\n'), ((4167, 4186), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (4181, 4186), True, 'import torch.nn as nn\n'), ((4200, 4231), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4212, 4231), True, 'import torch.nn as nn\n'), ((4245, 4275), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (4255, 4275), True, 'import torch.nn as nn\n'), ((4359, 4399), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(128, 256, 3, 2, 1, bias=False)\n', (4368, 4399), True, 'import torch.nn as nn\n'), ((4413, 4432), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (4427, 4432), True, 'import torch.nn as nn\n'), ((4446, 4477), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4458, 4477), True, 'import torch.nn as nn\n'), ((4491, 4521), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (4501, 4521), True, 'import torch.nn as nn\n'), ((4605, 4645), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(256, 512, 3, 1, 1, bias=False)\n', (4614, 4645), True, 'import torch.nn as nn\n'), ((4659, 4678), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (4673, 4678), True, 'import torch.nn as nn\n'), ((4692, 4723), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4704, 4723), True, 'import torch.nn as nn\n'), ((4737, 
4767), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {'inplace': '(False)'}), '(0.5, inplace=False)\n', (4747, 4767), True, 'import torch.nn as nn\n'), ((6800, 6852), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n_row ** 2, opt.latent_dim)'], {}), '(0, 1, (n_row ** 2, opt.latent_dim))\n', (6816, 6852), True, 'import numpy as np\n'), ((7877, 7929), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, opt.latent_dim)'], {}), '(0, 1, (batch_size, opt.latent_dim))\n', (7893, 7929), True, 'import numpy as np\n'), ((7973, 8020), 'numpy.random.randint', 'np.random.randint', (['(0)', 'opt.n_classes', 'batch_size'], {}), '(0, opt.n_classes, batch_size)\n', (7990, 8020), True, 'import numpy as np\n'), ((9242, 9265), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (9251, 9265), True, 'import numpy as np\n'), ((6114, 6145), 'torchvision.transforms.Resize', 'transforms.Resize', (['opt.img_size'], {}), '(opt.img_size)\n', (6131, 6145), True, 'import torchvision.transforms as transforms\n'), ((6147, 6168), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6166, 6168), True, 'import torchvision.transforms as transforms\n'), ((6170, 6233), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (6190, 6233), True, 'import torchvision.transforms as transforms\n')] |
# Step 4
# Matrix Reordering using GPU
# push high values to the diagonal line
"""
while True:
randomly detect swapable pairs (choices) on GPU
sort choices by the gains, remove the conflicted choices
swap remain pairs
if not many pairs found at this step:
        double the search scale up to 8192.
"""
import os, argparse
import numpy as np
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32
from tools.core import loss_gpu, swap_inplace
from tools.images import save_pic
import wandb
@cuda.jit
def _detect_gpu(matrix, vec, rng_states):
    # CUDA kernel: each thread draws one random pair of indices (x, y) and
    # evaluates whether swapping rows/columns x and y would reduce the
    # linear-arrangement loss. Pairs with a strictly positive gain are
    # written into ``vec`` (one row per thread: [x, y, gain]).
    grid_id = cuda.grid(1)
    if grid_id<vec.shape[0]:
        l = matrix.shape[0]
        # two independent draws from this thread's RNG stream, scaled to [0, l)
        x = int(xoroshiro128p_uniform_float32(rng_states, grid_id) * l)
        y = int(xoroshiro128p_uniform_float32(rng_states, grid_id) * l)
        ret = 0
        for m in [x, y]:
            m_inv = x + y - m  # the partner index: m_inv == y when m == x, and vice versa
            for n in range(l):
                # only non-zero entries of either row contribute to the gain
                if matrix[m, n] > 0 or matrix[m_inv, n] > 0:
                    if m != n and m_inv != n:
                        # change in weighted index-distance cost if m and m_inv swap places
                        ret += (abs(m-n)-abs(m_inv-n)) * (matrix[m, n] - matrix[m_inv, n])
        if ret>0:
            # record the candidate swap and its gain; host code sorts and filters
            vec[grid_id,0] = x
            vec[grid_id,1] = y
            vec[grid_id,2] = ret
def detect_and_swap_gpu(matrix, indices, seed, threads_per_block=128, blocks=128):
    """Run one GPU detection pass and apply non-conflicting swaps in place.

    Launches ``_detect_gpu`` with ``threads_per_block * blocks`` threads,
    each proposing one candidate pair with its gain. The host then sorts the
    candidates by gain (descending), greedily drops pairs that share an index
    with an already-kept pair, and applies the remaining swaps.

    Parameters
    ----------
    matrix : ndarray
        Square matrix being reordered; permuted in place via ``swap_inplace``.
    indices : ndarray
        Current permutation of the original ordering; updated in place.
    seed : int
        Seed for the per-thread xoroshiro128p RNG states.
    threads_per_block, blocks : int
        CUDA launch configuration; the product is the number of candidates.

    Returns
    -------
    tuple
        ``(matrix, indices, vec_detected, vec_swapped)`` — the (mutated)
        inputs plus the counts of detected and actually-applied swaps.
    """
    rng_states = create_xoroshiro128p_states(threads_per_block * blocks, seed=seed)
    # one output row per thread; rows stay all-zero when no gain was found
    vec = np.zeros([threads_per_block * blocks, 3]).astype(int)
    d_matrix = cuda.to_device(matrix)
    d_vec = cuda.to_device(vec)
    _detect_gpu[blocks, threads_per_block](d_matrix, d_vec, rng_states)
    vec = d_vec.copy_to_host()
    vec = vec[~np.all(vec == 0, axis=1)] # select non-zero rows
    # sort candidates by gain, best first
    vec = vec[np.argsort(vec[:, 2])[::-1]] # TODO: greedy?
    vec_detected = vec.shape[0]
    # remove conflicted rows
    # greedy pass: keep a pair only if neither endpoint was used by a
    # higher-gain pair already kept
    visited = {}
    selected = []
    for i in range(vec.shape[0]):
        if vec[i,0] not in visited and vec[i,1] not in visited:
            selected.append(i)
            visited[vec[i,0]] = 1
            visited[vec[i,1]] = 1
    vec = vec[selected, :]
    for i in range(vec.shape[0]):
        swap_inplace(matrix, indices, vec[i,0], vec[i,1])
    vec_swapped = vec.shape[0]
    return matrix, indices, vec_detected, vec_swapped
def step4(args):
    """Iteratively reorder ``data/matrix.npy`` on the GPU to push large values
    toward the diagonal (minimum-linear-arrangement style), logging each step
    to wandb and periodically saving picture snapshots under tmp/minLA_gpu.
    """
    os.makedirs("tmp/minLA_gpu", exist_ok=True)
    matrix = np.load("data/matrix.npy")
    np.random.seed(args.seed)
    indices = np.arange(matrix.shape[0])
    # random initial state
    np.random.shuffle(indices)
    # apply the random permutation symmetrically to rows and columns
    matrix = matrix[indices, :]
    matrix = matrix[:, indices]
    save_pic(matrix, indices, f"tmp/minLA_gpu/seed_{args.seed}_start")
    # record loss for initial state
    loss_LA = loss_gpu(matrix)
    print(f"After initialization, loss LA = {loss_LA}")
    num_blocks = 256
    threads_per_block = 128
    for step in range(args.num_steps):
        # vary the RNG seed per step so each pass samples new candidate pairs
        matrix, indices, vec_detected, vec_swapped = detect_and_swap_gpu(matrix, indices, threads_per_block=threads_per_block, blocks=num_blocks, seed=step+args.seed)
        loss_LA = loss_gpu(matrix)
        record= {"step": step, "loss": loss_LA, "detected": vec_detected, "swapped": vec_swapped, "threads_per_block": threads_per_block, "blocks": num_blocks}
        wandb.log(record)
        print(record, flush=True)
        # periodic snapshot of the current ordering (every 20 steps)
        if (step-1)%20==0:
            save_pic(matrix, indices, f"tmp/minLA_gpu/seed_{args.seed}_step_{step:04}")
        if vec_detected<100: # double the search scale
            if num_blocks<8192:
                num_blocks *= 2
    print("done", flush=True)
if __name__=="__main__":
    wandb.init(project="arxiv_4422")
    parser = argparse.ArgumentParser()
    # type=float so scientific notation like "2e2" is accepted; cast to int below
    parser.add_argument("-n", "--num_steps", type=float, default=200, help="")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    parser.add_argument("--tag", type=str, default="gpu")
    parser.add_argument("--exp_name", type=str)
    args = parser.parse_args()
    args.num_steps = int(args.num_steps)
    wandb.config.update(args)
    step4(args)
"wandb.log",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"tools.core.swap_inplace",
"numpy.argsort",
"numpy.arange",
"numpy.random.shuffle",
"tools.images.save_pic",
"tools.core.loss_gpu",
"numpy.all",
"os.makedirs",
"numba.cuda.random.create_xoroshiro128p_states",
"wandb... | [((632, 644), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (641, 644), False, 'from numba import cuda\n'), ((1360, 1426), 'numba.cuda.random.create_xoroshiro128p_states', 'create_xoroshiro128p_states', (['(threads_per_block * blocks)'], {'seed': 'seed'}), '(threads_per_block * blocks, seed=seed)\n', (1387, 1426), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32\n'), ((1506, 1528), 'numba.cuda.to_device', 'cuda.to_device', (['matrix'], {}), '(matrix)\n', (1520, 1528), False, 'from numba import cuda\n'), ((1541, 1560), 'numba.cuda.to_device', 'cuda.to_device', (['vec'], {}), '(vec)\n', (1555, 1560), False, 'from numba import cuda\n'), ((2307, 2350), 'os.makedirs', 'os.makedirs', (['"""tmp/minLA_gpu"""'], {'exist_ok': '(True)'}), "('tmp/minLA_gpu', exist_ok=True)\n", (2318, 2350), False, 'import os, argparse\n'), ((2365, 2391), 'numpy.load', 'np.load', (['"""data/matrix.npy"""'], {}), "('data/matrix.npy')\n", (2372, 2391), True, 'import numpy as np\n'), ((2396, 2421), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2410, 2421), True, 'import numpy as np\n'), ((2436, 2462), 'numpy.arange', 'np.arange', (['matrix.shape[0]'], {}), '(matrix.shape[0])\n', (2445, 2462), True, 'import numpy as np\n'), ((2494, 2520), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2511, 2520), True, 'import numpy as np\n'), ((2589, 2655), 'tools.images.save_pic', 'save_pic', (['matrix', 'indices', 'f"""tmp/minLA_gpu/seed_{args.seed}_start"""'], {}), "(matrix, indices, f'tmp/minLA_gpu/seed_{args.seed}_start')\n", (2597, 2655), False, 'from tools.images import save_pic\n'), ((2706, 2722), 'tools.core.loss_gpu', 'loss_gpu', (['matrix'], {}), '(matrix)\n', (2714, 2722), False, 'from tools.core import loss_gpu, swap_inplace\n'), ((3587, 3619), 'wandb.init', 'wandb.init', ([], {'project': '"""arxiv_4422"""'}), "(project='arxiv_4422')\n", (3597, 3619), False, 'import 
wandb\n'), ((3634, 3659), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3657, 3659), False, 'import os, argparse\n'), ((3996, 4021), 'wandb.config.update', 'wandb.config.update', (['args'], {}), '(args)\n', (4015, 4021), False, 'import wandb\n'), ((2150, 2201), 'tools.core.swap_inplace', 'swap_inplace', (['matrix', 'indices', 'vec[i, 0]', 'vec[i, 1]'], {}), '(matrix, indices, vec[i, 0], vec[i, 1])\n', (2162, 2201), False, 'from tools.core import loss_gpu, swap_inplace\n'), ((3053, 3069), 'tools.core.loss_gpu', 'loss_gpu', (['matrix'], {}), '(matrix)\n', (3061, 3069), False, 'from tools.core import loss_gpu, swap_inplace\n'), ((3238, 3255), 'wandb.log', 'wandb.log', (['record'], {}), '(record)\n', (3247, 3255), False, 'import wandb\n'), ((1437, 1478), 'numpy.zeros', 'np.zeros', (['[threads_per_block * blocks, 3]'], {}), '([threads_per_block * blocks, 3])\n', (1445, 1478), True, 'import numpy as np\n'), ((1680, 1704), 'numpy.all', 'np.all', (['(vec == 0)'], {'axis': '(1)'}), '(vec == 0, axis=1)\n', (1686, 1704), True, 'import numpy as np\n'), ((1743, 1764), 'numpy.argsort', 'np.argsort', (['vec[:, 2]'], {}), '(vec[:, 2])\n', (1753, 1764), True, 'import numpy as np\n'), ((3329, 3404), 'tools.images.save_pic', 'save_pic', (['matrix', 'indices', 'f"""tmp/minLA_gpu/seed_{args.seed}_step_{step:04}"""'], {}), "(matrix, indices, f'tmp/minLA_gpu/seed_{args.seed}_step_{step:04}')\n", (3337, 3404), False, 'from tools.images import save_pic\n'), ((718, 768), 'numba.cuda.random.xoroshiro128p_uniform_float32', 'xoroshiro128p_uniform_float32', (['rng_states', 'grid_id'], {}), '(rng_states, grid_id)\n', (747, 768), False, 'from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32\n'), ((790, 840), 'numba.cuda.random.xoroshiro128p_uniform_float32', 'xoroshiro128p_uniform_float32', (['rng_states', 'grid_id'], {}), '(rng_states, grid_id)\n', (819, 840), False, 'from numba.cuda.random import create_xoroshiro128p_states, 
xoroshiro128p_uniform_float32\n')] |
# ---------------------------------------------------------------------
# Project "Track 3D-Objects Over Time"
# Copyright (C) 2020, Dr. <NAME> / Dr. <NAME>.
#
# Purpose of this file : Kalman filter class
#
# You should have received a copy of the Udacity license together with this program.
#
# https://www.udacity.com/course/self-driving-car-engineer-nanodegree--nd013
# ----------------------------------------------------------------------
#
# imports
import numpy as np
# add project directory to python path to enable relative imports
import os
import sys
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import misc.params as params
class Filter:
    '''Kalman filter for a constant-velocity motion model.

    The state vector has dimension ``params.dim_state`` (positions followed
    by velocities); the timestep ``params.dt`` and process-noise strength
    ``params.q`` come from ``misc.params``. All matrices are ``np.matrix``.
    '''

    def __init__(self):
        pass

    def F(self):
        """Return the state-transition matrix F for one timestep.

        Constant-velocity model: each position component is advanced by the
        corresponding velocity times dt; velocities are unchanged.
        """
        dt = params.dt
        n = params.dim_state
        # np.identity(n) is already n x n — the old reshape was redundant
        F = np.identity(n)
        F[0, 3] = dt
        F[1, 4] = dt
        F[2, 5] = dt
        return np.matrix(F)

    def Q(self):
        """Return the process-noise covariance Q.

        Simplified diagonal form: dt * q on the main diagonal.
        """
        Q = np.zeros((params.dim_state, params.dim_state))
        np.fill_diagonal(Q, params.dt * params.q)
        return np.matrix(Q)

    def predict(self, track):
        """Predict state x and covariance P to the next timestep.

        The predicted values are stored back on ``track`` via its setters.
        """
        F = self.F()
        x = F * track.x                     # state prediction
        P = F * track.P * F.T + self.Q()    # covariance prediction
        track.set_x(x)
        track.set_P(P)

    def update(self, track, meas):
        """Update state x and covariance P with measurement ``meas``.

        Standard Kalman update; results are stored back on ``track`` and the
        track's score/attributes are refreshed from the measurement.
        """
        x = track.x
        P = track.P
        residual = self.gamma(track, meas)   # measurement residual
        H = meas.sensor.get_H(x)             # measurement matrix (Jacobian)
        S = self.S(track, meas, H)           # residual covariance
        K = P * H.T * S.I                    # Kalman gain
        x = x + K * residual
        I = np.identity(params.dim_state)
        P = (I - K * H) * P
        track.set_x(x)
        track.set_P(P)
        track.update_attributes(meas)

    def gamma(self, track, meas):
        """Return the residual gamma = z - h(x) for measurement ``meas``."""
        return meas.z - meas.sensor.get_hx(track.x)

    def S(self, track, meas, H):
        """Return the residual covariance S = H * P * H^T + R."""
        return H * track.P * H.T + meas.R
"os.path.expanduser",
"numpy.matrix",
"numpy.fill_diagonal",
"os.getcwd",
"numpy.zeros",
"numpy.identity",
"os.path.join"
] | [((723, 763), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'PACKAGE_PARENT'], {}), '(SCRIPT_DIR, PACKAGE_PARENT)\n', (735, 763), False, 'import os\n'), ((1187, 1199), 'numpy.matrix', 'np.matrix', (['F'], {}), '(F)\n', (1196, 1199), True, 'import numpy as np\n'), ((1467, 1513), 'numpy.zeros', 'np.zeros', (['(params.dim_state, params.dim_state)'], {}), '((params.dim_state, params.dim_state))\n', (1475, 1513), True, 'import numpy as np\n'), ((1522, 1556), 'numpy.fill_diagonal', 'np.fill_diagonal', (['Q', '(dt * params.q)'], {}), '(Q, dt * params.q)\n', (1538, 1556), True, 'import numpy as np\n'), ((1572, 1584), 'numpy.matrix', 'np.matrix', (['Q'], {}), '(Q)\n', (1581, 1584), True, 'import numpy as np\n'), ((2508, 2537), 'numpy.identity', 'np.identity', (['params.dim_state'], {}), '(params.dim_state)\n', (2519, 2537), True, 'import numpy as np\n'), ((645, 656), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (654, 656), False, 'import os\n'), ((658, 686), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (676, 686), False, 'import os\n'), ((1062, 1091), 'numpy.identity', 'np.identity', (['params.dim_state'], {}), '(params.dim_state)\n', (1073, 1091), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["BasicSolver"]
import numpy as np
from scipy.linalg import cholesky, cho_solve
class BasicSolver(object):
"""
This is the most basic solver built using :func:`scipy.linalg.cholesky`.
kernel (george.kernels.Kernel): A subclass of :class:`Kernel` specifying
the kernel function.
"""
def __init__(self, kernel):
self.kernel = kernel
self._computed = False
self._log_det = None
@property
def computed(self):
"""
A flag indicating whether or not the covariance matrix was computed
and factorized (using the :func:`compute` method).
"""
return self._computed
@computed.setter
def computed(self, v):
self._computed = v
@property
def log_determinant(self):
"""
The log-determinant of the covariance matrix. This will only be
non-``None`` after calling the :func:`compute` method.
"""
return self._log_det
@log_determinant.setter
def log_determinant(self, v):
self._log_det = v
def compute(self, x, yerr):
"""
Compute and factorize the covariance matrix.
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix.
"""
# Compute the kernel matrix.
K = self.kernel.get_value(x)
K[np.diag_indices_from(K)] += yerr ** 2
# Factor the matrix and compute the log-determinant.
self._factor = (cholesky(K, overwrite_a=True, lower=False), False)
self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))
self.computed = True
def apply_inverse(self, y, in_place=False):
r"""
Apply the inverse of the covariance matrix to the input by solving
.. math::
K\,x = y
Args:
y (ndarray[nsamples] or ndadrray[nsamples, nrhs]): The vector or
matrix :math:`y`.
in_place (Optional[bool]): Should the data in ``y`` be overwritten
with the result :math:`x`? (default: ``False``)
"""
return cho_solve(self._factor, y, overwrite_b=in_place)
def dot_solve(self, y):
r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`.
"""
return np.dot(y.T, cho_solve(self._factor, y))
def apply_sqrt(self, r):
"""
Apply the Cholesky square root of the covariance matrix to the input
vector or matrix.
Args:
r (ndarray[nsamples] or ndarray[nsamples, nrhs]: The input vector
or matrix.
"""
return np.dot(r, self._factor[0])
def get_inverse(self):
"""
Get the dense inverse covariance matrix. This is used for computing
gradients, but it is not recommended in general.
"""
return self.apply_inverse(np.eye(len(self._factor[0])), in_place=True)
| [
"numpy.diag_indices_from",
"scipy.linalg.cholesky",
"scipy.linalg.cho_solve",
"numpy.dot",
"numpy.diag"
] | [((2446, 2494), 'scipy.linalg.cho_solve', 'cho_solve', (['self._factor', 'y'], {'overwrite_b': 'in_place'}), '(self._factor, y, overwrite_b=in_place)\n', (2455, 2494), False, 'from scipy.linalg import cholesky, cho_solve\n'), ((3130, 3156), 'numpy.dot', 'np.dot', (['r', 'self._factor[0]'], {}), '(r, self._factor[0])\n', (3136, 3156), True, 'import numpy as np\n'), ((1691, 1714), 'numpy.diag_indices_from', 'np.diag_indices_from', (['K'], {}), '(K)\n', (1711, 1714), True, 'import numpy as np\n'), ((1815, 1857), 'scipy.linalg.cholesky', 'cholesky', (['K'], {'overwrite_a': '(True)', 'lower': '(False)'}), '(K, overwrite_a=True, lower=False)\n', (1823, 1857), False, 'from scipy.linalg import cholesky, cho_solve\n'), ((2809, 2835), 'scipy.linalg.cho_solve', 'cho_solve', (['self._factor', 'y'], {}), '(self._factor, y)\n', (2818, 2835), False, 'from scipy.linalg import cholesky, cho_solve\n'), ((1915, 1939), 'numpy.diag', 'np.diag', (['self._factor[0]'], {}), '(self._factor[0])\n', (1922, 1939), True, 'import numpy as np\n')] |
# Copyright 2020 trueto
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import logging
import numpy as np
import torch.nn as nn
from glob import glob
from tqdm import tqdm
from scipy.stats import pearsonr, spearmanr
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.metrics import f1_score, multilabel_confusion_matrix
from sklearn.model_selection import KFold
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine
from ignite.contrib.handlers import ProgressBar
from torch.utils.tensorboard import SummaryWriter
from transformers import AutoTokenizer, AdamW, \
get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, \
get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup
from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler
from bertology_sklearn.models import BertologyForClassification, Focal_Loss
from bertology_sklearn.data_utils.common import to_numpy
from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor
logger = logging.getLogger(__name__)
class BertologyClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, model_name_or_path="bert-base-chinese",
do_lower_case=True, cache_dir="cache_model", data_dir="cache_data",
max_seq_length=512, overwrite_cache=False, output_dir="results",
dev_fraction=0.1, per_train_batch_size=8, per_val_batch_size=8,
no_cuda=False, fp16=False, seed=42, overwrite_output_dir=False,
classifier_dropout=0.5, classifier_type="Linear", kernel_num=3,
kernel_sizes=(3,4,5), num_layers=2, weight_decay=1e-3,
gradient_accumulation_steps=1, max_epochs=10, learning_rate=2e-5,
warmup=0.1, fp16_opt_level='01', patience=3, n_saved=3,
task_type="classify", do_cv=False, schedule_type="linear",
focal_loss=False, k_fold=5, multi_label=False, multi_label_threshold=0.5):
super().__init__()
self.task_type = task_type
self.n_saved = n_saved
self.patience = patience
self.fp16_opt_level = fp16_opt_level
self.warmup = warmup
self.learning_rate = learning_rate
self.gradient_accumulation_steps = gradient_accumulation_steps
self.max_epochs = max_epochs
self.weight_decay = weight_decay
self.num_layers = num_layers
self.kernel_sizes = kernel_sizes
self.kernel_num = kernel_num
self.classifier_type = classifier_type
self.classifier_dropout = classifier_dropout
self.overwrite_output_dir = overwrite_output_dir
self.fp16 = fp16
self.no_cuda = no_cuda
self.dev_fraction = dev_fraction
self.per_train_batch_size = per_train_batch_size
self.per_val_batch_size = per_val_batch_size
self.output_dir = output_dir
self.data_dir = data_dir
self.max_seq_length = max_seq_length
self.overwrite_cache = overwrite_cache
self.model_name_or_path = model_name_or_path
self.do_lower_case = do_lower_case
self.cache_dir = cache_dir
self.do_cv = do_cv
self.k_fold = k_fold
self.seed = seed
self.schedule_type = schedule_type
self.focal_loss = focal_loss
self.multi_label = multi_label
self.multi_label_threshold = multi_label_threshold
device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
self.n_gpu = torch.cuda.device_count() if not self.no_cuda else 1
self.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger.warning("Process device: %s, n_gpu: %s,16-bits training: %s",
device, self.n_gpu, self.fp16)
# Set seed
np.random.seed(seed)
torch.manual_seed(seed)
if self.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
def init_focal_loss_params(self, y):
y_np = to_numpy(y)
y_int = [self.label_list.index(label) for label in y_np]
alpha = np.bincount(y_int) / len(y_int)
return list(alpha)
    def fit(self, X, y, sample_weight=None):
        """Fine-tune the classifier on texts ``X`` with labels ``y``.

        Builds the tokenizer, tokenizes/caches the dataset, then delegates
        the training loop to ``single_fit`` — either once on a random
        train/dev split (fraction ``dev_fraction``), or ``k_fold`` times
        when ``do_cv`` is set.

        ``sample_weight`` is accepted for sklearn API compatibility but is
        currently unused.
        """
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
            # os.mkdir(self.data_dir)
        if not os.path.exists(self.output_dir):
            # os.mkdir(self.output_dir)
            os.makedirs(self.output_dir)
        # refuse to clobber an existing non-empty output dir unless asked to
        if os.path.exists(self.output_dir) and os.listdir(
                self.output_dir) and not self.overwrite_output_dir:
            raise ValueError(
                "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                    self.output_dir))

        ## data
        # Chinese checkpoints need BertTokenizer explicitly; otherwise let
        # AutoTokenizer pick the right class from the model name.
        if 'chinese' in self.model_name_or_path:
            tokenizer = BertTokenizer.from_pretrained(self.model_name_or_path,
                                              do_lower_case=self.do_lower_case,
                                              cache_dir=self.cache_dir)
        else:
            tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
                                                      do_lower_case=self.do_lower_case,
                                                      cache_dir=self.cache_dir)

        if self.do_cv:
            # k-fold cross-validation: one single_fit run per fold
            kfold = KFold(n_splits=self.k_fold, shuffle=True, random_state=self.seed)
            cv = 0
            X, y = to_numpy(X), to_numpy(y)
            for train_index, dev_index in kfold.split(X, y):
                cv += 1
                ## data
                X_train, X_dev = X[train_index], X[dev_index]
                y_train, y_dev = y[train_index], y[dev_index]
                # force re-tokenization: each fold has a different split
                self.overwrite_cache = True
                train_processor = TextDataProcessor(X_train, y_train)
                dev_processor = TextDataProcessor(X_dev, y_dev)
                self.label_list = train_processor.get_labels()
                self.num_labels = len(self.label_list)
                train_ds = text_load_and_cache_examples(self, tokenizer, train_processor)
                dev_ds = text_load_and_cache_examples(self, tokenizer, dev_processor)
                if self.focal_loss:
                    self.alpha = self.init_focal_loss_params(y)
                self.single_fit(train_ds, dev_ds, cv=cv)
        else:
            # single run on a random train/dev split
            processor = TextDataProcessor(X, y)
            dataset = text_load_and_cache_examples(self, tokenizer, processor)
            self.label_list = processor.get_labels()
            self.num_labels = len(self.label_list)
            ds_len = len(dataset)
            dev_len = int(len(dataset) * self.dev_fraction)
            train_ds, dev_ds = random_split(dataset, [ds_len - dev_len, dev_len])
            if self.focal_loss:
                self.alpha = self.init_focal_loss_params(y)
            self.single_fit(train_ds, dev_ds)
def single_fit(self, train_ds, dev_ds, cv=None):
## data
batch_size = self.n_gpu * self.per_train_batch_size
train_sampler = RandomSampler(train_ds)
train_iter = DataLoader(train_ds, sampler=train_sampler, batch_size=batch_size)
dev_sampler = SequentialSampler(dev_ds)
dev_iter = DataLoader(dev_ds, sampler=dev_sampler, batch_size=batch_size)
## model
model = BertologyForClassification(model_name_or_path=self.model_name_or_path,
num_labels=self.num_labels,cache_dir=self.cache_dir,
dropout=self.classifier_dropout,kernel_num=self.kernel_num,
kernel_sizes=self.kernel_sizes,num_layers=self.num_layers,
classifier_type=self.classifier_type)
model.to(self.device)
## optimizer
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n,p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': self.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
t_total = len(train_iter) // self.gradient_accumulation_steps * self.max_epochs
optimizer = AdamW(optimizer_grouped_parameters, lr=self.learning_rate)
warmup_steps = t_total * self.warmup
if self.schedule_type == "linear":
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif self.schedule_type == "cosine":
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
elif self.schedule_type == "constant":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif self.schedule_type == "cosine_restarts":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=t_total)
if self.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=self.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if self.n_gpu > 1:
model = torch.nn.DataParallel(model)
tb_writer = SummaryWriter()
def train_fn(engine, batch):
model.train()
optimizer.zero_grad()
batch = tuple(t.to(self.device) for t in batch)
labels = batch[3]
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2]
}
logits = model(**inputs)
if self.num_labels == 1:
loss_fn = nn.MSELoss()
loss = loss_fn(logits.view(-1), labels.view(-1))
preds = logits.view(-1)
labels_np = labels.detach().cpu().numpy()
preds_np = preds.detach().cpu().numpy()
score = (spearmanr(preds_np, labels_np)[0] + pearsonr(preds_np, labels_np)[0]) / 2
else:
if not self.multi_label:
if self.focal_loss:
loss_fct = Focal_Loss(alpha=self.alpha, num_classes=self.num_labels)
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
preds = logits.argmax(dim=-1)
labels_np = labels.detach().cpu().numpy()
preds_np = preds.detach().cpu().numpy()
# if self.num_labels == 2:
# score = f1_score(labels_np, preds_np)
# else:
# score = f1_score(labels_np, preds_np, average="macro", zero_division=1)
score = f1_score(y_true=labels_np, y_pred=preds_np, average="macro")
# score = (labels == preds).float().mean()
else:
loss_fct = nn.MultiLabelSoftMarginLoss()
loss = loss_fct(logits.float(), labels.float())
y_pred = logits.sigmoid()
y_pred = y_pred.detach().cpu().numpy()
labels_np = labels.detach().cpu().numpy()
score = ((y_pred > self.multi_label_threshold) == (labels_np > 0)).mean()
if self.n_gpu > 1:
loss = loss.mean()
## tensorboard
global_step = global_step_from_engine(trainer)(engine, engine.last_event_name)
tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('train_loss', loss.item(), global_step)
tb_writer.add_scalar('train_score', score.item(), global_step)
loss.backward()
optimizer.step()
scheduler.step()
model.zero_grad()
return loss.item(), score.item()
trainer = Engine(train_fn)
RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')
RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'score')
def eval_fn(engine, batch):
model.eval()
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
optimizer.zero_grad()
labels = batch[3]
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2]
}
logits = model(**inputs)
if self.num_labels == 1:
loss_fn = nn.MSELoss()
loss = loss_fn(logits.view(-1), labels.view(-1))
preds = logits.view(-1)
labels_np = labels.detach().cpu().numpy()
preds_np = preds.detach().cpu().numpy()
score = (spearmanr(preds_np, labels_np)[0] + pearsonr(preds_np, labels_np)[0]) / 2
else:
if not self.multi_label:
if self.focal_loss:
loss_fct = Focal_Loss(alpha=self.alpha, num_classes=self.num_labels)
else:
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
preds = logits.argmax(dim=-1)
labels_np = labels.detach().cpu().numpy()
preds_np = preds.detach().cpu().numpy()
# if self.num_labels == 2:
# score = f1_score(labels_np, preds_np)
# else:
# score = f1_score(labels_np, preds_np, average="macro", zero_division=1)
score = f1_score(y_true=labels_np, y_pred=preds_np, average="macro")
# score = (labels == preds).float().mean()
else:
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits.float(), labels.float())
y_pred = logits.sigmoid()
y_pred = y_pred.detach().cpu().numpy()
labels_np = labels.detach().cpu().numpy()
score = ((y_pred > self.multi_label_threshold) == (labels_np > 0)).mean()
if self.n_gpu > 1:
loss = loss.mean()
## tensorboard
global_step = global_step_from_engine(trainer)(engine, engine.last_event_name)
tb_writer.add_scalar('dev_loss', loss.item(), global_step)
tb_writer.add_scalar('dev_score', score.item(), global_step)
return loss.item(), score.item()
dev_evaluator = Engine(eval_fn)
RunningAverage(output_transform=lambda x: x[0]).attach(dev_evaluator, 'loss')
RunningAverage(output_transform=lambda x: x[1]).attach(dev_evaluator, 'score')
pbar = ProgressBar(persist=True, bar_format="")
pbar.attach(trainer, ['loss', 'score'])
pbar.attach(dev_evaluator, ['loss', 'score'])
def score_fn(engine):
loss = engine.state.metrics['loss']
score = engine.state.metrics['score']
return score / (loss + 1e-12)
handler = EarlyStopping(patience=self.patience, score_function=score_fn, trainer=trainer)
dev_evaluator.add_event_handler(Events.COMPLETED, handler)
        @trainer.on(Events.EPOCH_COMPLETED)
        def log_dev_results(engine):
            """Run the dev evaluator after every training epoch and log its
            running-average score and loss."""
            dev_evaluator.run(dev_iter)
            dev_metrics = dev_evaluator.state.metrics
            avg_score = dev_metrics['score']
            avg_loss = dev_metrics['loss']
            logger.info(
                "Validation Results - Epoch: {}  Avg score: {:.2f} Avg loss: {:.2f}"
                .format(engine.state.epoch, avg_score, avg_loss))
l = self.model_name_or_path.split('/')
if len(l) > 1:
model_name = l[-1]
else:
model_name = self.model_name_or_path
        def model_score(engine):
            """Checkpoint-ranking metric for ModelCheckpoint: the running dev score."""
            score = engine.state.metrics['score']
            return score
model_prefix = "best" if cv is None else "cv_{}_best".format(cv)
checkpointer = ModelCheckpoint(self.output_dir, model_prefix, n_saved=self.n_saved,
create_dir=True,score_name="model_score",
score_function=model_score,
global_step_transform=global_step_from_engine(trainer),
require_empty=False)
dev_evaluator.add_event_handler(Events.COMPLETED, checkpointer,
{model_name: model.module if hasattr(model, 'module') else model})
# Clear cuda cache between training/testing
        def empty_cuda_cache(engine):
            """Release cached GPU memory and force a GC pass between
            training and evaluation phases."""
            torch.cuda.empty_cache()
            import gc
            gc.collect()
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
dev_evaluator.add_event_handler(Events.COMPLETED, empty_cuda_cache)
# save config
        @trainer.on(Events.COMPLETED)
        def save_config(engine):
            """Pickle the fitted estimator so predict()/score() can reload
            its configuration from output_dir later."""
            torch.save(self, os.path.join(self.output_dir, 'fit_args.pkl'))
trainer.run(train_iter, max_epochs=self.max_epochs)
def predict(self, X):
args = torch.load(os.path.join(self.output_dir, 'fit_args.pkl'))
## data
processor = TextDataProcessor(X)
if 'chinese' in self.model_name_or_path:
tokenizer = BertTokenizer.from_pretrained(self.model_name_or_path,
do_lower_case=self.do_lower_case,
cache_dir=self.cache_dir)
else:
tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
do_lower_case=self.do_lower_case,
cache_dir=self.cache_dir)
dataset = text_load_and_cache_examples(args, tokenizer, processor, evaluate=True)
sampler = SequentialSampler(dataset)
batch_size = self.per_val_batch_size * max(1, self.n_gpu)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
## model
model = BertologyForClassification(model_name_or_path=self.model_name_or_path,
num_labels=args.num_labels, cache_dir=self.cache_dir,
dropout=self.classifier_dropout, classifier_type=self.classifier_type,
kernel_num=self.kernel_num, kernel_sizes=self.kernel_sizes,
num_layers=self.num_layers)
y_preds = []
for model_state_path in glob(os.path.join(self.output_dir, '*.pt*')):
model.load_state_dict(torch.load(model_state_path))
y_pred = self.single_predict(model, dataloader)
y_preds.append(y_pred)
if self.task_type == "classify":
if not self.multi_label:
y_preds = torch.tensor(y_preds)
y_pred = torch.mode(y_preds, dim=0).values
y_pred = y_pred.numpy()
y_pred = [args.label_list[i] for i in y_pred]
else:
tmp_y_pred = np.max(y_preds, axis=0)
y_pred = []
for tmp_y_pred_ in tmp_y_pred:
y_ = []
for i, pred in enumerate(tmp_y_pred_):
if pred > self.multi_label_threshold:
y_.append(args.label_list[i])
y_pred.append(y_)
else:
y_pred = np.mean(y_preds, axis=0)
return y_pred
def single_predict(self, model, data_iter):
model.to(self.device)
# multi-gpu eval
if self.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Predict
logger.info("***** Running predict*****")
logger.info(" Num examples = %d", len(data_iter)*self.per_val_batch_size*self.n_gpu)
preds = None
for batch in tqdm(data_iter, desc="Predicting"):
model.eval()
batch = tuple(t.to(self.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2]
}
logits = model(**inputs)
if not self.multi_label:
logits = logits.sigmoid()
pred = logits.detach().cpu().numpy()
if preds is None:
preds = pred
else:
preds = np.append(preds, pred, axis=0)
if self.task_type == "classify":
output = np.argmax(preds, axis=1)
if not self.multi_label:
output = preds
else:
output = np.squeeze(preds)
return output
def multi_label_to_id(self, y):
args = torch.load(os.path.join(self.output_dir, 'fit_args.pkl'))
label_ids = []
for label_list in y:
label_id = [-1] * len(args.label_list)
for label in label_list:
label_id[args.label_list.index(label)] = 1
label_ids.append(label_id)
return torch.LongTensor(label_ids)
def score(self, X, y, sample_weight=None):
y_pred = self.predict(X)
if self.task_type == "classify":
if not self.multi_label:
y_pred_ids = self.multi_label_to_id(y_pred)
y_true_ids = self.multi_label_to_id(y)
score = (y_pred_ids == y_true_ids).float().mean()
score = score.item()
else:
score = f1_score(y, y_pred, average="macro")
else:
score = (pearsonr(y_pred, y)[0] + spearmanr(y_pred, y)[0]) / 2
return score | [
"numpy.random.seed",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"bertology_sklearn.models.BertologyForClassification",
"torch.cuda.device_count",
"torch.nn.MultiLabelSoftMarginLoss",
"bertology_sklearn.data_utils.text_load_and_cache_examples",
"gc.collect",
"numpy.mean",
"ignite.contrib.han... | [((1733, 1760), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1750, 1760), False, 'import logging\n'), ((4354, 4497), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (4373, 4497), False, 'import logging\n'), ((4703, 4723), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4717, 4723), True, 'import numpy as np\n'), ((4732, 4755), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (4749, 4755), False, 'import torch\n'), ((4885, 4896), 'bertology_sklearn.data_utils.common.to_numpy', 'to_numpy', (['y'], {}), '(y)\n', (4893, 4896), False, 'from bertology_sklearn.data_utils.common import to_numpy\n'), ((7989, 8012), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_ds'], {}), '(train_ds)\n', (8002, 8012), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((8034, 8100), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'sampler': 'train_sampler', 'batch_size': 'batch_size'}), '(train_ds, sampler=train_sampler, batch_size=batch_size)\n', (8044, 8100), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((8124, 8149), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dev_ds'], {}), '(dev_ds)\n', (8141, 8149), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((8169, 8231), 'torch.utils.data.DataLoader', 'DataLoader', (['dev_ds'], {'sampler': 'dev_sampler', 'batch_size': 'batch_size'}), '(dev_ds, sampler=dev_sampler, batch_size=batch_size)\n', (8179, 8231), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, 
RandomSampler\n'), ((8266, 8568), 'bertology_sklearn.models.BertologyForClassification', 'BertologyForClassification', ([], {'model_name_or_path': 'self.model_name_or_path', 'num_labels': 'self.num_labels', 'cache_dir': 'self.cache_dir', 'dropout': 'self.classifier_dropout', 'kernel_num': 'self.kernel_num', 'kernel_sizes': 'self.kernel_sizes', 'num_layers': 'self.num_layers', 'classifier_type': 'self.classifier_type'}), '(model_name_or_path=self.model_name_or_path,\n num_labels=self.num_labels, cache_dir=self.cache_dir, dropout=self.\n classifier_dropout, kernel_num=self.kernel_num, kernel_sizes=self.\n kernel_sizes, num_layers=self.num_layers, classifier_type=self.\n classifier_type)\n', (8292, 8568), False, 'from bertology_sklearn.models import BertologyForClassification, Focal_Loss\n'), ((9254, 9312), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'self.learning_rate'}), '(optimizer_grouped_parameters, lr=self.learning_rate)\n', (9259, 9312), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((10724, 10739), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (10737, 10739), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((13460, 13476), 'ignite.engine.Engine', 'Engine', (['train_fn'], {}), '(train_fn)\n', (13466, 13476), False, 'from ignite.engine import Engine, Events\n'), ((16343, 16358), 'ignite.engine.Engine', 'Engine', (['eval_fn'], {}), '(eval_fn)\n', (16349, 16358), False, 'from ignite.engine import Engine, Events\n'), ((16548, 16588), 'ignite.contrib.handlers.ProgressBar', 'ProgressBar', ([], {'persist': '(True)', 'bar_format': '""""""'}), "(persist=True, bar_format='')\n", (16559, 16588), False, 'from ignite.contrib.handlers import ProgressBar\n'), ((16881, 16960), 'ignite.handlers.EarlyStopping', 'EarlyStopping', 
([], {'patience': 'self.patience', 'score_function': 'score_fn', 'trainer': 'trainer'}), '(patience=self.patience, score_function=score_fn, trainer=trainer)\n', (16894, 16960), False, 'from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine\n'), ((19093, 19113), 'bertology_sklearn.data_utils.TextDataProcessor', 'TextDataProcessor', (['X'], {}), '(X)\n', (19110, 19113), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((19691, 19762), 'bertology_sklearn.data_utils.text_load_and_cache_examples', 'text_load_and_cache_examples', (['args', 'tokenizer', 'processor'], {'evaluate': '(True)'}), '(args, tokenizer, processor, evaluate=True)\n', (19719, 19762), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((19782, 19808), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (19799, 19808), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((19896, 19955), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'batch_size'}), '(dataset, sampler=sampler, batch_size=batch_size)\n', (19906, 19955), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((19990, 20292), 'bertology_sklearn.models.BertologyForClassification', 'BertologyForClassification', ([], {'model_name_or_path': 'self.model_name_or_path', 'num_labels': 'args.num_labels', 'cache_dir': 'self.cache_dir', 'dropout': 'self.classifier_dropout', 'classifier_type': 'self.classifier_type', 'kernel_num': 'self.kernel_num', 'kernel_sizes': 'self.kernel_sizes', 'num_layers': 'self.num_layers'}), '(model_name_or_path=self.model_name_or_path,\n num_labels=args.num_labels, cache_dir=self.cache_dir, dropout=self.\n classifier_dropout, classifier_type=self.classifier_type, kernel_num=\n self.kernel_num, 
kernel_sizes=self.kernel_sizes, num_layers=self.num_layers\n )\n', (20016, 20292), False, 'from bertology_sklearn.models import BertologyForClassification, Focal_Loss\n'), ((21855, 21889), 'tqdm.tqdm', 'tqdm', (['data_iter'], {'desc': '"""Predicting"""'}), "(data_iter, desc='Predicting')\n", (21859, 21889), False, 'from tqdm import tqdm\n'), ((23099, 23126), 'torch.LongTensor', 'torch.LongTensor', (['label_ids'], {}), '(label_ids)\n', (23115, 23126), False, 'import torch\n'), ((4238, 4263), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4261, 4263), False, 'import torch\n'), ((4795, 4827), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (4821, 4827), False, 'import torch\n'), ((4978, 4996), 'numpy.bincount', 'np.bincount', (['y_int'], {}), '(y_int)\n', (4989, 4996), True, 'import numpy as np\n'), ((5099, 5128), 'os.path.exists', 'os.path.exists', (['self.data_dir'], {}), '(self.data_dir)\n', (5113, 5128), False, 'import os\n'), ((5142, 5168), 'os.makedirs', 'os.makedirs', (['self.data_dir'], {}), '(self.data_dir)\n', (5153, 5168), False, 'import os\n'), ((5223, 5254), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (5237, 5254), False, 'import os\n'), ((5308, 5336), 'os.makedirs', 'os.makedirs', (['self.output_dir'], {}), '(self.output_dir)\n', (5319, 5336), False, 'import os\n'), ((5349, 5380), 'os.path.exists', 'os.path.exists', (['self.output_dir'], {}), '(self.output_dir)\n', (5363, 5380), False, 'import os\n'), ((5385, 5412), 'os.listdir', 'os.listdir', (['self.output_dir'], {}), '(self.output_dir)\n', (5395, 5412), False, 'import os\n'), ((5744, 5863), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['self.model_name_or_path'], {'do_lower_case': 'self.do_lower_case', 'cache_dir': 'self.cache_dir'}), '(self.model_name_or_path, do_lower_case=self.\n do_lower_case, cache_dir=self.cache_dir)\n', (5773, 5863), False, 'from 
transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((6005, 6124), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_name_or_path'], {'do_lower_case': 'self.do_lower_case', 'cache_dir': 'self.cache_dir'}), '(self.model_name_or_path, do_lower_case=self.\n do_lower_case, cache_dir=self.cache_dir)\n', (6034, 6124), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((6272, 6337), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.k_fold', 'shuffle': '(True)', 'random_state': 'self.seed'}), '(n_splits=self.k_fold, shuffle=True, random_state=self.seed)\n', (6277, 6337), False, 'from sklearn.model_selection import KFold\n'), ((7307, 7330), 'bertology_sklearn.data_utils.TextDataProcessor', 'TextDataProcessor', (['X', 'y'], {}), '(X, y)\n', (7324, 7330), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((7354, 7410), 'bertology_sklearn.data_utils.text_load_and_cache_examples', 'text_load_and_cache_examples', (['self', 'tokenizer', 'processor'], {}), '(self, tokenizer, processor)\n', (7382, 7410), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((7643, 7693), 'torch.utils.data.random_split', 'random_split', (['dataset', '[ds_len - dev_len, dev_len]'], {}), '(dataset, [ds_len - dev_len, dev_len])\n', (7655, 7693), False, 'from torch.utils.data import random_split, DataLoader, SequentialSampler, RandomSampler\n'), ((9426, 9531), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 
'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n', (9457, 9531), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((10490, 10553), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'self.fp16_opt_level'}), '(model, optimizer, opt_level=self.fp16_opt_level)\n', (10504, 10553), False, 'from apex import amp\n'), ((10674, 10702), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (10695, 10702), False, 'import torch\n'), ((18499, 18523), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (18521, 18523), False, 'import torch\n'), ((18558, 18570), 'gc.collect', 'gc.collect', ([], {}), '()\n', (18568, 18570), False, 'import gc\n'), ((19009, 19054), 'os.path.join', 'os.path.join', (['self.output_dir', '"""fit_args.pkl"""'], {}), "(self.output_dir, 'fit_args.pkl')\n", (19021, 19054), False, 'import os\n'), ((19188, 19307), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['self.model_name_or_path'], {'do_lower_case': 'self.do_lower_case', 'cache_dir': 'self.cache_dir'}), '(self.model_name_or_path, do_lower_case=self.\n do_lower_case, cache_dir=self.cache_dir)\n', (19217, 19307), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((19449, 19568), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_name_or_path'], {'do_lower_case': 'self.do_lower_case', 'cache_dir': 'self.cache_dir'}), '(self.model_name_or_path, do_lower_case=self.\n do_lower_case, cache_dir=self.cache_dir)\n', (19478, 19568), False, 'from transformers 
import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((20505, 20543), 'os.path.join', 'os.path.join', (['self.output_dir', '"""*.pt*"""'], {}), "(self.output_dir, '*.pt*')\n", (20517, 20543), False, 'import os\n'), ((21420, 21444), 'numpy.mean', 'np.mean', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (21427, 21444), True, 'import numpy as np\n'), ((21620, 21648), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (21641, 21648), False, 'import torch\n'), ((22567, 22591), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (22576, 22591), True, 'import numpy as np\n'), ((22695, 22712), 'numpy.squeeze', 'np.squeeze', (['preds'], {}), '(preds)\n', (22705, 22712), True, 'import numpy as np\n'), ((22799, 22844), 'os.path.join', 'os.path.join', (['self.output_dir', '"""fit_args.pkl"""'], {}), "(self.output_dir, 'fit_args.pkl')\n", (22811, 22844), False, 'import os\n'), ((6376, 6387), 'bertology_sklearn.data_utils.common.to_numpy', 'to_numpy', (['X'], {}), '(X)\n', (6384, 6387), False, 'from bertology_sklearn.data_utils.common import to_numpy\n'), ((6389, 6400), 'bertology_sklearn.data_utils.common.to_numpy', 'to_numpy', (['y'], {}), '(y)\n', (6397, 6400), False, 'from bertology_sklearn.data_utils.common import to_numpy\n'), ((6714, 6749), 'bertology_sklearn.data_utils.TextDataProcessor', 'TextDataProcessor', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6731, 6749), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((6782, 6813), 'bertology_sklearn.data_utils.TextDataProcessor', 'TextDataProcessor', (['X_dev', 'y_dev'], {}), '(X_dev, y_dev)\n', (6799, 6813), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((6961, 7023), 
'bertology_sklearn.data_utils.text_load_and_cache_examples', 'text_load_and_cache_examples', (['self', 'tokenizer', 'train_processor'], {}), '(self, tokenizer, train_processor)\n', (6989, 7023), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((7049, 7109), 'bertology_sklearn.data_utils.text_load_and_cache_examples', 'text_load_and_cache_examples', (['self', 'tokenizer', 'dev_processor'], {}), '(self, tokenizer, dev_processor)\n', (7077, 7109), False, 'from bertology_sklearn.data_utils import text_load_and_cache_examples, TextDataProcessor\n'), ((9653, 9758), 'transformers.get_cosine_schedule_with_warmup', 'get_cosine_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=t_total)\n', (9684, 9758), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((11192, 11204), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (11202, 11204), True, 'import torch.nn as nn\n'), ((12980, 13012), 'ignite.handlers.global_step_from_engine', 'global_step_from_engine', (['trainer'], {}), '(trainer)\n', (13003, 13012), False, 'from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine\n'), ((13485, 13532), 'ignite.metrics.RunningAverage', 'RunningAverage', ([], {'output_transform': '(lambda x: x[0])'}), '(output_transform=lambda x: x[0])\n', (13499, 13532), False, 'from ignite.metrics import RunningAverage\n'), ((13565, 13612), 'ignite.metrics.RunningAverage', 'RunningAverage', ([], {'output_transform': '(lambda x: x[1])'}), '(output_transform=lambda x: x[1])\n', (13579, 13612), False, 'from ignite.metrics import RunningAverage\n'), ((13777, 13792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13790, 13792), 
False, 'import torch\n'), ((16063, 16095), 'ignite.handlers.global_step_from_engine', 'global_step_from_engine', (['trainer'], {}), '(trainer)\n', (16086, 16095), False, 'from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine\n'), ((16367, 16414), 'ignite.metrics.RunningAverage', 'RunningAverage', ([], {'output_transform': '(lambda x: x[0])'}), '(output_transform=lambda x: x[0])\n', (16381, 16414), False, 'from ignite.metrics import RunningAverage\n'), ((16453, 16500), 'ignite.metrics.RunningAverage', 'RunningAverage', ([], {'output_transform': '(lambda x: x[1])'}), '(output_transform=lambda x: x[1])\n', (16467, 16500), False, 'from ignite.metrics import RunningAverage\n'), ((18123, 18155), 'ignite.handlers.global_step_from_engine', 'global_step_from_engine', (['trainer'], {}), '(trainer)\n', (18146, 18155), False, 'from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine\n'), ((18847, 18892), 'os.path.join', 'os.path.join', (['self.output_dir', '"""fit_args.pkl"""'], {}), "(self.output_dir, 'fit_args.pkl')\n", (18859, 18892), False, 'import os\n'), ((20580, 20608), 'torch.load', 'torch.load', (['model_state_path'], {}), '(model_state_path)\n', (20590, 20608), False, 'import torch\n'), ((20810, 20831), 'torch.tensor', 'torch.tensor', (['y_preds'], {}), '(y_preds)\n', (20822, 20831), False, 'import torch\n'), ((21040, 21063), 'numpy.max', 'np.max', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (21046, 21063), True, 'import numpy as np\n'), ((21994, 22009), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22007, 22009), False, 'import torch\n'), ((22473, 22503), 'numpy.append', 'np.append', (['preds', 'pred'], {'axis': '(0)'}), '(preds, pred, axis=0)\n', (22482, 22503), True, 'import numpy as np\n'), ((23546, 23582), 'sklearn.metrics.f1_score', 'f1_score', (['y', 'y_pred'], {'average': '"""macro"""'}), "(y, y_pred, average='macro')\n", (23554, 23582), False, 'from sklearn.metrics import f1_score, 
multilabel_confusion_matrix\n'), ((4158, 4183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4181, 4183), False, 'import torch\n'), ((9882, 9957), 'transformers.get_constant_schedule_with_warmup', 'get_constant_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps'}), '(optimizer, num_warmup_steps=warmup_steps)\n', (9915, 9957), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((12323, 12383), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels_np', 'y_pred': 'preds_np', 'average': '"""macro"""'}), "(y_true=labels_np, y_pred=preds_np, average='macro')\n", (12331, 12383), False, 'from sklearn.metrics import f1_score, multilabel_confusion_matrix\n'), ((12500, 12529), 'torch.nn.MultiLabelSoftMarginLoss', 'nn.MultiLabelSoftMarginLoss', ([], {}), '()\n', (12527, 12529), True, 'import torch.nn as nn\n'), ((14162, 14174), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (14172, 14174), True, 'import torch.nn as nn\n'), ((20857, 20883), 'torch.mode', 'torch.mode', (['y_preds'], {'dim': '(0)'}), '(y_preds, dim=0)\n', (20867, 20883), False, 'import torch\n'), ((10036, 10160), 'transformers.get_cosine_with_hard_restarts_schedule_with_warmup', 'get_cosine_with_hard_restarts_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 't_total'}), '(optimizer,\n num_warmup_steps=warmup_steps, num_training_steps=t_total)\n', (10086, 10160), False, 'from transformers import AutoTokenizer, AdamW, get_cosine_with_hard_restarts_schedule_with_warmup, BertTokenizer, get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup, get_constant_schedule_with_warmup\n'), ((11658, 11715), 'bertology_sklearn.models.Focal_Loss', 'Focal_Loss', ([], {'alpha': 'self.alpha', 'num_classes': 'self.num_labels'}), 
'(alpha=self.alpha, num_classes=self.num_labels)\n', (11668, 11715), False, 'from bertology_sklearn.models import BertologyForClassification, Focal_Loss\n'), ((11777, 11798), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (11796, 11798), True, 'import torch.nn as nn\n'), ((15373, 15433), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'labels_np', 'y_pred': 'preds_np', 'average': '"""macro"""'}), "(y_true=labels_np, y_pred=preds_np, average='macro')\n", (15381, 15433), False, 'from sklearn.metrics import f1_score, multilabel_confusion_matrix\n'), ((15562, 15584), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (15582, 15584), True, 'import torch.nn as nn\n'), ((23619, 23638), 'scipy.stats.pearsonr', 'pearsonr', (['y_pred', 'y'], {}), '(y_pred, y)\n', (23627, 23638), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((23644, 23664), 'scipy.stats.spearmanr', 'spearmanr', (['y_pred', 'y'], {}), '(y_pred, y)\n', (23653, 23664), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((11450, 11480), 'scipy.stats.spearmanr', 'spearmanr', (['preds_np', 'labels_np'], {}), '(preds_np, labels_np)\n', (11459, 11480), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((11486, 11515), 'scipy.stats.pearsonr', 'pearsonr', (['preds_np', 'labels_np'], {}), '(preds_np, labels_np)\n', (11494, 11515), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((14664, 14721), 'bertology_sklearn.models.Focal_Loss', 'Focal_Loss', ([], {'alpha': 'self.alpha', 'num_classes': 'self.num_labels'}), '(alpha=self.alpha, num_classes=self.num_labels)\n', (14674, 14721), False, 'from bertology_sklearn.models import BertologyForClassification, Focal_Loss\n'), ((14791, 14812), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (14810, 14812), True, 'import torch.nn as nn\n'), ((14440, 14470), 'scipy.stats.spearmanr', 'spearmanr', (['preds_np', 'labels_np'], {}), '(preds_np, labels_np)\n', (14449, 14470), False, 
'from scipy.stats import pearsonr, spearmanr\n'), ((14476, 14505), 'scipy.stats.pearsonr', 'pearsonr', (['preds_np', 'labels_np'], {}), '(preds_np, labels_np)\n', (14484, 14505), False, 'from scipy.stats import pearsonr, spearmanr\n')] |
import cv2 as cv
import numpy as np
import math
import time
# ---------------------------------------------------------------------------
# Track a robot in 'video/Sentry_2.mkv' by its bright marker, estimate its
# heading from the min-area rectangle of the largest blob, and draw position
# + heading onto a top-down arena map that is displayed and written to
# 'output2.mp4'.
# ---------------------------------------------------------------------------
cv.startWindowThread()
cap = cv.VideoCapture('video/Sentry_2.mkv')
fourcc = cv.VideoWriter_fourcc(*'mp4v')
out = cv.VideoWriter('output2.mp4', fourcc, 15.0, (449, 809), True)

# Homography from four hand-picked camera-view points (pts1) to their
# positions on the top-down arena map (pts2).
pts1 = np.float32([[178, 141], [211, 79], [390, 91], [468, 177]])
pts2 = np.float32([[105, 404], [225, 190], [347, 403], [226, 618]])
M = cv.getPerspectiveTransform(pts1, pts2)

SCALE_PERCENT = 40                   # shrink video frames to 40 % of original size
ARROW_LENGTH = 20                    # heading-arrow length in camera pixels
kernel = np.ones((5, 5), np.uint8)  # dilation kernel — loop-invariant, built once

frames = 1
while frames < 299:
    # Reload a clean map every frame so previous arrows do not accumulate.
    map_img = cv.imread("arena4.png")
    frames = frames + 1
    ret, img = cap.read()
    if not ret:
        # BUGFIX: stop cleanly if the video has fewer frames than expected
        # (the original crashed on cv.resize(None, ...)).
        break
    width = int(img.shape[1] * SCALE_PERCENT / 100)
    height = int(img.shape[0] * SCALE_PERCENT / 100)
    frame = cv.resize(img, (width, height), interpolation=cv.INTER_AREA)

    # Segment the marker: mask out very saturated pixels in HSV, then keep
    # only the remaining near-pure-red pixels in BGR.
    hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    mask = cv.inRange(hsv, np.array([0, 0, 0]), np.array([255, 200, 255]))
    masked = cv.bitwise_and(frame, frame, mask=mask)
    red_mask = cv.inRange(masked, np.array([0, 0, 235]), np.array([255, 255, 255]))
    dilated = cv.dilate(red_mask, kernel, iterations=2)
    contours, hierarchy = cv.findContours(dilated, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

    if len(contours) != 0:
        # The largest blob is assumed to be the robot marker.
        biggest = max(contours, key=cv.contourArea)
        rect = cv.minAreaRect(biggest)
        angle = rect[2] - 90         # convert rect angle to a heading angle
        if angle < -179:
            angle = angle + 180
        # np.int0 was removed in NumPy 2.0; np.intp is the identical alias.
        box = np.intp(cv.boxPoints(rect))
        centre = rect[0]
        # End point of the heading arrow in camera coordinates.
        tip = (centre[0] + ARROW_LENGTH * np.cos(angle * 3.14 / 180.0),
               centre[1] + ARROW_LENGTH * np.sin(angle * 3.14 / 180.0))
        cv.drawContours(frame, [box], 0, (255, 255, 255), 2)

        # Project centre and arrow tip onto the map with the homography
        # (homogeneous coordinates, normalised by the third component).
        p1 = np.matmul(M, np.float32([[centre[0]], [centre[1]], [1]]))
        p1 = p1 / p1[2]
        p2 = np.matmul(M, np.float32([[tip[0]], [tip[1]], [1]]))
        p2 = p2 / p2[2]
        # NOTE(review): the -30 px vertical offset presumably compensates
        # for a map border — confirm against arena4.png.
        cv.arrowedLine(map_img, (int(p1[0]), int(p1[1] - 30)),
                       (int(p2[0]), int(p2[1] - 30)), (0, 0, 255), 2)
        cv.circle(map_img, (int(p1[0]), int(p1[1] - 30)), 15, (255, 255, 255), 2)

    cv.imshow('b', map_img)
    out.write(map_img)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cv.waitKey(0)
out.release()
cap.release()            # BUGFIX: the capture was never released
cv.destroyAllWindows()   # BUGFIX: close the display window on exit
"cv2.VideoWriter_fourcc",
"cv2.bitwise_and",
"cv2.getPerspectiveTransform",
"numpy.ones",
"cv2.boxPoints",
"numpy.sin",
"cv2.minAreaRect",
"cv2.startWindowThread",
"cv2.VideoWriter",
"cv2.imshow",
"cv2.dilate",
"cv2.cvtColor",
"cv2.drawContours",
"cv2.resize",
"numpy.int0",
"cv2.waitKe... | [((61, 83), 'cv2.startWindowThread', 'cv.startWindowThread', ([], {}), '()\n', (81, 83), True, 'import cv2 as cv\n'), ((90, 127), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""video/Sentry_2.mkv"""'], {}), "('video/Sentry_2.mkv')\n", (105, 127), True, 'import cv2 as cv\n'), ((133, 157), 'cv2.imread', 'cv.imread', (['"""Sample2.png"""'], {}), "('Sample2.png')\n", (142, 157), True, 'import cv2 as cv\n'), ((167, 197), 'cv2.VideoWriter_fourcc', 'cv.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (188, 197), True, 'import cv2 as cv\n'), ((204, 265), 'cv2.VideoWriter', 'cv.VideoWriter', (['"""output2.mp4"""', 'fourcc', '(15.0)', '(449, 809)', '(True)'], {}), "('output2.mp4', fourcc, 15.0, (449, 809), True)\n", (218, 265), True, 'import cv2 as cv\n'), ((438, 486), 'cv2.resize', 'cv.resize', (['img', 'dim'], {'interpolation': 'cv.INTER_AREA'}), '(img, dim, interpolation=cv.INTER_AREA)\n', (447, 486), True, 'import cv2 as cv\n'), ((496, 554), 'numpy.float32', 'np.float32', (['[[178, 141], [211, 79], [390, 91], [468, 177]]'], {}), '([[178, 141], [211, 79], [390, 91], [468, 177]])\n', (506, 554), True, 'import numpy as np\n'), ((555, 615), 'numpy.float32', 'np.float32', (['[[105, 404], [225, 190], [347, 403], [226, 618]]'], {}), '([[105, 404], [225, 190], [347, 403], [226, 618]])\n', (565, 615), True, 'import numpy as np\n'), ((625, 663), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (651, 663), True, 'import cv2 as cv\n'), ((3386, 3399), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (3396, 3399), True, 'import cv2 as cv\n'), ((705, 728), 'cv2.imread', 'cv.imread', (['"""arena4.png"""'], {}), "('arena4.png')\n", (714, 728), True, 'import cv2 as cv\n'), ((968, 1016), 'cv2.resize', 'cv.resize', (['img', 'dim'], {'interpolation': 'cv.INTER_AREA'}), '(img, dim, interpolation=cv.INTER_AREA)\n', (977, 1016), True, 'import cv2 as cv\n'), ((1078, 1099), 'numpy.zeros', 'np.zeros', (['frame.shape'], 
{}), '(frame.shape)\n', (1086, 1099), True, 'import numpy as np\n'), ((1141, 1178), 'cv2.cvtColor', 'cv.cvtColor', (['frame2', 'cv.COLOR_BGR2HSV'], {}), '(frame2, cv.COLOR_BGR2HSV)\n', (1152, 1178), True, 'import cv2 as cv\n'), ((1257, 1298), 'cv2.bitwise_and', 'cv.bitwise_and', (['frame2', 'frame2'], {'mask': 'mask'}), '(frame2, frame2, mask=mask)\n', (1271, 1298), True, 'import cv2 as cv\n'), ((1389, 1414), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1396, 1414), True, 'import numpy as np\n'), ((1427, 1470), 'cv2.dilate', 'cv.dilate', (['ekkaurmask', 'kernel'], {'iterations': '(2)'}), '(ekkaurmask, kernel, iterations=2)\n', (1436, 1470), True, 'import cv2 as cv\n'), ((1497, 1563), 'cv2.findContours', 'cv.findContours', (['erosion', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(erosion, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n', (1512, 1563), True, 'import cv2 as cv\n'), ((1202, 1221), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1210, 1221), True, 'import numpy as np\n'), ((1221, 1246), 'numpy.array', 'np.array', (['[255, 200, 255]'], {}), '([255, 200, 255])\n', (1229, 1246), True, 'import numpy as np\n'), ((1330, 1351), 'numpy.array', 'np.array', (['[0, 0, 235]'], {}), '([0, 0, 235])\n', (1338, 1351), True, 'import numpy as np\n'), ((1351, 1376), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (1359, 1376), True, 'import numpy as np\n'), ((1652, 1669), 'cv2.minAreaRect', 'cv.minAreaRect', (['i'], {}), '(i)\n', (1666, 1669), True, 'import cv2 as cv\n'), ((1857, 1875), 'cv2.boxPoints', 'cv.boxPoints', (['rect'], {}), '(rect)\n', (1869, 1875), True, 'import cv2 as cv\n'), ((1888, 1900), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (1895, 1900), True, 'import numpy as np\n'), ((2082, 2134), 'cv2.drawContours', 'cv.drawContours', (['frame', '[box]', '(0)', '(255, 255, 255)', '(2)'], {}), '(frame, [box], 0, (255, 255, 255), 2)\n', (2097, 2134), True, 'import cv2 as 
cv\n'), ((2326, 2369), 'numpy.float32', 'np.float32', (['[[centre[0]], [centre[1]], [1]]'], {}), '([[centre[0]], [centre[1]], [1]])\n', (2336, 2369), True, 'import numpy as np\n'), ((2384, 2419), 'numpy.float32', 'np.float32', (['[[P2[0]], [P2[1]], [1]]'], {}), '([[P2[0]], [P2[1]], [1]])\n', (2394, 2419), True, 'import numpy as np\n'), ((2437, 2457), 'numpy.matmul', 'np.matmul', (['M', 'point1'], {}), '(M, point1)\n', (2446, 2457), True, 'import numpy as np\n'), ((2558, 2578), 'numpy.matmul', 'np.matmul', (['M', 'point2'], {}), '(M, point2)\n', (2567, 2578), True, 'import numpy as np\n'), ((3200, 3223), 'cv2.imshow', 'cv.imshow', (['"""b"""', 'map_img'], {}), "('b', map_img)\n", (3209, 3223), True, 'import cv2 as cv\n'), ((3338, 3351), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (3348, 3351), True, 'import cv2 as cv\n'), ((1994, 2022), 'numpy.cos', 'np.cos', (['(angle * 3.14 / 180.0)'], {}), '(angle * 3.14 / 180.0)\n', (2000, 2022), True, 'import numpy as np\n'), ((2043, 2071), 'numpy.sin', 'np.sin', (['(angle * 3.14 / 180.0)'], {}), '(angle * 3.14 / 180.0)\n', (2049, 2071), True, 'import numpy as np\n')] |
# Copyright (c) 2013, Preferred Infrastructure, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import functools
import itertools
import json
import numpy.random
import types
import numpy as np
def aggregator(callback_body):
    """Wrap ``callback_body`` into a waf-independent aggregation rule.

    The returned callback reads every input node of a task as JSON (either a
    flat object or an array of flat objects), merges each record with the
    parameter of the node it came from, and hands the combined list to
    ``callback_body`` together with the absolute path of the output node and
    the task parameter.  When ``callback_body`` returns a string, it is
    written to the output node automatically; when it returns ``None`` it is
    assumed to have written the output itself via the given path.

    :param callback_body: callable of signature
        ``(values: list, abspath: str, parameter) -> str | None``.
    :return: a task callback suitable as an aggregation rule.
    :rtype: ``function``
    """
    @functools.wraps(callback_body)
    def callback(task):
        records = []
        for node, parameter in zip(task.inputs, task.env.source_parameter):
            loaded = json.loads(node.read())
            # Normalize a single flat object to a one-element list.
            elements = loaded if isinstance(loaded, list) else [loaded]
            for record in elements:
                record.update(parameter)
            records.extend(elements)

        out_node = task.outputs[0]
        content = callback_body(records, out_node.abspath(), task.parameter)
        # None signals that callback_body already wrote the output itself.
        if content is not None:
            out_node.write(content)

    return callback
def json_aggregator(callback_body):
    """Create an aggregator that serializes its result to JSON.

    The result of an aggregator task is often json-formatted for later tasks,
    such as :py:mod:`maflib.rules.max` and :py:mod:`maflib.rules.average`.
    In :py:mod:`maflib.rules.max`, for example, the parameter setting
    corresponding to the max must also be dumped, which fails when a
    parameter value is not json-serializable (e.g. a user-defined object).
    This decorator therefore first replaces every non-serializable value of
    ``parameter`` with its string representation; serializable values (e.g.
    ``int``) are kept unchanged.

    :param callback_body: A function or a callable object that takes the same
        arguments as that of ``aggregator`` but returns an object to be
        serialized to JSON. See :py:mod:`maflib.rules.max` for an example.
    :type callback_body: ``function`` or callable object of signature
        ``(list, str, parameter)``
    :return: An aggregator.
    :rtype: ``function``
    """
    @functools.wraps(callback_body)
    @aggregator
    def callback(values, abspath, parameter):
        def to_jsonable(v):
            """Return *v* unchanged if JSON-serializable, else its str()."""
            try:
                json.dumps(v)
            except (TypeError, ValueError):
                # json.dumps raises TypeError for unsupported types and
                # ValueError for e.g. circular references; a bare except
                # would also hide real bugs such as KeyboardInterrupt.
                return str(v)
            return v

        parameter = {k: to_jsonable(v) for k, v in parameter.items()}
        result = callback_body(values, abspath, parameter)
        return json.dumps(result)

    return callback
def product(parameter):
    """Enumerate the direct product of listed parameters.

    Example:

    .. code-block:: python

        maflib.util.product({'x': [0, 1, 2], 'y': [1, 3, 5]})
        # => [{'x': 0, 'y': 1}, {'x': 0, 'y': 3}, {'x': 0, 'y': 5},
        #     {'x': 1, 'y': 1}, {'x': 1, 'y': 3}, {'x': 1, 'y': 5},
        #     {'x': 2, 'y': 1}, {'x': 2, 'y': 3}, {'x': 2, 'y': 5}]
        # (the order of parameters may be different)

    :param parameter: A dictionary representing a set of parameters; each
        value is the list of candidate values for its key.
    :type parameter: ``dict`` from ``str`` to ``list``.
    :return: A direct product of the parameter set.
    :rtype: ``list`` of ``dict``.
    """
    keys = sorted(parameter)
    combos = itertools.product(*(parameter[k] for k in keys))
    return [dict(zip(keys, combo)) for combo in combos]
def sample(num_samples, distribution):
    """Randomly sample parameters from given distributions.

    This function samples parameter combinations each of which is a
    dictionary from key to value sampled from a distribution corresponding
    to the key.  It is useful for hyper-parameter optimization compared to
    using ``product``, since every instance can be different on all
    dimensions for each other.

    :param num_samples: Number of samples. Resulting meta node contains this
        number of physical nodes for each input parameter set.
    :type num_samples: ``int``
    :param distribution: Dictionary from parameter names to values specifying
        distributions to sample from. Acceptable values are following:

        **Pair of numbers**
            ``(a, b)`` specifies a uniform distribution on the continuous
            interval [a, b).

        **List of values**
            This specifies a uniform distribution on the discrete set of
            values.

        **Callable object or function**
            ``f`` can be used for an arbitrary generator of values. Multiple
            calls of ``f()`` should generate random samples of user-defined
            distribution.

        Any other value is treated as a constant.
    :return: A list of sampled parameters.
    :rtype: ``list`` of ``dict``.
    """
    parameter_gens = {}
    keys = sorted(distribution)
    sampled = []
    for key in keys:
        spec = distribution[key]
        if isinstance(spec, tuple):
            # Continuous uniform on [begin, end).  The bounds are bound as
            # default arguments: a plain closure would capture the loop
            # variables and every tuple-distribution would end up sampling
            # from the LAST key's interval (late-binding closure bug).
            begin, end = float(spec[0]), float(spec[1])
            gen = lambda b=begin, e=end: (e - b) * numpy.random.random_sample() + b
        elif isinstance(spec, list):
            # Discrete uniform over the listed values.
            gen = lambda values=spec: values[numpy.random.randint(0, len(values))]
        elif callable(spec):
            # Arbitrary user-supplied generator.  ``callable`` also accepts
            # callable objects and builtins, which the old
            # ``types.FunctionType`` check wrongly treated as constants
            # despite the documented contract.
            gen = spec
        else:
            # Constant value; bind it now so later loop iterations cannot
            # change what the closure sees.
            gen = lambda value=spec: value
        parameter_gens[key] = gen

    for _ in range(num_samples):
        sampled.append({key: parameter_gens[key]() for key in keys})
    return sampled
def set_random_seed(x):
    """Seed numpy's global random number generator with ``x``.

    Called below with a fixed value so that :func:`sample` produces the same
    parameter combinations on every run (see the comment below).
    """
    np.random.seed(x)
# Set the random seed of numpy to a fixed value.
# Without this, util.sample method generate different random numbers in each
# call, that is, we get a different parameter combination without any modify to
# the wscript. This is problematic when we add or remove snippets to the
# wscript; we don't want to re-run the experiments that have been already
# completed.
#
# WARNING: By fixing the random seed, we can control the generation of random
# numbers, but it is limited to some extent: if we add in wscript a experiment
# with util.sample above the previously defined experiment, which also use
# util.sample, generations of random number no longer follow the previous
# execution.
set_random_seed(10)
| [
"numpy.random.seed",
"json.dumps",
"functools.wraps",
"itertools.product"
] | [((3289, 3319), 'functools.wraps', 'functools.wraps', (['callback_body'], {}), '(callback_body)\n', (3304, 3319), False, 'import functools\n'), ((5168, 5198), 'functools.wraps', 'functools.wraps', (['callback_body'], {}), '(callback_body)\n', (5183, 5198), False, 'import functools\n'), ((6451, 6477), 'itertools.product', 'itertools.product', (['*values'], {}), '(*values)\n', (6468, 6477), False, 'import itertools\n'), ((8990, 9007), 'numpy.random.seed', 'np.random.seed', (['x'], {}), '(x)\n', (9004, 9007), True, 'import numpy as np\n'), ((5563, 5581), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (5573, 5581), False, 'import json\n'), ((5322, 5335), 'json.dumps', 'json.dumps', (['v'], {}), '(v)\n', (5332, 5335), False, 'import json\n')] |
import os
import glob
import random
import numpy as np
import cv2
from tqdm.auto import tqdm
import torch
from torch.autograd import Variable
import torchvision.transforms as transforms
class ImageChunker(object):
    """Cut large images into overlapping (rows, cols) chunks and
    reassemble the chunks back into an image of the original size."""

    def __init__(self, rows, cols, overlap):
        # Chunk height/width and the overlap (in pixels) between
        # neighbouring chunks.
        self.rows = rows
        self.cols = cols
        self.overlap = overlap

    def perform_chunking(self, img_size, chunk_size):
        """
        Given an image dimension img_size, return list of (start, stop)
        tuples to perform chunking of chunk_size.
        """
        step = chunk_size - self.overlap / 2
        spans = []
        i = 0
        while True:
            spans.append((i * step, i * step + chunk_size))
            i += 1
            if spans[-1][1] > img_size:
                break
        # Pull the last span back so that it ends exactly at img_size.
        n = len(spans)
        shift = n * chunk_size - img_size - (n - 1) * self.overlap / 2
        spans[-1] = tuple(v - shift for v in spans[-1])
        return [(int(a), int(b)) for a, b in spans]

    def get_chunks(self, img, scale=1):
        """
        Get width and height lists of (start, stop) tuples for chunking of img.
        """
        if img.shape[0] > self.rows:
            x_chunks = self.perform_chunking(img.shape[0], self.rows)
        else:
            x_chunks = [(0, img.shape[0])]
        if img.shape[1] > self.cols:
            y_chunks = self.perform_chunking(img.shape[1], self.cols)
        else:
            y_chunks = [(0, img.shape[1])]
        return x_chunks, y_chunks

    def dimension_preprocess(self, img, padding=True):
        """
        Pad an image smaller than (rows, cols) with ones and cut it into
        chunks of at most (rows, cols).  Inverse operation of
        dimension_postprocess().
        """
        # Assert single image input.
        assert len(img.shape) == 3, "Image dimension expected to be (H, W, C)"
        if padding:
            # Pad height, then width, up to the chunk size.
            if img.shape[0] < self.rows:
                fill = np.ones(
                    (self.rows - img.shape[0], img.shape[1], img.shape[2]))
                img = np.concatenate((img, fill), axis=0)
            if img.shape[1] < self.cols:
                fill = np.ones(
                    (img.shape[0], self.cols - img.shape[1], img.shape[2]))
                img = np.concatenate((img, fill), axis=1)
        x_chunks, y_chunks = self.get_chunks(img)
        pieces = [img[x0:x1, y0:y1, :]
                  for (x0, x1) in x_chunks
                  for (y0, y1) in y_chunks]
        return np.array(pieces)

    def dimension_postprocess(self, chunked_images, original_image, scale=1, padding=True):
        """
        Reassemble chunks produced by dimension_preprocess() into a single
        image of original_image's (scaled) size, averaging regions where two
        chunks overlap.
        """
        # Assert input dimensions.
        assert len(
            original_image.shape) == 3, "Image dimension expected to be (H, W, C)"
        assert len(
            chunked_images.shape) == 4, "Chunked images dimension expected to be (B, H, W, C)"
        if padding:
            # Strip the padding that dimension_preprocess() may have added.
            if original_image.shape[0] < self.rows:
                chunked_images = np.array(
                    [c[0:scale * original_image.shape[0], :, :] for c in chunked_images])
            if original_image.shape[1] < self.cols:
                chunked_images = np.array(
                    [c[:, 0:scale * original_image.shape[1], :] for c in chunked_images])
        new_shape = (
            original_image.shape[0] * scale,
            original_image.shape[1] * scale,
            original_image.shape[2]
        )
        reconstruction = np.zeros(new_shape)
        x_chunks, y_chunks = self.get_chunks(original_image)
        s = scale
        i = 0
        for x0, x1 in x_chunks:
            for y0, y1 in y_chunks:
                already_filled = reconstruction != 0
                piece = np.zeros(new_shape)
                piece[x0 * s:x1 * s, y0 * s:y1 * s, :] += chunked_images[i]
                overlap_mask = already_filled & (piece != 0)
                reconstruction += piece
                # Where two chunks contributed, keep their average.
                reconstruction[overlap_mask] = reconstruction[overlap_mask] / 2
                i += 1
        return reconstruction
# cv Version
def load_image(filename, size=None, scale=None):
    """Load an image with OpenCV, optionally resized.

    :param filename: path of the image file.
    :param size: if given, resize the image to (size, size) pixels.
    :param scale: if given (and *size* is not), shrink both dimensions by
        this factor.
    :return: the image as a numpy array (BGR, as returned by cv2.imread).
    """
    img = cv2.imread(filename)
    if size is not None:
        # BUGFIX: cv2.imread returns a numpy array; the original PIL-style
        # ``img.resize(size, size)`` call would fail (ndarray.resize has a
        # different meaning/signature).  Use cv2.resize instead.
        img = cv2.resize(img, (size, size))
    elif scale is not None:
        # ndarray has shape (H, W, C); cv2.resize takes (width, height).
        # (The original subscripted ``img.size``, which is an int for
        # ndarrays and would raise TypeError.)
        img = cv2.resize(img, (int(img.shape[1] / scale), int(img.shape[0] / scale)))
    return img
def save_image(filename, chunked_imgs, count=True):
    """Write every chunk of *chunked_imgs* as '<stem>_<i>.jpg'.

    Chunks go to the train directory (``count=True``) or the validation
    directory (``count=False``); the process working directory is changed
    to the target directory first.  Relies on the module-level globals
    ``path_cropped`` / ``path_cropped2``.
    """
    target_dir = path_cropped if count else path_cropped2
    os.chdir(target_dir)
    stem = os.path.splitext(filename)[0]
    for idx in tqdm(range(chunked_imgs.shape[0])):
        cv2.imwrite('{0}_{1}.jpg'.format(stem, idx), chunked_imgs[idx])
def show_image(img, key=1, ver='file'):
    """Display an image in a window named 'origin'.

    :param img: file path when ``ver == 'file'``, otherwise an image array.
    :param key: delay (ms) passed to cv2.waitKey; must be an int.
    :param ver: 'file' to load from disk first, anything else to show the
        given array directly.
    """
    # cv2.destroyAllWindows()
    assert isinstance(key, int)
    to_show = cv2.imread(img, cv2.IMREAD_COLOR) if ver == 'file' else img
    cv2.imshow('origin', to_show)
    cv2.waitKey(key)
# def save_image(filename, data):
# img = data.clone().add(1).div(2).mul(255).clamp(0, 255).numpy()
# img = img.transpose(1, 2, 0).astype("uint8")
# img = Image.fromarray(img)
# img.save(filename)
# local path setting
path = '/home/ubuntu/context/data'
print(path)
# Output directories for the cropped chunks (train / validation split).
path_cropped = '/home/ubuntu/context/context_encoder_pytorch-master_ver_1/dataset/train/annals'
path_cropped2 = '/home/ubuntu/context/context_encoder_pytorch-master_ver_1/dataset/val/annals'
# original data path
os.chdir(path)
file_list = os.listdir(os.getcwd())
cnt = 0
# Walk every sub-directory of the data root, chop each *.jpg into 256x256
# non-overlapping chunks (overlap=0) and save them: the first ~20000 go to
# the train directory, the next ~8000 to the validation directory.
for l in tqdm(file_list):
    # a,b,...
    path_img = os.path.join(path,l)
    os.chdir(path_img)
    image_list = glob.glob('*.jpg')
    print(len(image_list))
    for i, img in tqdm(enumerate(image_list)):
        try:
            image = load_image(os.path.join(path_img,img))
            chunk = ImageChunker(256, 256, overlap=0)
            results = chunk.dimension_preprocess(image)
            print("=======cropped========>", img)
            cnt +=1
            if cnt < 20000:
                save_image(img, results, count=True)
            elif cnt>=20000 and cnt <28000:
                save_image(img, results, count=False)
            elif cnt ==28000:
                # NOTE(review): this breaks only the inner loop; the outer
                # directory loop keeps running — presumably intended to stop
                # all cropping at 28000, verify.
                break
        except ValueError as e:
            print(str(e))
            # Failed images still advance the counter.
            cnt += 1
# def cropimage(img_name):
# row_list_top = []
# row_list_top_idx = []
# row_list_bottom = []
# row_list_bottom_idx = []
# img_original = cv2.imread(img_name, 0)
# H_original, W_original = img_original.shape[:2]
# img_resize = cv2.resize(img_original, (256, 256),
# interpolation=cv2.INTER_LINEAR)
# H_resize, W_resize = img_resize.shape[:2]
# for i in range(int(H_resize/2)):
# row_top = sum(img_resize[i, :])
# row_bottom = sum(img_resize[i+int(H_resize/2), :])
# row_list_top.append(row_top)
# row_list_top_idx.append(row_top)
# row_list_bottom.append(row_bottom)
# row_list_bottom_idx.append(row_bottom)
# row_list_top.sort()
# row_list_bottom.sort()
# top_row = row_list_top[0]
# bottom_row = row_list_bottom[0]
# for i in range(len(row_list_top)):
# if row_list_top_idx[i] == top_row:
# idx_top = i
# for i in range(len(row_list_bottom)):
# if row_list_bottom_idx[i] == bottom_row:
# idx_bottom = i + int(H_resize/2)
# img_temp = img_resize[idx_top:idx_bottom, 0:512]
# return img_temp
| [
"os.getcwd",
"cv2.waitKey",
"numpy.zeros",
"numpy.ones",
"tqdm.auto.tqdm",
"cv2.imread",
"numpy.array",
"os.path.splitext",
"glob.glob",
"cv2.imshow",
"os.path.join",
"os.chdir",
"numpy.concatenate"
] | [((6931, 6945), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (6939, 6945), False, 'import os\n'), ((7001, 7016), 'tqdm.auto.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (7005, 7016), False, 'from tqdm.auto import tqdm\n'), ((5406, 5426), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (5416, 5426), False, 'import cv2\n'), ((6969, 6980), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6978, 6980), False, 'import os\n'), ((7047, 7068), 'os.path.join', 'os.path.join', (['path', 'l'], {}), '(path, l)\n', (7059, 7068), False, 'import os\n'), ((7072, 7090), 'os.chdir', 'os.chdir', (['path_img'], {}), '(path_img)\n', (7080, 7090), False, 'import os\n'), ((7108, 7126), 'glob.glob', 'glob.glob', (['"""*.jpg"""'], {}), "('*.jpg')\n", (7117, 7126), False, 'import glob\n'), ((3018, 3034), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (3026, 3034), True, 'import numpy as np\n'), ((4679, 4698), 'numpy.zeros', 'np.zeros', (['new_shape'], {}), '(new_shape)\n', (4687, 4698), True, 'import numpy as np\n'), ((5752, 5774), 'os.chdir', 'os.chdir', (['path_cropped'], {}), '(path_cropped)\n', (5760, 5774), False, 'import os\n'), ((5943, 5966), 'os.chdir', 'os.chdir', (['path_cropped2'], {}), '(path_cropped2)\n', (5951, 5966), False, 'import os\n'), ((6260, 6293), 'cv2.imread', 'cv2.imread', (['img', 'cv2.IMREAD_COLOR'], {}), '(img, cv2.IMREAD_COLOR)\n', (6270, 6293), False, 'import cv2\n'), ((6302, 6330), 'cv2.imshow', 'cv2.imshow', (['"""origin"""', 'origin'], {}), "('origin', origin)\n", (6312, 6330), False, 'import cv2\n'), ((6339, 6355), 'cv2.waitKey', 'cv2.waitKey', (['key'], {}), '(key)\n', (6350, 6355), False, 'import cv2\n'), ((6382, 6407), 'cv2.imshow', 'cv2.imshow', (['"""origin"""', 'img'], {}), "('origin', img)\n", (6392, 6407), False, 'import cv2\n'), ((6416, 6432), 'cv2.waitKey', 'cv2.waitKey', (['key'], {}), '(key)\n', (6427, 6432), False, 'import cv2\n'), ((2306, 2369), 'numpy.ones', 'np.ones', (['(self.rows - img.shape[0], 
img.shape[1], img.shape[2])'], {}), '((self.rows - img.shape[0], img.shape[1], img.shape[2]))\n', (2313, 2369), True, 'import numpy as np\n'), ((2413, 2451), 'numpy.concatenate', 'np.concatenate', (['(img, padding)'], {'axis': '(0)'}), '((img, padding), axis=0)\n', (2427, 2451), True, 'import numpy as np\n'), ((2562, 2625), 'numpy.ones', 'np.ones', (['(img.shape[0], self.cols - img.shape[1], img.shape[2])'], {}), '((img.shape[0], self.cols - img.shape[1], img.shape[2]))\n', (2569, 2625), True, 'import numpy as np\n'), ((2669, 2707), 'numpy.concatenate', 'np.concatenate', (['(img, padding)'], {'axis': '(1)'}), '((img, padding), axis=1)\n', (2683, 2707), True, 'import numpy as np\n'), ((4102, 4122), 'numpy.array', 'np.array', (['new_images'], {}), '(new_images)\n', (4110, 4122), True, 'import numpy as np\n'), ((4433, 4453), 'numpy.array', 'np.array', (['new_images'], {}), '(new_images)\n', (4441, 4453), True, 'import numpy as np\n'), ((4966, 4985), 'numpy.zeros', 'np.zeros', (['new_shape'], {}), '(new_shape)\n', (4974, 4985), True, 'import numpy as np\n'), ((7246, 7273), 'os.path.join', 'os.path.join', (['path_img', 'img'], {}), '(path_img, img)\n', (7258, 7273), False, 'import os\n'), ((5873, 5899), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (5889, 5899), False, 'import os\n'), ((6065, 6091), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6081, 6091), False, 'import os\n')] |
from scipy.signal import argrelmin
from scipy.optimize import minimize
from scipy.integrate import trapz, cumtrapz
import pleque.utils.surfaces as surf
from pleque.utils.surfaces import points_inside_curve, find_contour
import numpy as np
import xarray as xa
def is_monotonic(f, x0, x1, n_test=10):
    """
    Check whether ``f`` is monotonic along the straight segment x0 -> x1.

    The segment is sampled at ``n_test`` points; the function is considered
    monotonic when every consecutive difference has the same sign.

    :param f: 2D spline `f(x[0], x[1])`
    :param x0: start point (2d) of the line
    :param x1: end point (2d) of the line
    :param n_test: number of points which are tested.
    :return: logic value
    """
    r_samples = np.linspace(x0[0], x1[0], n_test)
    z_samples = np.linspace(x0[1], x1[1], n_test)
    values = f(r_samples, z_samples, grid=False)
    signs = np.sign(np.diff(values))
    return np.abs(np.sum(signs)) == n_test - 1
def minimize_in_vicinity(point, func, r_lims, z_lims):
    """
    Minimize ``func`` in a small box (+-0.1, clipped to the limits) around
    ``point``.  An unbounded Powell search is tried first; when its answer
    drifts too far from the seed point, a bounded TNC search is used instead.

    :param point: (R, Z) point.
    :param func: f(point) function to be minimized
    :param r_lims: R limits where func is valid
    :param z_lims: Z limits where func is valid
    :return: array(2) with the located minimum
    """
    # TODO: test performance of the both methods
    # Study different methods and find the most appropriate and fastest!
    box = ((np.max((r_lims[0], point[0] - 0.1)),
            np.min((r_lims[-1], point[0] + 0.1))),
           (np.max((z_lims[0], point[1] - 0.1)),
            np.min((z_lims[-1], point[1] + 0.1))))

    solution = minimize(func, point, method='Powell', options={'xtol': 1e-7})
    found = np.array((solution.x[0], solution.x[1]))

    # If the unbounded Powell algorithm finds a wrong (far) minimum,
    # fall back to the bound-aware TNC algorithm.
    if np.sum(found ** 2 - point ** 2) > 1e-2:
        solution = minimize(func, point, method='TNC', bounds=box,
                            options={'xtol': 1e-7})
        found = np.array((solution.x[0], solution.x[1]))

    return found
def find_extremes(rs, zs, psi_spl):
    """
    Find candidate extreme points of psi on the grid given by rs and zs.

    Local minima of |grad psi|^2 (found along both grid axes) are classified
    by the sign of the Hessian determinant of psi: positive -> o-point
    (magnetic axis candidate), non-positive -> x-point (saddle) candidate.

    :param rs: array-like(n) R - major radius coordinate
    :param zs: array-like(m) Z - vertical coordinate
    :param psi_spl: 2D spline psi(R, Z)
    :return: tuple(x-points, o-points) of arrays(N, 2)
    """
    psi_x = psi_spl(rs, zs, dx=1, dy=0)
    psi_y = psi_spl(rs, zs, dx=0, dy=1)
    psi_xysq = psi_x ** 2 + psi_y ** 2

    # Extremes along the first and the second dimension; a true local
    # minimum of |grad psi|^2 must appear in both sets.  Using a set for the
    # second axis turns the original O(N*M) pairwise match into O(N + M)
    # while preserving the original output order.
    mins0 = argrelmin(psi_xysq, axis=0)
    mins1 = argrelmin(psi_xysq, axis=1)
    mins1_set = set(zip(mins1[0], mins1[1]))

    def psi_xysq_func(x):
        """Return |grad psi|^2 evaluated at the point x = (R, Z)."""
        return psi_spl(x[0], x[1], dx=1, dy=0, grid=False) ** 2 \
               + psi_spl(x[0], x[1], dx=0, dy=1, grid=False) ** 2

    x_points = []
    o_points = []
    for ar, az in zip(mins0[0], mins0[1]):
        if (ar, az) not in mins1_set:
            continue
        r_ex = rs[ar]
        z_ex = zs[az]
        # XXX Remove bad candidates for the extreme (this is a potential
        # trouble point): skip points whose gradient is too large.
        if psi_xysq_func((r_ex, z_ex)) > 1:  # 1e3 * dpsidx:
            continue
        psi_xx = (psi_spl(r_ex, z_ex, dx=2, dy=0, grid=False))
        psi_yy = (psi_spl(r_ex, z_ex, dx=0, dy=2, grid=False))
        psi_xy = (psi_spl(r_ex, z_ex, dx=1, dy=1, grid=False)) ** 2
        # Hessian determinant decides minimum (o-point) vs saddle (x-point).
        D = psi_xx * psi_yy - psi_xy
        if D > 0:
            o_points.append((r_ex, z_ex))
        else:
            x_points.append((r_ex, z_ex))

    return np.array(x_points), np.array(o_points)
def recognize_mg_axis(o_points, psi_spl, r_lims, z_lims, first_wall=None, mg_axis_candidate=None):
    """
    Try to recognize which o-point is the magnetic axis.

    If `mg_axis_candidate` is not specified the o-point is identified as the
    point most in the center of the calculation area and inside the first
    wall if specified. If the candidate is specified, the magnetic axis is
    recognized as the point closest to the candidate. The position of the
    o-point is finally refined by minimization of the sum of squares of the
    first partial derivatives of psi.

    :param o_points: array-like(N, 2)
    :param psi_spl: 2D spline psi(R,Z)
    :param r_lims: tuple(Rmin, Rmax)
    :param z_lims: tuple(Zmin, Zmax)
    :param first_wall: array-like (N, 2) specification of the first wall.
    :param mg_axis_candidate: tuple (r, z)
    :return: tuple with recognized magnetic axis point and arguments of sorted o-points
    """
    if mg_axis_candidate is None:
        r_centr = (r_lims[0] + r_lims[-1]) / 2
        z_centr = (z_lims[0] + z_lims[-1]) / 2
        # vertical distance is favoured (weight 5 on the radial term)
        op_dist = 5 * (o_points[:, 0] - r_centr) ** 2 + (o_points[:, 1] - z_centr) ** 2
    else:
        op_dist = (o_points[:, 0] - mg_axis_candidate[0]) ** 2 + (o_points[:, 1] - mg_axis_candidate[1]) ** 2
    # normalise the maximal distance to one
    op_dist = op_dist / np.max(op_dist)

    # assume that psi value has its minimum in the center (is this check really needed?
    op_psiscale = 1
    # XXX this code may be usefull for axis recognition
    # op_psiscale = psi_spln(o_points[:, 0], o_points[:, 1], grid=False)
    # op_psiscale = 1 + (op_psiscale - np.min(op_psiscale)) / (np.max(op_psiscale) - np.min(op_psiscale))

    op_in_first_wall = np.zeros_like(op_dist)
    if first_wall is not None and len(first_wall) > 2:
        mask_in = points_inside_curve(o_points, first_wall)
        op_in_first_wall[mask_in] = 1
        # BUGFIX: the original used ``op_in_first_wall[not mask_in]`` —
        # Python's ``not`` on a boolean ndarray raises ValueError for more
        # than one element; element-wise negation ``~`` is required.
        op_in_first_wall[~mask_in] = 1e-3

    sortidx = np.argsort(op_dist * op_psiscale * (1 - op_in_first_wall))

    o_point = o_points[sortidx[0]]

    def psi_xysq_func(x):
        """Return |grad psi|^2 evaluated at the point x = (R, Z)."""
        return psi_spl(x[0], x[1], dx=1, dy=0, grid=False) ** 2 \
               + psi_spl(x[0], x[1], dx=0, dy=1, grid=False) ** 2

    o_point = minimize_in_vicinity(o_point, psi_xysq_func, r_lims, z_lims)

    return o_point, sortidx
def recognize_x_points(x_points, mg_axis, psi_axis, psi_spl, r_lims, z_lims, psi_lcfs_candidate=None,
                       x_point_candidates=None):
    """
    Identify the (up to two) most plausible x-points among the candidates.

    Candidates are ranked by a product of penalties: |psi - psi_ref|
    (psi_ref being ``psi_lcfs_candidate`` or ``psi_axis``), whether psi is
    monotonic on the segment from the magnetic axis (monotonic candidates
    are favoured), and, optionally, the squared distance to user-supplied
    candidate positions.  The best candidates are refined by a local
    minimization of |grad psi|^2.

    :param x_points: array(N, 2) of x-point candidates, or None
    :param mg_axis: (R, Z) magnetic axis position
    :param psi_axis: psi value on the magnetic axis
    :param psi_spl: 2D spline psi(R, Z)
    :param r_lims: tuple(Rmin, Rmax)
    :param z_lims: tuple(Zmin, Zmax)
    :param psi_lcfs_candidate: expected psi value on the LCFS, if known
    :param x_point_candidates: expected x-point position(s), if known
    :return: ((xp1, xp2), sortidx); xp2 is None when fewer than two
        candidates exist
    """
    if x_points is None or len(x_points) == 0:
        return (None, None), list([])

    def psi_xysq_func(x):
        """Return |grad psi|^2 evaluated at the point x = (R, Z)."""
        return psi_spl(x[0], x[1], dx=1, dy=0, grid=False) ** 2 \
               + psi_spl(x[0], x[1], dx=0, dy=1, grid=False) ** 2

    len_diff = np.ones(x_points.shape[0])
    monotonic = np.zeros(x_points.shape[0])

    psi_xps = psi_spl(x_points[:, 0], x_points[:, 1], grid=False)
    if psi_lcfs_candidate is None:
        psi_diff = np.abs(psi_xps - psi_axis)
    else:
        psi_diff = np.abs(psi_xps - psi_lcfs_candidate)

    if x_point_candidates is not None:
        if len(np.shape(x_point_candidates)) > 1:
            len_diff = (x_point_candidates[:, 0, np.newaxis] - x_points[np.newaxis, :, 0]) ** 2 + \
                       (x_point_candidates[:, 1, np.newaxis] - x_points[np.newaxis, :, 1]) ** 2
            len_diff = np.prod(len_diff, axis=0)
        else:
            len_diff = (x_point_candidates[0] - x_points[:, 0]) ** 2 + (x_point_candidates[1] - x_points[:, 1]) ** 2
        len_diff = len_diff / np.max(len_diff)

    for i, xpoint in enumerate(x_points):
        monotonic[i] = is_monotonic(psi_spl, mg_axis, xpoint, 10)
        # Non-monotonic connections get penalty ~2, monotonic ones ~1e-3.
        monotonic[i] = (1 - monotonic[i] * 1) + 1e-3

    sortidx = np.argsort(psi_diff * monotonic * len_diff)
    xp1 = x_points[sortidx[0]]

    # BUGFIX: the original tested ``len(x_points) < 1`` which is never true
    # here (the empty case returned above), so a second x-point was never
    # produced.  A second candidate exists when there is more than one point.
    if len(x_points) > 1:
        xp2 = x_points[sortidx[1]]
        if psi_diff[sortidx[0]] > psi_diff[sortidx[1]]:
            xp1, xp2 = xp2, xp1
            sortidx[0], sortidx[1] = sortidx[1], sortidx[0]
        xp2 = minimize_in_vicinity(xp2, psi_xysq_func, r_lims, z_lims)
    else:
        xp2 = None

    xp1 = minimize_in_vicinity(xp1, psi_xysq_func, r_lims, z_lims)

    return (xp1, xp2), sortidx
def recognize_plasma_type(x_point, first_wall, mg_axis, psi_axis, psi_spl):
    """
    Recognize whether the plasma is limited or diverted (x-point) and find
    the point which limits it: the wall contact point for a limiter plasma,
    the x-point otherwise.

    :param x_point: (R, Z) position of the suspected x-point, or `None`.
    :param first_wall: array(N, 2) points which may limit the plasma.
    :param mg_axis: (R, Z) position of the magnetic axis of the plasma.
    :param psi_axis: psi on axis
    :param psi_spl: 2D (R, Z) spline with psi values
    :return: tuple (bool, point); the bool is True for a limiter plasma.
    """
    psi_wall = psi_spl(first_wall[:, 0], first_wall[:, 1], grid=False)
    psi_wall_diff = np.abs(psi_wall - psi_axis)
    idxs_wall = np.argsort(psi_wall_diff)

    # todo: tmp solution (this is not the fastest way of doing this)
    # Walk the wall points ordered by |psi - psi_axis| and take the first
    # one with a monotonic psi connection to the magnetic axis.
    iwall_min = -1
    wall_min_diff = np.inf
    for i in range(len(idxs_wall)):
        if is_monotonic(psi_spl, first_wall[idxs_wall[i]], mg_axis, 50):
            iwall_min = i
            wall_min_diff = psi_wall_diff[idxs_wall[i]]
            break

    limiter_plasma = True
    limiter_point = first_wall[idxs_wall[iwall_min]]

    # The x-point wins when it is inside the wall (or the wall is trivial)
    # and closer in psi to the axis than any wall contact candidate.
    if x_point is not None and (len(first_wall) < 4 or points_inside_curve([x_point], first_wall)[0]):
        diff_psi_xp = np.abs(psi_spl(*x_point, grid=False) - psi_axis)
        if diff_psi_xp < wall_min_diff or iwall_min == -1:
            limiter_plasma = False
            limiter_point = x_point

    return limiter_plasma, limiter_point
def find_close_lcfs(psi_lcfs, rs, zs, psi_spl, mg_axis, psi_axis=0):
    """
    Localize a closed field line slightly inside the last closed flux surface,
    at psi shifted towards the axis by 1e-4 of the (psi_lcfs - psi_axis) span.

    :param psi_lcfs: float, psi value on the last closed flux surface
    :param rs: array(N), R axis of the grid where to find the contour
    :param zs: array(M), Z axis of the grid where to find the contour
    :param psi_spl: 2D (R, Z) spline with psi values
    :param mg_axis: (R, Z) position of the magnetic axis of the plasma
    :param psi_axis: float, psi on the magnetic axis
    :return: the first closed contour (as returned by `find_contour`) that
        encloses the magnetic axis, or None when no such contour exists
    """
    # Step a tiny fraction inside the LCFS so the contour is guaranteed closed.
    new_psi_lcfs = psi_lcfs - 1e-4 * (psi_lcfs - psi_axis)
    contours = find_contour(psi_spl(rs, zs, grid=True).T, new_psi_lcfs, rs, zs)
    if contours is not None:
        for contour in contours:
            # Only a closed curve surrounding the magnetic axis qualifies.
            if surf.curve_is_closed(contour) and surf.points_inside_curve([mg_axis], contour):
                return contour
    return None
def find_strike_points(psi_spl, rs, zs, psi_lcfs, first_wall):
    """
    Find strike points. Any intersection of the psi_lcfs iso-contour with the
    first wall is assumed to be a strike point.

    :param psi_spl: 2D spline
    :param rs: array(N) - R component of grid used for contour finding.
    :param zs: array(Z) - Z component of grid used for contour finding.
    :param psi_lcfs: float
    :param first_wall: array(N, 2)
    :return: array(M, 2) or None
    """
    contours = find_contour(psi_spl(rs, zs, grid=True).T, psi_lcfs, rs, zs)
    strike_points = []
    if contours is not None:
        for contour in contours:
            hits = surf.intersection(contour, first_wall)
            if hits.size != 0:
                strike_points.append(hits)
    # Concatenate hits from every contour; None signals "no strike points".
    return np.concatenate(strike_points) if strike_points else None
def find_surface_step(psi_spl, psi_target, flux_surf):
    """
    Use a simple down-hill algorithm to move every point of `flux_surf`
    one step towards the `psi_target` iso-surface, in place.

    :param psi_spl: 2D (R, Z) spline with psi values
    :param psi_target: float, psi value of the target surface
    :param flux_surf: array(N, 2) of (R, Z) points; modified in place
    :return: array(N, 2) -- the same (mutated) `flux_surf` array
    """
    psi = psi_spl(flux_surf[:, 0], flux_surf[:, 1], grid=False)
    # First derivatives of psi with respect to R and Z at each point.
    psix = psi_spl(flux_surf[:, 0], flux_surf[:, 1], grid=False, dx=1, dy=0)
    psiy = psi_spl(flux_surf[:, 0], flux_surf[:, 1], grid=False, dx=0, dy=1)
    # Newton-like step along grad(psi), scaled by |grad(psi)|^-2 and damped
    # by 0.99 for stability of the iteration.
    deriv_norm = np.sqrt(psix ** 2 + psiy ** 2)
    psix = psix / (deriv_norm ** 2)
    psiy = psiy / (deriv_norm ** 2)
    flux_surf[:, 0] -= 0.99 * psix * (psi - psi_target)
    flux_surf[:, 1] -= 0.99 * psiy * (psi - psi_target)
    return flux_surf
def pprime2p(pprime, psi_ax, psi_bnd):
    """
    Integrate p' over normalized psi to obtain the pressure profile,
    shifted so that the pressure vanishes at the plasma boundary.
    """
    coef = psi_bnd - psi_ax
    is_xarray = isinstance(pprime, xa.DataArray)
    if is_xarray:
        psi_n = pprime.psi_n
    else:
        psi_n = np.linspace(0, 1, len(pprime), endpoint=True)
    p = coef * cumtrapz(pprime, psi_n, initial=0)
    # Anchor the profile to zero at the boundary (last psi_n sample).
    p = p - p[-1]
    return xa.DataArray(p, [psi_n], ['psi_n']) if is_xarray else p
def ffprime2f(ffprime, psi_ax, psi_bnd, f0):
    """
    Integrate ff' over normalized psi and recover f, anchored so that
    f matches f0 (including its sign) at the plasma boundary.
    """
    coef = psi_bnd - psi_ax
    is_xarray = isinstance(ffprime, xa.DataArray)
    if is_xarray:
        psi_n = ffprime.psi_n
    else:
        psi_n = np.linspace(0, 1, len(ffprime), endpoint=True)
    f_sq = 2 * coef * cumtrapz(ffprime, psi_n, initial=0)
    # f = sign(f0) * sqrt(f^2), with the boundary condition f(1) = f0.
    f = np.sign(f0) * np.sqrt(f_sq - f_sq[-1] + f0 ** 2)
    return xa.DataArray(f, [psi_n], ['psi_n']) if is_xarray else f
"numpy.abs",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"numpy.shape",
"pleque.utils.surfaces.points_inside_curve",
"pleque.utils.surfaces.intersection",
"pleque.utils.surfaces.curve_is_closed",
"numpy.prod",
"scipy.optimize.minimize",
"numpy.zeros_like",
"scipy.signal.argrelmin",
"numpy.ma... | [((600, 633), 'numpy.linspace', 'np.linspace', (['x0[0]', 'x1[0]', 'n_test'], {}), '(x0[0], x1[0], n_test)\n', (611, 633), True, 'import numpy as np\n'), ((645, 678), 'numpy.linspace', 'np.linspace', (['x0[1]', 'x1[1]', 'n_test'], {}), '(x0[1], x1[1], n_test)\n', (656, 678), True, 'import numpy as np\n'), ((1431, 1494), 'scipy.optimize.minimize', 'minimize', (['func', 'point'], {'method': '"""Powell"""', 'options': "{'xtol': 1e-07}"}), "(func, point, method='Powell', options={'xtol': 1e-07})\n", (1439, 1494), False, 'from scipy.optimize import minimize\n'), ((1510, 1546), 'numpy.array', 'np.array', (["(res['x'][0], res['x'][1])"], {}), "((res['x'][0], res['x'][1]))\n", (1518, 1546), True, 'import numpy as np\n'), ((3890, 3908), 'numpy.array', 'np.array', (['o_points'], {}), '(o_points)\n', (3898, 3908), True, 'import numpy as np\n'), ((3924, 3942), 'numpy.array', 'np.array', (['x_points'], {}), '(x_points)\n', (3932, 3942), True, 'import numpy as np\n'), ((5708, 5730), 'numpy.zeros_like', 'np.zeros_like', (['op_dist'], {}), '(op_dist)\n', (5721, 5730), True, 'import numpy as np\n'), ((5944, 6002), 'numpy.argsort', 'np.argsort', (['(op_dist * op_psiscale * (1 - op_in_first_wall))'], {}), '(op_dist * op_psiscale * (1 - op_in_first_wall))\n', (5954, 6002), True, 'import numpy as np\n'), ((6976, 7002), 'numpy.ones', 'np.ones', (['x_points.shape[0]'], {}), '(x_points.shape[0])\n', (6983, 7002), True, 'import numpy as np\n'), ((7019, 7046), 'numpy.zeros', 'np.zeros', (['x_points.shape[0]'], {}), '(x_points.shape[0])\n', (7027, 7046), True, 'import numpy as np\n'), ((7952, 7995), 'numpy.argsort', 'np.argsort', (['(psi_diff * monotonic * len_diff)'], {}), '(psi_diff * monotonic * len_diff)\n', (7962, 7995), True, 'import numpy as np\n'), ((9264, 9291), 'numpy.abs', 'np.abs', (['(psi_wall - psi_axis)'], {}), '(psi_wall - psi_axis)\n', (9270, 9291), True, 'import numpy as np\n'), ((9308, 9333), 'numpy.argsort', 'np.argsort', (['psi_wall_diff'], {}), 
'(psi_wall_diff)\n', (9318, 9333), True, 'import numpy as np\n'), ((12436, 12466), 'numpy.sqrt', 'np.sqrt', (['(psix ** 2 + psiy ** 2)'], {}), '(psix ** 2 + psiy ** 2)\n', (12443, 12466), True, 'import numpy as np\n'), ((1643, 1678), 'numpy.sum', 'np.sum', (['(res_point ** 2 - point ** 2)'], {}), '(res_point ** 2 - point ** 2)\n', (1649, 1678), True, 'import numpy as np\n'), ((1701, 1776), 'scipy.optimize.minimize', 'minimize', (['func', 'point'], {'method': '"""TNC"""', 'bounds': 'bounds', 'options': "{'xtol': 1e-07}"}), "(func, point, method='TNC', bounds=bounds, options={'xtol': 1e-07})\n", (1709, 1776), False, 'from scipy.optimize import minimize\n'), ((1796, 1832), 'numpy.array', 'np.array', (["(res['x'][0], res['x'][1])"], {}), "((res['x'][0], res['x'][1]))\n", (1804, 1832), True, 'import numpy as np\n'), ((2448, 2475), 'scipy.signal.argrelmin', 'argrelmin', (['psi_xysq'], {'axis': '(0)'}), '(psi_xysq, axis=0)\n', (2457, 2475), False, 'from scipy.signal import argrelmin\n'), ((2495, 2522), 'scipy.signal.argrelmin', 'argrelmin', (['psi_xysq'], {'axis': '(1)'}), '(psi_xysq, axis=1)\n', (2504, 2522), False, 'from scipy.signal import argrelmin\n'), ((5323, 5338), 'numpy.max', 'np.max', (['op_dist'], {}), '(op_dist)\n', (5329, 5338), True, 'import numpy as np\n'), ((5804, 5845), 'pleque.utils.surfaces.points_inside_curve', 'points_inside_curve', (['o_points', 'first_wall'], {}), '(o_points, first_wall)\n', (5823, 5845), False, 'from pleque.utils.surfaces import points_inside_curve, find_contour\n'), ((7169, 7195), 'numpy.abs', 'np.abs', (['(psi_xps - psi_axis)'], {}), '(psi_xps - psi_axis)\n', (7175, 7195), True, 'import numpy as np\n'), ((7225, 7261), 'numpy.abs', 'np.abs', (['(psi_xps - psi_lcfs_candidate)'], {}), '(psi_xps - psi_lcfs_candidate)\n', (7231, 7261), True, 'import numpy as np\n'), ((11876, 11894), 'numpy.concatenate', 'np.concatenate', (['sp'], {}), '(sp)\n', (11890, 11894), True, 'import numpy as np\n'), ((12905, 12939), 'scipy.integrate.cumtrapz', 
'cumtrapz', (['pprime', 'psi_n'], {'initial': '(0)'}), '(pprime, psi_n, initial=0)\n', (12913, 12939), False, 'from scipy.integrate import trapz, cumtrapz\n'), ((13016, 13051), 'xarray.DataArray', 'xa.DataArray', (['p', '[psi_n]', "['psi_n']"], {}), "(p, [psi_n], ['psi_n'])\n", (13028, 13051), True, 'import xarray as xa\n'), ((13325, 13360), 'scipy.integrate.cumtrapz', 'cumtrapz', (['ffprime', 'psi_n'], {'initial': '(0)'}), '(ffprime, psi_n, initial=0)\n', (13333, 13360), False, 'from scipy.integrate import trapz, cumtrapz\n'), ((13370, 13381), 'numpy.sign', 'np.sign', (['f0'], {}), '(f0)\n', (13377, 13381), True, 'import numpy as np\n'), ((13384, 13418), 'numpy.sqrt', 'np.sqrt', (['(f_sq - f_sq[-1] + f0 ** 2)'], {}), '(f_sq - f_sq[-1] + f0 ** 2)\n', (13391, 13418), True, 'import numpy as np\n'), ((13475, 13510), 'xarray.DataArray', 'xa.DataArray', (['f', '[psi_n]', "['psi_n']"], {}), "(f, [psi_n], ['psi_n'])\n", (13487, 13510), True, 'import xarray as xa\n'), ((1223, 1258), 'numpy.max', 'np.max', (['(r_lims[0], point[0] - 0.1)'], {}), '((r_lims[0], point[0] - 0.1))\n', (1229, 1258), True, 'import numpy as np\n'), ((1275, 1311), 'numpy.min', 'np.min', (['(r_lims[-1], point[0] + 0.1)'], {}), '((r_lims[-1], point[0] + 0.1))\n', (1281, 1311), True, 'import numpy as np\n'), ((1329, 1364), 'numpy.max', 'np.max', (['(z_lims[0], point[1] - 0.1)'], {}), '((z_lims[0], point[1] - 0.1))\n', (1335, 1364), True, 'import numpy as np\n'), ((1381, 1417), 'numpy.min', 'np.min', (['(z_lims[-1], point[1] + 0.1)'], {}), '((z_lims[-1], point[1] + 0.1))\n', (1387, 1417), True, 'import numpy as np\n'), ((7571, 7596), 'numpy.prod', 'np.prod', (['len_diff'], {'axis': '(0)'}), '(len_diff, axis=0)\n', (7578, 7596), True, 'import numpy as np\n'), ((7758, 7774), 'numpy.max', 'np.max', (['len_diff'], {}), '(len_diff)\n', (7764, 7774), True, 'import numpy as np\n'), ((11722, 11760), 'pleque.utils.surfaces.intersection', 'surf.intersection', (['contour', 'first_wall'], {}), '(contour, 
first_wall)\n', (11739, 11760), True, 'import pleque.utils.surfaces as surf\n'), ((7317, 7345), 'numpy.shape', 'np.shape', (['x_point_candidates'], {}), '(x_point_candidates)\n', (7325, 7345), True, 'import numpy as np\n'), ((10041, 10083), 'pleque.utils.surfaces.points_inside_curve', 'points_inside_curve', (['[x_point]', 'first_wall'], {}), '([x_point], first_wall)\n', (10060, 10083), False, 'from pleque.utils.surfaces import points_inside_curve, find_contour\n'), ((10961, 10990), 'pleque.utils.surfaces.curve_is_closed', 'surf.curve_is_closed', (['contour'], {}), '(contour)\n', (10981, 10990), True, 'import pleque.utils.surfaces as surf\n'), ((10995, 11039), 'pleque.utils.surfaces.points_inside_curve', 'surf.points_inside_curve', (['[mg_axis]', 'contour'], {}), '([mg_axis], contour)\n', (11019, 11039), True, 'import pleque.utils.surfaces as surf\n'), ((753, 770), 'numpy.diff', 'np.diff', (['psi_test'], {}), '(psi_test)\n', (760, 770), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
import functools
import hasher
import numpy as np
import scipy.spatial.distance as distance
from pyspark.mllib.linalg import SparseVector
def distance_metric(kv):
    """
    Computes the mean pairwise Jaccard distance of all the elements
    in a cluster/bucket.

    Returns a <k, v> pair: <bucket id, jaccard distance>.

    Parameters
    ----------
    kv: tuple of (int, list of array_like)
        A tuple of the form <k, v> pair where k is the cluster/bucket
        id and v a list of vectors.

    Returns
    -------
    bid: int
        The cluster/bucket id
    distance: float
        The mean pairwise Jaccard distance of the elements of the bucket.
    """
    # kv[1] is the grouped-values object; .data exposes the underlying list
    # (NOTE(review): specific to the Spark ResultIterable in use -- confirm).
    bid, X = kv[0], kv[1].data
    # isinstance (rather than a `type(...) is` check) also accepts subclasses.
    if isinstance(X[0], SparseVector):
        X = np.array([x.toArray() for x in X])
    return [bid, distance.pdist(np.array(X), 'jaccard').mean()]
class PyLSHModel:
    """
    Wrapper class for an LSH (locality-sensitive hashing) model built from
    minhash signatures over Spark RDDs.
    """

    def __init__(self, budget, min_clusters=2, target_threshold=None, n_bands=None):
        """
        Initialize the LSH model. Only one of the parameters target_threshold or
        n_bands is required.

        Parameters
        ----------
        budget : integer
            Total number of rows to split the signatures into.
        min_clusters : integer
            Minimum allowable cluster size.
        target_threshold : float, optional
            Value of desired threshold if bands not specified.
        n_bands : integer, optional
            Number of bands.
        """
        self.budget = budget  # budget is the total number of rows: rows*bands
        self.target_threshold = target_threshold
        self.min_clusters = min_clusters
        if n_bands:
            self.n_bands = n_bands
            # Integer division: each band holds a whole number of rows
            # (identical to the old `/` under Python 2, correct under Python 3).
            self.n_rows = budget // n_bands
            self.threshold = target_threshold
        else:
            self.__tune_parameters()
        self.sigs = None
        self.bands = None
        self.vectors_buckets = None
        self.buckets_vectors = None
        self.buckets = None
        self.scores = None

    def __tune_parameters(self):
        """
        Pick the first (bands, rows) factorization of the budget whose LSH
        threshold (1/b)^(1/r) drops below the requested target_threshold.

        Raises
        ------
        ValueError
            If no factorization of the budget satisfies the target threshold
            (previously this failed silently, leaving n_bands unset).
        """
        for bands in range(1, self.budget // 2):
            if self.budget % bands == 0:
                rows = self.budget // bands
                threshold = (1.0 / bands) ** (1.0 / rows)
                if threshold < self.target_threshold:
                    self.n_bands = bands
                    self.n_rows = rows
                    self.threshold = threshold
                    return
        raise ValueError(
            "No (bands, rows) split of budget %d reaches threshold %s"
            % (self.budget, self.target_threshold))

    def run(self, data, p, m):
        """
        Starts the main LSH process.

        Parameters
        ----------
        data : RDD[Vector]
            RDD of data points. Acceptable vector types are numpy.ndarray,
            list or PySpark SparseVector.
        p : integer
            Prime number larger than the largest value in data.
        m : integer
            Number of bins for hashing.
        """
        zdata = data.zipWithIndex()
        # One (a, b) coefficient pair per minhash function. np.random.randint
        # replaces the removed np.random.random_integers with the same
        # inclusive ranges: a in [1, p], b in [0, p].
        seeds = np.vstack([np.random.randint(1, p + 1, size=self.budget),
                           np.random.randint(0, p + 1, size=self.budget)]).T
        hashes = [functools.partial(hasher.minhash, a=s[0], b=s[1], p=p, m=m) for s in seeds]
        # Start by generating the signatures for each data point.
        # Output format is:
        # <(vector idx, band idx), (row idx, minhash)>
        sigs = zdata.flatMap(lambda x: [[(x[1], i % self.n_bands), (i, h(x[0]))] for i, h in enumerate(hashes)]).cache()
        # Put together the vector minhashes in the same band.
        # Output format is:
        # <(band idx, hash minhash-list), vector idx>
        bands = sigs.groupByKey().mapValues(sorted) \
            .map(lambda x: [(x[0][1], hash(tuple(x[1]))), x[0][0]]) \
            .groupByKey().cache()
        # Filter the bucket with size < min_clusters
        if self.min_clusters > 0:
            bands = bands.filter(lambda x: len(x[1]) >= self.min_clusters).cache()
        # Remaps each element to a cluster / bucket index.
        # Output format is:
        # <vector idx, bucket idx>
        # (plain int() replaces the removed np.long alias)
        vector_bucket = bands.map(lambda x: frozenset(sorted(x[1]))).distinct() \
            .zipWithIndex().flatMap(lambda x: map(lambda y: (int(y), x[1]), x[0])) \
            .cache()
        # Reverses indices, to key the vectors by their buckets.
        # Output format is:
        # <bucket idx, vector idx>
        bucket_vector = vector_bucket.map(lambda x: (x[1], x[0])).cache()
        # Joins indices up with original data to provide clustering results.
        # Output format is:
        # <bucket idx, list of vectors>
        buckets = zdata.map(lambda x: (x[1], x[0])).join(vector_bucket) \
            .map(lambda x: (x[1][1], x[1][0])).groupByKey().cache()
        # Computes Jaccard similarity of each bucket.
        scores = buckets.map(distance_metric).cache()
        # Update the class fields at the end to avoid inconsistencies.
        # __init__ declares the attribute as `sigs`; assign it here and keep
        # the old `signatures` name as an alias for backward compatibility.
        self.sigs = sigs
        self.signatures = sigs
        self.bands = bands
        self.vectors_buckets = vector_bucket
        self.buckets_vectors = bucket_vector
        self.buckets = buckets
        self.scores = scores
| [
"functools.partial",
"numpy.array",
"numpy.random.random_integers",
"numpy.long"
] | [((3179, 3238), 'functools.partial', 'functools.partial', (['hasher.minhash'], {'a': 's[0]', 'b': 's[1]', 'p': 'p', 'm': 'm'}), '(hasher.minhash, a=s[0], b=s[1], p=p, m=m)\n', (3196, 3238), False, 'import functools\n'), ((856, 867), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (864, 867), True, 'import numpy as np\n'), ((3059, 3105), 'numpy.random.random_integers', 'np.random.random_integers', (['p'], {'size': 'self.budget'}), '(p, size=self.budget)\n', (3084, 3105), True, 'import numpy as np\n'), ((3107, 3156), 'numpy.random.random_integers', 'np.random.random_integers', (['(0)', 'p'], {'size': 'self.budget'}), '(0, p, size=self.budget)\n', (3132, 3156), True, 'import numpy as np\n'), ((4266, 4276), 'numpy.long', 'np.long', (['y'], {}), '(y)\n', (4273, 4276), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset, load_demo_dataset
from ml_tooling.transformers import DFStandardScaler
from ml_tooling.utils import DataType
class TestDemoDatasetModule:
    """Tests for the demo-dataset loaders (bundled iris and openml fetches)."""

    @pytest.fixture
    def load_dataset_iris(self) -> Dataset:
        """The bundled iris data wrapped in a Dataset."""
        return load_demo_dataset("iris")

    @pytest.fixture
    def iris_df(self):
        """Reference iris features/target taken straight from scikit-learn."""
        raw = load_iris()
        frame = pd.DataFrame(data=raw.data, columns=raw.feature_names)
        return frame, raw.target

    def test_repr_is_correct_load(self, load_dataset_iris: Dataset):
        assert str(load_dataset_iris) == "<IrisData - Dataset>"

    def test_dataset_return_correct_x_attribute(
        self, load_dataset_iris: Dataset, iris_df: Tuple[pd.DataFrame, DataType]
    ):
        expected_x, _ = iris_df
        pd.testing.assert_frame_equal(load_dataset_iris.x, expected_x)

    def test_dataset_return_correct_y_attribute(
        self, load_dataset_iris: Dataset, iris_df: Tuple[pd.DataFrame, DataType]
    ):
        _, expected_y = iris_df
        assert np.array_equal(load_dataset_iris.y, expected_y)

    def test_dataset_from_fetchopenml_works(self):
        dataset = load_demo_dataset("openml", name="miceprotein")
        assert len(dataset.x) == 1080

    def test_dataset_x_from_fetchopenml_with_parameters_works(self):
        dataset = load_demo_dataset(
            "openml", name="blood-transfusion-service-center", target_column="V1"
        )
        assert dataset.x.shape == (748, 4)

    def test_dataset_y_from_fetchopenml_with_two_target_columns_works(self):
        dataset = load_demo_dataset(
            "openml",
            name="blood-transfusion-service-center",
            target_column=["V1", "V2"],
        )
        assert dataset.y.shape == (748, 2)

    def test_load_prediction_data_works_as_expected(self):
        dataset = load_demo_dataset("iris")
        dataset.create_train_test(stratify=True)
        pipeline = Pipeline([("scale", DFStandardScaler())])
        model = Model(LogisticRegression(), feature_pipeline=pipeline)
        model.train_estimator(dataset)
        result = model.make_prediction(dataset, 5)
        expected = pd.DataFrame({"Prediction": [0]})
        pd.testing.assert_frame_equal(result, expected, check_dtype=False)
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"pandas.testing.assert_frame_equal",
"ml_tooling.data.load_demo_dataset",
"sklearn.linear_model.LogisticRegression",
"ml_tooling.transformers.DFStandardScaler",
"numpy.array_equal"
] | [((494, 519), 'ml_tooling.data.load_demo_dataset', 'load_demo_dataset', (['"""iris"""'], {}), "('iris')\n", (511, 519), False, 'from ml_tooling.data import Dataset, load_demo_dataset\n'), ((584, 595), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (593, 595), False, 'from sklearn.datasets import load_iris\n'), ((1078, 1140), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['load_dataset_iris.x', 'x_expected'], {}), '(load_dataset_iris.x, x_expected)\n', (1107, 1140), True, 'import pandas as pd\n'), ((1335, 1382), 'numpy.array_equal', 'np.array_equal', (['load_dataset_iris.y', 'y_expected'], {}), '(load_dataset_iris.y, y_expected)\n', (1349, 1382), True, 'import numpy as np\n'), ((1453, 1500), 'ml_tooling.data.load_demo_dataset', 'load_demo_dataset', (['"""openml"""'], {'name': '"""miceprotein"""'}), "('openml', name='miceprotein')\n", (1470, 1500), False, 'from ml_tooling.data import Dataset, load_demo_dataset\n'), ((1627, 1719), 'ml_tooling.data.load_demo_dataset', 'load_demo_dataset', (['"""openml"""'], {'name': '"""blood-transfusion-service-center"""', 'target_column': '"""V1"""'}), "('openml', name='blood-transfusion-service-center',\n target_column='V1')\n", (1644, 1719), False, 'from ml_tooling.data import Dataset, load_demo_dataset\n'), ((1909, 2009), 'ml_tooling.data.load_demo_dataset', 'load_demo_dataset', (['"""openml"""'], {'name': '"""blood-transfusion-service-center"""', 'target_column': "['V1', 'V2']"}), "('openml', name='blood-transfusion-service-center',\n target_column=['V1', 'V2'])\n", (1926, 2009), False, 'from ml_tooling.data import Dataset, load_demo_dataset\n'), ((2206, 2231), 'ml_tooling.data.load_demo_dataset', 'load_demo_dataset', (['"""iris"""'], {}), "('iris')\n", (2223, 2231), False, 'from ml_tooling.data import Dataset, load_demo_dataset\n'), ((2539, 2572), 'pandas.DataFrame', 'pd.DataFrame', (["{'Prediction': [0]}"], {}), "({'Prediction': [0]})\n", (2551, 2572), True, 'import pandas as pd\n'), 
((2581, 2647), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['result', 'expected'], {'check_dtype': '(False)'}), '(result, expected, check_dtype=False)\n', (2610, 2647), True, 'import pandas as pd\n'), ((625, 691), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'iris_data.data', 'columns': 'iris_data.feature_names'}), '(data=iris_data.data, columns=iris_data.feature_names)\n', (637, 691), True, 'import pandas as pd\n'), ((2372, 2392), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2390, 2392), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2328, 2346), 'ml_tooling.transformers.DFStandardScaler', 'DFStandardScaler', ([], {}), '()\n', (2344, 2346), False, 'from ml_tooling.transformers import DFStandardScaler\n')] |
# ---------------------------------------------------------------
# dense_reppoints_target.py
# Set-up time: 2020/9/24 22:40
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: <EMAIL> [OR] <EMAIL>
# ---------------------------------------------------------------
import cv2
import mmcv
import numpy as np
import torch
from mmdet.core.bbox import assign_and_sample, build_assigner, PseudoSampler
from mmdet.core.utils import multi_apply
def dense_reppoints_target(proposals_list,
                           proposals_pts_list,
                           valid_flag_list,
                           gt_bboxes_list,
                           gt_masks_list,
                           img_metas,
                           cfg,
                           gt_bboxes_ignore_list=None,
                           gt_labels_list=None,
                           label_channels=1,
                           sampling=True,
                           unmap_outputs=True,
                           num_pts=49, ):
    """Compute refinement and classification targets for points.

    Args:
        proposals_list (list(list)): Multi level bounding boxes of each image.
        proposals_pts_list (list(list)): Multi level points of each image.
        valid_flag_list (list(list)): Multi level valid flags of each image.
        gt_bboxes_list (list(Tensor)): Ground truth bboxes of each image.
        gt_masks_list (list): Ground truth masks of each image.
        img_metas (list(dict)): Meta info of each image.
        cfg (dict): Train sample configs.
        gt_bboxes_ignore_list (list, optional): Ignored gt bboxes per image.
        gt_labels_list (list(Tensor), optional): Ground truth labels per image.
        label_channels (int): Number of label channels.
        sampling (bool): Whether to use a sampler (passed through to the
            per-image target function).
        unmap_outputs (bool): Whether to map per-image outputs back to the
            full set of proposals.
        num_pts (int): Number of points in a point set.

    Returns:
        tuple: (labels_list, label_weights_list, bbox_gt_list, mask_gt_list,
            mask_gt_label_list, proposals_list, proposal_weights_list,
            num_total_pos, num_total_neg), all organized by feature level,
            or None when any image yields no valid points.
    """
    num_imgs = len(img_metas)
    assert len(proposals_list) == len(valid_flag_list) == len(proposals_pts_list) == num_imgs
    # points number of multi levels
    num_level_proposals = [points.size(0) for points in proposals_list[0]]
    num_level_proposals_list = [num_level_proposals] * num_imgs
    # concat all level points and flags to a single tensor (per image)
    for i in range(num_imgs):
        assert len(proposals_list[i]) == len(valid_flag_list[i])
        proposals_list[i] = torch.cat(proposals_list[i])
        valid_flag_list[i] = torch.cat(valid_flag_list[i])
        proposals_pts_list[i] = torch.cat(proposals_pts_list[i])
    # compute targets for each image; fill in missing optional lists first
    if gt_bboxes_ignore_list is None:
        gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
    if gt_labels_list is None:
        gt_labels_list = [None for _ in range(num_imgs)]
    (all_labels, all_label_weights, all_bbox_gt, all_mask_gt_index, all_mask_gt, all_mask_gt_label, all_proposals,
     all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
        dense_reppoints_target_sinle,
        proposals_list,
        proposals_pts_list,
        num_level_proposals_list,
        valid_flag_list,
        gt_bboxes_list,
        gt_masks_list,
        gt_bboxes_ignore_list,
        gt_labels_list,
        num_pts=num_pts,
        cfg=cfg,
        label_channels=label_channels,
        sampling=sampling,
        unmap_outputs=unmap_outputs)
    # no valid points in some image -> no targets at all
    if any([labels is None for labels in all_labels]):
        return None
    # sampled points of all images (max(..., 1) avoids a zero normalizer)
    num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
    num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
    # regroup per-image tensors into per-level tensors
    labels_list = images_to_levels(all_labels, num_level_proposals, keep_dim=True)
    label_weights_list = images_to_levels(all_label_weights, num_level_proposals, keep_dim=True)
    bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals, keep_dim=True)
    proposals_list = images_to_levels(all_proposals, num_level_proposals, keep_dim=True)
    proposal_weights_list = images_to_levels(all_proposal_weights, num_level_proposals, keep_dim=True)
    # mask targets are gathered through the (1-based) per-proposal gt index
    mask_gt_index_list = images_to_levels(all_mask_gt_index, num_level_proposals, keep_dim=True)
    mask_gt_list = mask_to_levels(all_mask_gt, mask_gt_index_list)
    mask_gt_label_list = mask_to_levels(all_mask_gt_label, mask_gt_index_list)
    return (labels_list, label_weights_list, bbox_gt_list, mask_gt_list, mask_gt_label_list, proposals_list,
            proposal_weights_list, num_total_pos, num_total_neg)
def images_to_levels(target, num_level_grids, keep_dim=False):
    """
    Convert per-image targets into per-level targets.

    [target_img0, target_img1, ...] -> [target_level0, target_level1, ...]
    """
    stacked = torch.stack(target, 0)
    # Compute the (start, end) column span of every level.
    spans = []
    start = 0
    for count in num_level_grids:
        spans.append((start, start + count))
        start += count
    if keep_dim:
        return [stacked[:, s:e] for s, e in spans]
    return [stacked[:, s:e].squeeze(0) for s, e in spans]
def mask_to_levels(target, mask_index_list):
    """
    Gather per-image ground-truth entries for every level using the
    1-based indices stored in ``mask_index_list`` (0 means unassigned).
    """
    per_level = []
    for mask_index in mask_index_list:
        per_image = []
        for img_id in range(mask_index.shape[0]):
            pos = mask_index[img_id]
            pos = pos[pos > 0]
            # Indices are stored 1-based, hence the -1 shift.
            per_image.append(target[img_id][pos - 1])
        per_level.append(per_image)
    return per_level
def dense_reppoints_target_sinle(flat_proposals,
                                 flat_proposals_pts,
                                 num_level_proposals,
                                 valid_flags,
                                 gt_bboxes,
                                 gt_masks,
                                 gt_bboxes_ignore,
                                 gt_labels,
                                 cfg,
                                 label_channels=1,
                                 sampling=True,
                                 unmap_outputs=True,
                                 num_pts=49):
    """Compute point targets for a single image.

    Note: the misspelled name ("sinle") is kept because callers reference it.

    Args:
        flat_proposals (Tensor): All proposals of the image, concatenated
            over levels.
        flat_proposals_pts (Tensor): All proposal point sets of the image.
        num_level_proposals (list[int]): Proposal count per level.
        valid_flags (Tensor): Valid flags for each proposal.
        gt_bboxes (Tensor): Ground truth bboxes of the image.
        gt_masks: Ground truth masks of the image.
        gt_bboxes_ignore (Tensor | None): Ignored gt bboxes.
        gt_labels (Tensor | None): Ground truth labels.
        cfg (dict): Train sample configs.
        label_channels (int): Number of label channels.
        sampling (bool): Whether to use assign_and_sample.
        unmap_outputs (bool): Whether to map outputs back to all proposals.
        num_pts (int): Number of points in a point set.

    Returns:
        tuple: (labels, label_weights, bbox_gt, mask_gt_index, mask_gt,
            mask_gt_label, pos_proposals, proposals_weights, pos_inds,
            neg_inds), or a tuple of 10 Nones when no proposal is valid.
    """
    inside_flags = valid_flags
    num_level_proposals_inside = get_num_level_proposals_inside(num_level_proposals, inside_flags)
    if not inside_flags.any():
        # The normal path returns a 10-tuple; the sentinel must match that
        # arity, otherwise multi_apply's zip/unpack breaks (was (None,) * 8).
        return (None,) * 10
    # assign gt and sample points
    proposals = flat_proposals[inside_flags, :]
    proposals_pts = flat_proposals_pts[inside_flags, :]
    if sampling:
        assign_result, sampling_result = assign_and_sample(
            proposals, gt_bboxes, gt_bboxes_ignore, None, cfg)
    else:
        bbox_assigner = build_assigner(cfg.assigner)
        if cfg.assigner.type != "ATSSAssigner":
            assign_result = bbox_assigner.assign(proposals, gt_bboxes, None, gt_labels)
        else:
            # ATSS additionally needs per-level proposal counts.
            assign_result = bbox_assigner.assign(proposals, num_level_proposals_inside, gt_bboxes, None, gt_labels)
        bbox_sampler = PseudoSampler()
        sampling_result = bbox_sampler.sample(assign_result, proposals,
                                              gt_bboxes)
    gt_ind = sampling_result.pos_assigned_gt_inds.cpu().numpy()
    # WARNING: eval() of a cfg-provided name; only trusted configs must reach
    # this point. Resolves e.g. 'distance_sample_pts' to a sampling function.
    sample_func = cfg.get('sample_func', 'distance_sample_pts')
    gt_pts_numpy = eval(sample_func)(gt_bboxes, gt_masks, cfg, num_pts)
    pts_label_list = []
    # np.int64 replaces the removed np.long alias.
    proposals_pos_pts = proposals_pts[sampling_result.pos_inds, :].detach().cpu().numpy().round().astype(np.int64)
    for i in range(len(gt_ind)):
        gt_mask = gt_masks[gt_ind[i]]
        h, w = gt_mask.shape
        pts_long = proposals_pos_pts[i]
        # Look up the mask label under every (x, y) point, clipped to the mask.
        _pts_label = gt_mask[pts_long[1::2].clip(0, h - 1), pts_long[0::2].clip(0, w - 1)]
        pts_label_list.append(_pts_label)
    del proposals_pos_pts
    if len(gt_ind) != 0:
        gt_pts = gt_bboxes.new_tensor(gt_pts_numpy)
        pos_gt_pts = gt_pts[gt_ind]
        pts_label = np.stack(pts_label_list, 0)
        pos_gt_pts_label = gt_bboxes.new_tensor(pts_label)
    else:
        pos_gt_pts = None
        pos_gt_pts_label = None
    num_valid_proposals = proposals.shape[0]
    bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
    mask_gt = proposals.new_zeros([0, num_pts * 2])
    mask_gt_label = proposals.new_zeros([0, num_pts]).long()
    mask_gt_index = proposals.new_zeros([num_valid_proposals, ], dtype=torch.long)
    pos_proposals = torch.zeros_like(proposals)
    proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
    labels = proposals.new_zeros(num_valid_proposals, dtype=torch.long)
    label_weights = proposals.new_zeros(num_valid_proposals, dtype=torch.float)
    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
    if len(pos_inds) > 0:
        pos_gt_bboxes = sampling_result.pos_gt_bboxes
        bbox_gt[pos_inds, :] = pos_gt_bboxes
        if pos_gt_pts is not None:
            mask_gt = pos_gt_pts.type(bbox_gt.type())
            # mask_gt_index is 1-based; 0 marks "no gt assigned".
            mask_gt_index[pos_inds] = torch.arange(len(pos_inds)).long().cuda() + 1
        if pos_gt_pts_label is not None:
            mask_gt_label = pos_gt_pts_label.long()
        pos_proposals[pos_inds, :] = proposals[pos_inds, :]
        proposals_weights[pos_inds, :] = 1.0
        if gt_labels is None:
            labels[pos_inds] = 1
        else:
            labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        if cfg.pos_weight <= 0:
            label_weights[pos_inds] = 1.0
        else:
            label_weights[pos_inds] = cfg.pos_weight
    if len(neg_inds) > 0:
        label_weights[neg_inds] = 1.0
    # map up to original set of grids
    if unmap_outputs:
        num_total_proposals = flat_proposals.size(0)
        labels = unmap(labels, num_total_proposals, inside_flags)
        label_weights = unmap(label_weights, num_total_proposals, inside_flags)
        bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
        mask_gt_index = unmap(mask_gt_index, num_total_proposals, inside_flags)
        pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags)
        proposals_weights = unmap(proposals_weights, num_total_proposals, inside_flags)
    return (labels, label_weights, bbox_gt, mask_gt_index, mask_gt, mask_gt_label, pos_proposals, proposals_weights,
            pos_inds, neg_inds)
def unmap(data, count, inds, fill=0):
    """
    Scatter a subset `data` back into a tensor covering the original `count`
    items, writing `fill` into all unselected positions.
    """
    if data.dim() == 1:
        full = data.new_full((count,), fill)
        full[inds] = data
        return full
    full = data.new_full((count,) + data.size()[1:], fill)
    full[inds, :] = data
    return full
def get_num_level_proposals_inside(num_level_proposals, inside_flags):
    """
    Count how many valid (inside) proposals fall into each pyramid level.
    """
    chunks = torch.split(inside_flags, num_level_proposals)
    return [int(chunk.sum()) for chunk in chunks]
def mask_to_poly(mask):
    """
    Convert a binary mask into flattened polygon contours, discarding
    degenerate contours with fewer than three points (<= 4 coordinates).
    """
    contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    flattened = (contour.flatten().tolist() for contour in contours)
    return [poly for poly in flattened if len(poly) > 4]
def distance_sample_pts(gt_bboxes, gt_masks, cfg, num_pts):
    """
    Sample pts based on distance transformation map.

    Args:
        gt_bboxes(list(Tensor)): ground-truth bounding box
        gt_masks(list(Mask)): ground-truth mask
        cfg(dict): sampling config
        num_pts(int): number of points
    Returns:
        numpy: the sampling points based on distance transform map
    """
    dist_sample_thr = cfg.get('dist_sample_thr', 3)
    pts_list = []
    pts_label_list = []
    for i in range(len(gt_bboxes)):
        x1, y1, x2, y2 = gt_bboxes[i].cpu().numpy().astype(np.int32)
        w = np.maximum(x2 - x1 + 1, 1)
        h = np.maximum(y2 - y1 + 1, 1)
        # Resize the box crop of the mask to a fixed working resolution.
        mask = mmcv.imresize(gt_masks[i][y1:y1 + h, x1:x1 + w],
                             (cfg.get('mask_size', 56), cfg.get('mask_size', 56)))
        polygons = mask_to_poly(mask)
        # Rasterize the contour into a 0/1 map (0 on the boundary lines) ...
        distance_map = np.ones(mask.shape).astype(np.uint8)
        for poly in polygons:
            # np.int64 replaces the removed np.int alias.
            poly = np.array(poly).astype(np.int64)
            for j in range(len(poly) // 2):
                x_0, y_0 = poly[2 * j:2 * j + 2]
                if j == len(poly) // 2 - 1:
                    # Close the polygon: last vertex connects to the first.
                    x_1, y_1 = poly[0:2]
                else:
                    x_1, y_1 = poly[2 * j + 2:2 * j + 4]
                cv2.line(distance_map, (x_0, y_0), (x_1, y_1), 0, thickness=2)
        # ... then keep only pixels within dist_sample_thr of the contour.
        roi_dist_map = cv2.distanceTransform(distance_map, cv2.DIST_L2, 3)
        con_index = np.stack(np.nonzero(roi_dist_map == 0)[::-1], axis=-1)
        roi_dist_map[roi_dist_map == 0] = 1
        roi_dist_map[roi_dist_map > dist_sample_thr] = 0
        index_y, index_x = np.nonzero(roi_dist_map > 0)
        index = np.stack([index_x, index_y], axis=-1)
        _len = index.shape[0]
        if len(con_index) == 0:
            # No contour at all (e.g. empty mask): fall back to zero points.
            pts = np.zeros([2 * num_pts])
        else:
            # Draw exactly num_pts samples: whole copies of the candidate set
            # plus a random remainder, shuffled and jittered inside the pixel.
            repeat = num_pts // _len
            mod = num_pts % _len
            perm = np.random.choice(_len, mod, replace=False)
            draw = [index.copy() for i in range(repeat)]
            draw.append(index[perm])
            draw = np.concatenate(draw, 0)
            draw = np.random.permutation(draw)
            draw = draw + np.random.rand(*draw.shape)
            # Map from the fixed working resolution back to image coordinates.
            x_scale = float(w) / cfg.get('mask_size', 56)
            y_scale = float(h) / cfg.get('mask_size', 56)
            draw[:, 0] = draw[:, 0] * x_scale + x1
            draw[:, 1] = draw[:, 1] * y_scale + y1
            pts = draw.reshape(2 * num_pts)
        pts_list.append(pts)
        # np.int64 replaces the removed np.long alias.
        pts_long = pts.astype(np.int64)
        pts_label = gt_masks[i][pts_long[1::2], pts_long[0::2]]
        pts_label_list.append(pts_label)
    pts_list = np.stack(pts_list, 0)
    return pts_list
"numpy.maximum",
"torch.cat",
"numpy.ones",
"cv2.line",
"mmdet.core.utils.multi_apply",
"numpy.random.choice",
"numpy.stack",
"torch.zeros_like",
"torch.split",
"mmdet.core.bbox.assign_and_sample",
"numpy.random.permutation",
"mmdet.core.bbox.PseudoSampler",
"cv2.distanceTransform",
"numpy... | [((2797, 3109), 'mmdet.core.utils.multi_apply', 'multi_apply', (['dense_reppoints_target_sinle', 'proposals_list', 'proposals_pts_list', 'num_level_proposals_list', 'valid_flag_list', 'gt_bboxes_list', 'gt_masks_list', 'gt_bboxes_ignore_list', 'gt_labels_list'], {'num_pts': 'num_pts', 'cfg': 'cfg', 'label_channels': 'label_channels', 'sampling': 'sampling', 'unmap_outputs': 'unmap_outputs'}), '(dense_reppoints_target_sinle, proposals_list,\n proposals_pts_list, num_level_proposals_list, valid_flag_list,\n gt_bboxes_list, gt_masks_list, gt_bboxes_ignore_list, gt_labels_list,\n num_pts=num_pts, cfg=cfg, label_channels=label_channels, sampling=\n sampling, unmap_outputs=unmap_outputs)\n', (2808, 3109), False, 'from mmdet.core.utils import multi_apply\n'), ((4624, 4646), 'torch.stack', 'torch.stack', (['target', '(0)'], {}), '(target, 0)\n', (4635, 4646), False, 'import torch\n'), ((8375, 8402), 'torch.zeros_like', 'torch.zeros_like', (['proposals'], {}), '(proposals)\n', (8391, 8402), False, 'import torch\n'), ((10885, 10931), 'torch.split', 'torch.split', (['inside_flags', 'num_level_proposals'], {}), '(inside_flags, num_level_proposals)\n', (10896, 10931), False, 'import torch\n'), ((14080, 14101), 'numpy.stack', 'np.stack', (['pts_list', '(0)'], {}), '(pts_list, 0)\n', (14088, 14101), True, 'import numpy as np\n'), ((2232, 2260), 'torch.cat', 'torch.cat', (['proposals_list[i]'], {}), '(proposals_list[i])\n', (2241, 2260), False, 'import torch\n'), ((2291, 2320), 'torch.cat', 'torch.cat', (['valid_flag_list[i]'], {}), '(valid_flag_list[i])\n', (2300, 2320), False, 'import torch\n'), ((2354, 2386), 'torch.cat', 'torch.cat', (['proposals_pts_list[i]'], {}), '(proposals_pts_list[i])\n', (2363, 2386), False, 'import torch\n'), ((6506, 6574), 'mmdet.core.bbox.assign_and_sample', 'assign_and_sample', (['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'None', 'cfg'], {}), '(proposals, gt_bboxes, gt_bboxes_ignore, None, cfg)\n', (6523, 6574), False, 'from 
mmdet.core.bbox import assign_and_sample, build_assigner, PseudoSampler\n'), ((6625, 6653), 'mmdet.core.bbox.build_assigner', 'build_assigner', (['cfg.assigner'], {}), '(cfg.assigner)\n', (6639, 6653), False, 'from mmdet.core.bbox import assign_and_sample, build_assigner, PseudoSampler\n'), ((6948, 6963), 'mmdet.core.bbox.PseudoSampler', 'PseudoSampler', ([], {}), '()\n', (6961, 6963), False, 'from mmdet.core.bbox import assign_and_sample, build_assigner, PseudoSampler\n'), ((7887, 7914), 'numpy.stack', 'np.stack', (['pts_label_list', '(0)'], {}), '(pts_label_list, 0)\n', (7895, 7914), True, 'import numpy as np\n'), ((12009, 12035), 'numpy.maximum', 'np.maximum', (['(x2 - x1 + 1)', '(1)'], {}), '(x2 - x1 + 1, 1)\n', (12019, 12035), True, 'import numpy as np\n'), ((12049, 12075), 'numpy.maximum', 'np.maximum', (['(y2 - y1 + 1)', '(1)'], {}), '(y2 - y1 + 1, 1)\n', (12059, 12075), True, 'import numpy as np\n'), ((12773, 12824), 'cv2.distanceTransform', 'cv2.distanceTransform', (['distance_map', 'cv2.DIST_L2', '(3)'], {}), '(distance_map, cv2.DIST_L2, 3)\n', (12794, 12824), False, 'import cv2\n'), ((13034, 13062), 'numpy.nonzero', 'np.nonzero', (['(roi_dist_map > 0)'], {}), '(roi_dist_map > 0)\n', (13044, 13062), True, 'import numpy as np\n'), ((13080, 13117), 'numpy.stack', 'np.stack', (['[index_x, index_y]'], {'axis': '(-1)'}), '([index_x, index_y], axis=-1)\n', (13088, 13117), True, 'import numpy as np\n'), ((13201, 13224), 'numpy.zeros', 'np.zeros', (['[2 * num_pts]'], {}), '([2 * num_pts])\n', (13209, 13224), True, 'import numpy as np\n'), ((13332, 13374), 'numpy.random.choice', 'np.random.choice', (['_len', 'mod'], {'replace': '(False)'}), '(_len, mod, replace=False)\n', (13348, 13374), True, 'import numpy as np\n'), ((13491, 13514), 'numpy.concatenate', 'np.concatenate', (['draw', '(0)'], {}), '(draw, 0)\n', (13505, 13514), True, 'import numpy as np\n'), ((13535, 13562), 'numpy.random.permutation', 'np.random.permutation', (['draw'], {}), '(draw)\n', (13556, 
13562), True, 'import numpy as np\n'), ((12288, 12307), 'numpy.ones', 'np.ones', (['mask.shape'], {}), '(mask.shape)\n', (12295, 12307), True, 'import numpy as np\n'), ((12686, 12748), 'cv2.line', 'cv2.line', (['distance_map', '(x_0, y_0)', '(x_1, y_1)', '(0)'], {'thickness': '(2)'}), '(distance_map, (x_0, y_0), (x_1, y_1), 0, thickness=2)\n', (12694, 12748), False, 'import cv2\n'), ((12855, 12884), 'numpy.nonzero', 'np.nonzero', (['(roi_dist_map == 0)'], {}), '(roi_dist_map == 0)\n', (12865, 12884), True, 'import numpy as np\n'), ((13590, 13617), 'numpy.random.rand', 'np.random.rand', (['*draw.shape'], {}), '(*draw.shape)\n', (13604, 13617), True, 'import numpy as np\n'), ((12376, 12390), 'numpy.array', 'np.array', (['poly'], {}), '(poly)\n', (12384, 12390), True, 'import numpy as np\n')] |
#from planenet code is adapted for planercnn code
import cv2
import numpy as np
# Nominal frame resolution (landscape 640x480).
# NOTE(review): WIDTH/HEIGHT, ALL_TITLES and ALL_METHODS are not referenced
# anywhere in this chunk — presumably consumed by evaluation code elsewhere
# in the project; verify before removing.
WIDTH = 640
HEIGHT = 480
ALL_TITLES = ['PlaneNet']
ALL_METHODS = [('sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0', '', 0, 2)]
def predict3D(folder, index, image, depth, segmentation, planes, info):
    """Export a textured 3D model for one frame.

    Thin wrapper that forwards everything to writePLYFile(), which emits
    the texture PNG and the PLY mesh into `folder`, then reports completion.
    """
    writePLYFile(folder, index, image, depth, segmentation, planes, info)
    print("done")
def getCameraFromInfo(info):
    """Unpack camera intrinsics from a flattened info vector.

    Returns a dict with focal lengths (fx, fy), principal point (cx, cy),
    image size (width, height) and the depth_shift scale, read from the
    fixed positions used throughout this pipeline.
    """
    layout = (('fx', 0), ('fy', 5), ('cx', 2), ('cy', 6),
              ('width', 16), ('height', 17), ('depth_shift', 18))
    return {field: info[position] for field, position in layout}
def writePLYFile(folder, index, image, depth, segmentation, planes, info):
    """Write a textured PLY mesh for one frame.

    Emits two files into `folder`: '<index>_model_texture.png' (the texture,
    i.e. `image` as given) and '<index>_model.ply' (an ascii PLY whose
    vertices come from back-projecting `depth` through the camera intrinsics
    in `info`, with triangle faces connecting neighbouring pixels that belong
    to the same or to geometrically compatible segments).

    NOTE(review): the inner loop still contains leftover debug print() calls
    (the "line1" block) — consider removing them before production use.
    """
    imageFilename = str(index) + '_model_texture.png'
    cv2.imwrite(folder + '/' + imageFilename, image)
    #print("target-",folder + '/' + imageFilename, image)
    width = image.shape[1]
    height = image.shape[0]
    numPlanes = planes.shape[0]
    camera = getCameraFromInfo(info)
    #camera = getNYURGBDCamera()
    #camera = getSUNCGCamera()
    # Back-project every pixel: (u, v, depth) -> camera-space (X, Y, Z).
    urange = (np.arange(width, dtype=np.float32) / width * camera['width'] - camera['cx']) / camera['fx']
    urange = urange.reshape(1, -1).repeat(height, 0)
    vrange = (np.arange(height, dtype=np.float32) / height * camera['height'] - camera['cy']) / camera['fy']
    vrange = vrange.reshape(-1, 1).repeat(width, 1)
    X = depth * urange
    Y = depth
    Z = -depth * vrange
    XYZ = np.stack([X, Y, Z], axis=2)
    #focalLength = 517.97
    faces = []
    #minDepthDiff = 0.15
    #maxDepthDiff = 0.3
    #occlusionBoundary = boundaries[:, :, 1]
    # Distance thresholds (scene units, presumably metres — verify) used to
    # decide whether neighbouring pixels may be joined into one face.
    betweenRegionThreshold = 0.1
    nonPlanarRegionThreshold = 0.02
    planesD = np.linalg.norm(planes, axis=1, keepdims=True)
    planeNormals = -planes / np.maximum(planesD, 1e-4)
    # NOTE(review): a negative croppingRatio makes the border-skip test below
    # always false, i.e. no border cropping happens — confirm this is intended.
    croppingRatio = -0.05
    dotThreshold = np.cos(np.deg2rad(30))
    # Build triangle faces from each pixel and its right/down/diagonal
    # neighbours when they lie in compatible segments.
    for y in range(height - 1):
        for x in range(width - 1):
          if y < height * croppingRatio or y > height * (1 - croppingRatio) or x < width * croppingRatio or x > width * (1 - croppingRatio):
              continue
          segmentIndex = segmentation[y][x]
          if segmentIndex == -1:
              continue
          point = XYZ[y][x]
          #neighborPixels = []
          validNeighborPixels = []
          for neighborPixel in [(x, y + 1), (x + 1, y), (x + 1, y + 1)]:
              neighborSegmentIndex = segmentation[neighborPixel[1]][neighborPixel[0]]
              if neighborSegmentIndex == segmentIndex:
                  if segmentIndex < numPlanes:
                      validNeighborPixels.append(neighborPixel)
                  else:
                      neighborPoint = XYZ[neighborPixel[1]][neighborPixel[0]]
                      if np.linalg.norm(neighborPoint - point) < nonPlanarRegionThreshold:
                          validNeighborPixels.append(neighborPixel)
                          pass
                      pass
              else:
                  neighborPoint = XYZ[neighborPixel[1]][neighborPixel[0]]
                  if segmentIndex < numPlanes and neighborSegmentIndex < numPlanes:
                      print("line1",planeNormals[segmentIndex].shape ,neighborPoint.shape,planesD[segmentIndex].shape )
                      print((np.dot(planeNormals[segmentIndex],neighborPoint)))
                      print(neighborPoint + planesD[segmentIndex])
                      print(betweenRegionThreshold or abs(np.dot(planeNormals[neighborSegmentIndex], point) + planesD[neighborSegmentIndex]))
                      print(betweenRegionThreshold and np.abs(np.dot(planeNormals[segmentIndex])))
                      print(planeNormals[neighborSegmentIndex] , dotThreshold)
                      # if (abs(np.dot(planeNormals[segmentIndex], neighborPoint) + planesD[segmentIndex]) < betweenRegionThreshold or abs(np.dot(planeNormals[neighborSegmentIndex], point) + planesD[neighborSegmentIndex]) < betweenRegionThreshold) and np.abs(np.dot(planeNormals[segmentIndex], planeNormals[neighborSegmentIndex])) < dotThreshold:
                      #     validNeighborPixels.append(neighborPixel)
                      #     pass
                      if np.linalg.norm(neighborPoint - point) < betweenRegionThreshold:
                          validNeighborPixels.append(neighborPixel)
                          pass
                      pass
                  else:
                      #print("reach1")
                      if np.linalg.norm(neighborPoint - point) < betweenRegionThreshold:
                          validNeighborPixels.append(neighborPixel)
                          pass
                      pass
                  pass
              continue
          if len(validNeighborPixels) == 3:
              faces.append((x, y, x + 1, y + 1, x + 1, y))
              faces.append((x, y, x, y + 1, x + 1, y + 1))
          elif len(validNeighborPixels) == 2 and segmentIndex < numPlanes:
              faces.append((x, y, validNeighborPixels[0][0], validNeighborPixels[0][1], validNeighborPixels[1][0], validNeighborPixels[1][1]))
              pass
          continue
        continue
    # Emit the ascii PLY: header, one vertex per pixel, then the face list
    # with per-corner texture coordinates.
    with open(folder + '/' + str(index) + '_model.ply', 'w') as f:
        header = """ply
format ascii 1.0
comment VCGLIB generated
comment TextureFile """
        header += imageFilename
        header += """
element vertex """
        header += str(width * height)
        header += """
property float x
property float y
property float z
element face """
        header += str(len(faces))
        header += """
property list uchar int vertex_indices
property list uchar float texcoord
end_header
"""
        f.write(header)
        for y in range(height):
            for x in range(width):
                segmentIndex = segmentation[y][x]
                if segmentIndex == -1:
                    f.write("0.0 0.0 0.0\n")
                    continue
                point = XYZ[y][x]
                X = point[0]
                Y = point[1]
                Z = point[2]
                #Y = depth[y][x]
                #X = Y / focalLength * (x - width / 2) / width * 640
                #Z = -Y / focalLength * (y - height / 2) / height * 480
                f.write(str(X) + ' ' + str(Z) + ' ' + str(-Y) + '\n')
                continue
            continue
        for face in faces:
            f.write('3 ')
            for c in range(3):
                f.write(str(face[c * 2 + 1] * width + face[c * 2]) + ' ')
                continue
            f.write('6 ')
            for c in range(3):
                f.write(str(float(face[c * 2]) / width) + ' ' + str(1 - float(face[c * 2 + 1]) / height) + ' ')
                continue
            f.write('\n')
            continue
        # NOTE(review): redundant — the with-block already closes f.
        f.close()
        pass
    return
def evaluatePlanes(options):
    """Visualize / export predictions for each image and application type.

    NOTE(review): this function references several names that are not defined
    in this chunk (`predictions`, `drawSegmentationImage`, `drawDepthImage`,
    `drawMaskImage`, `copyLogoVideo`, `addRulerComplete`, `copyLogo`,
    `copyWallTexture`, `writeHTML`) and reads `pred_dict` before the
    `for method_index, pred_dict in enumerate(predictions)` loop assigns it —
    presumably copied from the original PlaneNet evaluate script; verify the
    missing imports/globals before using it.
    """
    for image_index in range(options.visualizeImages):
        if options.applicationType == 'grids':
            cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_image.png', pred_dict['image'][image_index])
            segmentation = predictions[0]['segmentation'][image_index]
            #segmentation = np.argmax(np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2), -1)
            segmentationImage = drawSegmentationImage(segmentation, blackIndex=options.numOutputPlanes)
            #cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_segmentation_pred_' + str(0) + '.png', segmentationImage)
            segmentationImageBlended = (segmentationImage * 0.7 + pred_dict['image'][image_index] * 0.3).astype(np.uint8)
            cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_segmentation_pred_blended_' + str(0) + '.png', segmentationImageBlended)
            continue
        cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_image.png', pred_dict['image'][image_index])
        info = pred_dict['info'][image_index]
        # One pass per prediction method: dump depth/segmentation renderings,
        # then run the selected application on the segmented planes.
        for method_index, pred_dict in enumerate(predictions):
            cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
            if 'pixelwise' in options.methods[method_index][1]:
                continue
            allSegmentations = pred_dict['segmentation'][image_index]
            segmentation = np.argmax(allSegmentations, axis=-1)
            #segmentation = np.argmax(np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2), -1)
            segmentationImage = drawSegmentationImage(segmentation, blackIndex=options.numOutputPlanes)
            cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_segmentation_pred_' + str(method_index) + '.png', segmentationImage)
            segmentationImageBlended = (segmentationImage * 0.7 + pred_dict['image'][image_index] * 0.3).astype(np.uint8)
            cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_segmentation_pred_blended_' + str(method_index) + '.png', segmentationImageBlended)
            segmentationImageBlended = np.minimum(segmentationImage * 0.3 + pred_dict['image'][image_index] * 0.7, 255).astype(np.uint8)
            if options.imageIndex >= 0:
                for planeIndex in range(options.numOutputPlanes):
                    cv2.imwrite(options.test_dir + '/mask_' + str(planeIndex) + '.png', drawMaskImage(segmentation == planeIndex))
                    continue
                if options.applicationType == 'logo_video':
                    copyLogoVideo(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index], textureType='logo')
                elif options.applicationType == 'wall_video':
                    if options.wallIndices == '':
                        print('please specify wall indices')
                        exit(1)
                        pass
                    wallIndices = [int(value) for value in options.wallIndices.split(',')]
                    copyLogoVideo(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index], textureType='wall', wallInds=wallIndices)
                elif options.applicationType == 'ruler':
                    if options.startPixel == '' or options.endPixel == '':
                        print('please specify start pixel and end pixel')
                        exit(1)
                        pass
                    startPixel = tuple([int(value) for value in options.startPixel.split(',')])
                    endPixel = tuple([int(value) for value in options.endPixel.split(',')])
                    addRulerComplete(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index], startPixel=startPixel, endPixel=endPixel, fixedEndPoint=True, numFrames=1000)
                elif options.applicationType == 'logo_texture':
                    resultImage = copyLogo(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index])
                    cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_result.png', resultImage)
                elif options.applicationType == 'wall_texture':
                    if options.wallIndices == '':
                        print('please specify wall indices')
                        exit(1)
                        pass
                    wallIndices = [int(value) for value in options.wallIndices.split(',')]
                    resultImage = copyWallTexture(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index], wallPlanes=wallIndices)
                    cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_result.png', resultImage)
                elif options.applicationType == 'TV':
                    if options.wallIndices == '':
                        print('please specify wall indices')
                        exit(1)
                        pass
                    wallIndices = [int(value) for value in options.wallIndices.split(',')]
                    copyLogoVideo(options.textureImageFilename, options.test_dir, image_index + options.startIndex, pred_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['plane'][image_index], segmentation, pred_dict['info'][image_index], textureType='TV', wallInds=wallIndices)
                elif options.applicationType == 'pool':
                    print('dump')
                    newPlanes = []
                    newSegmentation = np.full(segmentation.shape, -1)
                    newPlaneIndex = 0
                    planes = pred_dict['plane'][image_index]
                    for planeIndex in range(options.numOutputPlanes):
                        mask = segmentation == planeIndex
                        if mask.sum() > 0:
                            newPlanes.append(planes[planeIndex])
                            newSegmentation[mask] = newPlaneIndex
                            newPlaneIndex += 1
                            pass
                        continue
                    np.save('pool/dump/' + str(image_index + options.startIndex) + '_planes.npy', np.stack(newPlanes, axis=0))
                    #print(global_gt['non_plane_mask'].shape)
                    np.save('pool/dump/' + str(image_index + options.startIndex) + '_segmentation.npy', newSegmentation)
                    cv2.imwrite('pool/dump/' + str(image_index + options.startIndex) + '_image.png', pred_dict['image'][image_index])
                    depth = pred_dict['depth'][image_index]
                    np.save('pool/dump/' + str(image_index + options.startIndex) + '_depth.npy', depth)
                    info = pred_dict['info'][image_index]
                    #normal = calcNormal(depth, info)
                    #np.save('test/' + str(image_index + options.startIndex) + '_normal.npy', normal)
                    np.save('pool/dump/' + str(image_index + options.startIndex) + '_info.npy', info)
                    exit(1)
                else:
                    print('please specify application type')
                    np_mask = (segmentation == options.numOutputPlanes).astype(np.float32)
                    np_depth = pred_dict['np_depth'][image_index].squeeze()
                    np_depth = cv2.resize(np_depth, (np_mask.shape[1], np_mask.shape[0]))
                    cv2.imwrite(options.test_dir + '/' + str(image_index + options.startIndex) + '_np_depth_pred_' + str(method_index) + '.png', drawDepthImage(np_depth * np_mask))
                    # folder, \ - directory - done
                    # index, \ - idx number of image - done
                    # image, \ - segmentationImageBlended
                    # depth, \ - pred_dict['depth'][image_index] - done
                    # segmentation, \ - segmentation
                    # planes, \ - pred_dict['plane'][image_index]
                    # info - pred_dict['info'][image_index] - done
                    writePLYFile(options.test_dir, image_index + options.startIndex, segmentationImageBlended, pred_dict['depth'][image_index], segmentation, pred_dict['plane'][image_index], pred_dict['info'][image_index])
                    pass
                exit(1)
                pass
            continue
        continue
    writeHTML(options)
    return
if __name__=='__main__':
    # Flattened intrinsics vector; index layout matches getCameraFromInfo:
    # fx=0, fy=5, cx=2, cy=6, width=16, height=17, depth_shift=18.
    info = np.array([1.82e+03, 0.00e+00, 1.63e+03, 0.00e+00,\
    0.00e+00, 1.82e+03, 1.22e+03, 0.00e+00, 0.00e+00, 0.00e+00, \
    1.00e+00, 0.00e+00, 0.00e+00, 0.00e+00, 0.00e+00, 1.00e+00, 3.26e+03, 2.45e+03,\
    1.00e+03,5.00e+00])
    # NOTE(review): width/height overridden to 460x640 (portrait) while the
    # module constants say 640x480 — confirm this matches the input images.
    info[16] = 460
    info[17] = 640
    #image = cv2.imread("genrate_3dmodel/single_rgb_sample/12/12_segmentation_0_final.png") #x,x,3
    #depth = cv2.imread("genrate_3dmodel/single_rgb_sample/12/12_depth_0_final_ori.png",0) #x,x
    #segmentation = cv2.imread("genrate_3dmodel/single_rgb_sample/12/12_segmentation_0_final.png",0) #change it
    # print("segmentation shape",segmentation.shape)
    # cv2.imshow("seg1",segmentation)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    #planes = np.load("genrate_3dmodel/single_rgb_sample/12/12_plane_masks_0.npy") #change if its not working
    # NOTE(review): the segmentation PNG is loaded both as the "RGB" texture
    # image and (greyscale) as the segmentation map — see the todos below.
    image = cv2.imread("test/inference/0_segmentation_0_final.png") #x,x,3
    depth = cv2.imread("test/inference/0_depth_0_final_ori.png",0) #x,x
    segmentation = cv2.imread("test/inference/0_segmentation_0_final.png",0) #change it
    # print("segmentation shape",segmentation.shape)
    cv2.imshow("seg2",segmentation)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    planes = np.load("test/inference/0_plane_masks_0.npy") #change if its not working
    folder = "test2"
    index = 0
    predict3D(folder, index, image, depth, segmentation, planes, info)
    #todo
    # try to add focal length
# try to do with rgb based one | [
"numpy.stack",
"numpy.full",
"numpy.load",
"numpy.minimum",
"numpy.maximum",
"numpy.deg2rad",
"cv2.waitKey",
"cv2.imwrite",
"cv2.destroyAllWindows",
"numpy.argmax",
"cv2.imread",
"numpy.linalg.norm",
"numpy.array",
"numpy.arange",
"numpy.dot",
"cv2.imshow",
"cv2.resize"
] | [((995, 1043), 'cv2.imwrite', 'cv2.imwrite', (["(folder + '/' + imageFilename)", 'image'], {}), "(folder + '/' + imageFilename, image)\n", (1006, 1043), False, 'import cv2\n'), ((1695, 1722), 'numpy.stack', 'np.stack', (['[X, Y, Z]'], {'axis': '(2)'}), '([X, Y, Z], axis=2)\n', (1703, 1722), True, 'import numpy as np\n'), ((1961, 2006), 'numpy.linalg.norm', 'np.linalg.norm', (['planes'], {'axis': '(1)', 'keepdims': '(True)'}), '(planes, axis=1, keepdims=True)\n', (1975, 2006), True, 'import numpy as np\n'), ((16751, 16886), 'numpy.array', 'np.array', (['[1820.0, 0.0, 1630.0, 0.0, 0.0, 1820.0, 1220.0, 0.0, 0.0, 0.0, 1.0, 0.0, \n 0.0, 0.0, 0.0, 1.0, 3260.0, 2450.0, 1000.0, 5.0]'], {}), '([1820.0, 0.0, 1630.0, 0.0, 0.0, 1820.0, 1220.0, 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 0.0, 1.0, 3260.0, 2450.0, 1000.0, 5.0])\n', (16759, 16886), True, 'import numpy as np\n'), ((17592, 17647), 'cv2.imread', 'cv2.imread', (['"""test/inference/0_segmentation_0_final.png"""'], {}), "('test/inference/0_segmentation_0_final.png')\n", (17602, 17647), False, 'import cv2\n'), ((17667, 17722), 'cv2.imread', 'cv2.imread', (['"""test/inference/0_depth_0_final_ori.png"""', '(0)'], {}), "('test/inference/0_depth_0_final_ori.png', 0)\n", (17677, 17722), False, 'import cv2\n'), ((17746, 17804), 'cv2.imread', 'cv2.imread', (['"""test/inference/0_segmentation_0_final.png"""', '(0)'], {}), "('test/inference/0_segmentation_0_final.png', 0)\n", (17756, 17804), False, 'import cv2\n'), ((17872, 17904), 'cv2.imshow', 'cv2.imshow', (['"""seg2"""', 'segmentation'], {}), "('seg2', segmentation)\n", (17882, 17904), False, 'import cv2\n'), ((17908, 17922), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (17919, 17922), False, 'import cv2\n'), ((17927, 17950), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (17948, 17950), False, 'import cv2\n'), ((17965, 18010), 'numpy.load', 'np.load', (['"""test/inference/0_plane_masks_0.npy"""'], {}), "('test/inference/0_plane_masks_0.npy')\n", 
(17972, 18010), True, 'import numpy as np\n'), ((2036, 2063), 'numpy.maximum', 'np.maximum', (['planesD', '(0.0001)'], {}), '(planesD, 0.0001)\n', (2046, 2063), True, 'import numpy as np\n'), ((2119, 2133), 'numpy.deg2rad', 'np.deg2rad', (['(30)'], {}), '(30)\n', (2129, 2133), True, 'import numpy as np\n'), ((8890, 8926), 'numpy.argmax', 'np.argmax', (['allSegmentations'], {'axis': '(-1)'}), '(allSegmentations, axis=-1)\n', (8899, 8926), True, 'import numpy as np\n'), ((1317, 1351), 'numpy.arange', 'np.arange', (['width'], {'dtype': 'np.float32'}), '(width, dtype=np.float32)\n', (1326, 1351), True, 'import numpy as np\n'), ((1476, 1511), 'numpy.arange', 'np.arange', (['height'], {'dtype': 'np.float32'}), '(height, dtype=np.float32)\n', (1485, 1511), True, 'import numpy as np\n'), ((9645, 9730), 'numpy.minimum', 'np.minimum', (["(segmentationImage * 0.3 + pred_dict['image'][image_index] * 0.7)", '(255)'], {}), "(segmentationImage * 0.3 + pred_dict['image'][image_index] * 0.7, 255\n )\n", (9655, 9730), True, 'import numpy as np\n'), ((3067, 3104), 'numpy.linalg.norm', 'np.linalg.norm', (['(neighborPoint - point)'], {}), '(neighborPoint - point)\n', (3081, 3104), True, 'import numpy as np\n'), ((3602, 3651), 'numpy.dot', 'np.dot', (['planeNormals[segmentIndex]', 'neighborPoint'], {}), '(planeNormals[segmentIndex], neighborPoint)\n', (3608, 3651), True, 'import numpy as np\n'), ((4532, 4569), 'numpy.linalg.norm', 'np.linalg.norm', (['(neighborPoint - point)'], {}), '(neighborPoint - point)\n', (4546, 4569), True, 'import numpy as np\n'), ((4827, 4864), 'numpy.linalg.norm', 'np.linalg.norm', (['(neighborPoint - point)'], {}), '(neighborPoint - point)\n', (4841, 4864), True, 'import numpy as np\n'), ((3930, 3964), 'numpy.dot', 'np.dot', (['planeNormals[segmentIndex]'], {}), '(planeNormals[segmentIndex])\n', (3936, 3964), True, 'import numpy as np\n'), ((3782, 3831), 'numpy.dot', 'np.dot', (['planeNormals[neighborSegmentIndex]', 'point'], {}), 
'(planeNormals[neighborSegmentIndex], point)\n', (3788, 3831), True, 'import numpy as np\n'), ((13866, 13897), 'numpy.full', 'np.full', (['segmentation.shape', '(-1)'], {}), '(segmentation.shape, -1)\n', (13873, 13897), True, 'import numpy as np\n'), ((15646, 15704), 'cv2.resize', 'cv2.resize', (['np_depth', '(np_mask.shape[1], np_mask.shape[0])'], {}), '(np_depth, (np_mask.shape[1], np_mask.shape[0]))\n', (15656, 15704), False, 'import cv2\n'), ((14511, 14538), 'numpy.stack', 'np.stack', (['newPlanes'], {'axis': '(0)'}), '(newPlanes, axis=0)\n', (14519, 14538), True, 'import numpy as np\n')] |
from tqdm import tqdm
import pandas as pd
import numpy as np
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import random as rnd
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix, precision_recall_curve, auc, roc_auc_score, roc_curve, recall_score, classification_report
import sor_wc_wk_joint as sor
import time
from sklearn.decomposition import FastICA
import pickle
# Shared preprocessing objects reused (as globals) by run_setup() below.
scaler = StandardScaler()
pca = PCA(0.99)
# One CSV per risk category (read from ../data/NYTimes_data/ below).
risk_class_files = ['Drought.csv', 'Earthquakes.csv', 'Explosions.csv', 'Floods.csv', 'Forest_and_Brush_Fire.csv', 'Hazardous_and_Toxic_Substance.csv', 'Landslides.csv', 'Lighting.csv', 'Snowstorms.csv', 'Tornado.csv', 'Tropical Storms.csv', 'Volcanoes.csv', 'Water_Pollution.csv']
# Class ids start at 1; id 0 is reserved for the non-risk class below.
risk_class_dict = {}
for i in range(len(risk_class_files)):
    risk_class_dict[i+1] = risk_class_files[i]
def remove_label(docs):
    """Strip embedded label prefixes (e.g. '"1, ' or "'0, ") from documents.

    The raw CSV rows embed the class label at the start of the text in either
    a double- or single-quoted form; all four variants are removed (every
    occurrence, matching the original replace() behavior). The list is
    modified in place and also returned for convenience.

    Bug fix: the original chain stripped "'0, " twice and never "'1, ".
    """
    prefixes = ('"1, ', '"0, ', "'1, ", "'0, ")
    for i in range(len(docs)):
        text = docs[i]
        for prefix in prefixes:
            text = text.replace(prefix, '')
        docs[i] = text
    return docs
# Load the raw documents: one list per risk class plus the non-risk corpus.
risk_classes = {}
for risk_file in risk_class_files:
    print(risk_file)
    risk_classes[risk_file] = pd.read_csv('../data/NYTimes_data/'+risk_file, header = None)[0].tolist()
non_risk_file = 'non_risk_docs.csv'
non_risk_class = pd.read_csv('../data/NYTimes_data/'+non_risk_file, header = None)[0].tolist()
# Flatten into X (documents) / Y (integer labels): risk classes get 1..13,
# non-risk documents get label 0.
X = []
Y = []
class_id = 1
for risk_file in risk_class_files:
    X += risk_classes[risk_file]
    Y += [class_id] * len(risk_classes[risk_file])
    class_id += 1
X += non_risk_class
Y += [0] * len(non_risk_class)
X = remove_label(X)
# 1000-dim tf-idf features over word unigrams (letter-containing tokens only).
tfidf = TfidfVectorizer(max_features=1000, ngram_range=(1,1), stop_words='english', token_pattern=u'(?ui)\\b\\w*[a-z]+\\w*\\b')
features = tfidf.fit_transform(X).toarray()
labels = Y
def run_setup(features, labels, train_classes, test_classes, test_fold):
    """Train and save three wSOR models (raw tf-idf, PCA, ICA) for one fold.

    Splits the samples belonging to `train_classes` 80/20, fits
    sor.HierarchicalClassifierModel on three feature variants, and saves each
    model plus the split indices under test_data/.

    NOTE(review): relies on module-level globals `X`, `risk_class_files`,
    `scaler` and `pca`; `xtest`/`ytest` are computed but never used here.
    """
    features = np.array(features)
    labels = np.array(labels)
    # Keep only samples whose label is in the training-class set.
    xtrain = features[np.isin(labels,train_classes),:]
    ytrain = labels[np.isin(labels,train_classes)]
    xtest = features[np.isin(labels,test_classes),:]
    ytest = labels[np.isin(labels,test_classes)]
    train_ids = np.arange(len(ytrain))
    np.random.shuffle(train_ids)
    train_index = train_ids[:int(len(ytrain)*0.8)]
    # NOTE(review): the `+1` drops the sample at position int(len*0.8) from
    # both splits — likely an off-by-one; confirm before relying on counts.
    test_index = train_ids[int(len(ytrain)*0.8+1):]
    X_train, X_test = xtrain[train_index], xtrain[test_index]
    y_train, y_test = ytrain[train_index], ytrain[test_index]
    # Variant 1: the 1000-feature tf-idf passed in as `features`.
    model = sor.HierarchicalClassifierModel(input_size = X_train[0].size, num_classes = len(risk_class_files), learning_rate = 1e-3, num_epochs = 1000, batch_size = 100, model_name = 'wSOR', l1 = 0.1, l2 = 0)
    model.fit_wk(X_train, y_train)
    model.save('test_data/trained_model_'+str(test_fold) +'_wk.m')
    np.savez('test_data/test_' + str(test_fold) + '.npz', train_classes = train_classes, test_classes = test_classes, train_index = train_index, test_index = test_index)
    # Variant 2: full-vocabulary tf-idf recomputed on the global corpus `X`,
    # standardized and reduced with PCA (99% variance retained).
    tfidf = TfidfVectorizer(ngram_range=(1,1), stop_words='english', token_pattern=u'(?ui)\\b\\w*[a-z]+\\w*\\b')
    features = tfidf.fit_transform(X).toarray()
    features = np.array(features)
    xtrain = features[np.isin(labels,train_classes),:]
    ytrain = labels[np.isin(labels,train_classes)]
    X_train, X_test = xtrain[train_index], xtrain[test_index]
    y_train, y_test = ytrain[train_index], ytrain[test_index]
    scaler.fit(X_train)
    X_train_std = scaler.transform(X_train)
    pca.fit(X_train_std)
    model = sor.HierarchicalClassifierModel(input_size = pca.transform(X_train_std)[0].size, num_classes = len(risk_class_files), learning_rate = 1e-3, num_epochs = 1000, batch_size = 100, model_name = 'wSOR', l1 = 0.1, l2 = 0)
    model.fit_wk(pca.transform(X_train_std), y_train)
    model.save('test_data/trained_model_'+str(test_fold) + '_pca_wk.m')
    # Variant 3: FastICA with the same number of components as the PCA fit.
    transformer = FastICA(n_components=pca.n_components_, random_state=0, max_iter=500)
    xtrain_transformed = transformer.fit_transform(X_train_std)
    model = sor.HierarchicalClassifierModel(input_size = transformer.transform(X_train_std)[0].size, num_classes = len(risk_class_files), learning_rate = 1e-3, num_epochs = 1000, batch_size = 100, model_name = 'wSOR', l1 = 0.1, l2 = 0)
    model.fit_wk(transformer.transform(X_train_std), y_train)
    model.save('test_data/trained_model_'+str(test_fold) +'_ica_wk.m')
    # Persist the fitted ICA transformer for later test-time projection.
    pickle.dump(transformer, open('test_data/'+str(test_fold) + '_ica.sav', 'wb'), protocol=4)
class_ids = np.arange(14)
for test_fold in range(5):
    # Shuffling the slice works in place here because numpy basic slicing
    # returns a view; class 0 (non-risk) is deliberately never shuffled out.
    np.random.shuffle(class_ids[1:])
    print(class_ids)
    # The last four (shuffled) classes are held out as unseen test classes.
    test_classes = class_ids[10:14]
    train_classes = np.array([i for i in range(14) if i not in test_classes])
    print('Train classes:', train_classes)
    print('Test classes:', test_classes)
run_setup(features, labels, train_classes, test_classes, test_fold) | [
"numpy.isin",
"sklearn.decomposition.FastICA",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.read_csv",
"numpy.arange",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.random.shuffle"
] | [((1037, 1053), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1051, 1053), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1061, 1070), 'sklearn.decomposition.PCA', 'PCA', (['(0.99)'], {}), '(0.99)\n', (1064, 1070), False, 'from sklearn.decomposition import PCA\n'), ((2178, 2302), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(1000)', 'ngram_range': '(1, 1)', 'stop_words': '"""english"""', 'token_pattern': 'u"""(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b"""'}), "(max_features=1000, ngram_range=(1, 1), stop_words='english',\n token_pattern=u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b')\n", (2193, 2302), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5003, 5016), 'numpy.arange', 'np.arange', (['(14)'], {}), '(14)\n', (5012, 5016), True, 'import numpy as np\n'), ((2443, 2461), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (2451, 2461), True, 'import numpy as np\n'), ((2475, 2491), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2483, 2491), True, 'import numpy as np\n'), ((2745, 2773), 'numpy.random.shuffle', 'np.random.shuffle', (['train_ids'], {}), '(train_ids)\n', (2762, 2773), True, 'import numpy as np\n'), ((3501, 3607), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 1)', 'stop_words': '"""english"""', 'token_pattern': 'u"""(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b"""'}), "(ngram_range=(1, 1), stop_words='english', token_pattern=\n u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b')\n", (3516, 3607), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3665, 3683), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (3673, 3683), True, 'import numpy as np\n'), ((4385, 4454), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'pca.n_components_', 'random_state': '(0)', 'max_iter': '(500)'}), '(n_components=pca.n_components_, random_state=0, 
max_iter=500)\n', (4392, 4454), False, 'from sklearn.decomposition import FastICA\n'), ((5046, 5078), 'numpy.random.shuffle', 'np.random.shuffle', (['class_ids[1:]'], {}), '(class_ids[1:])\n', (5063, 5078), True, 'import numpy as np\n'), ((2567, 2597), 'numpy.isin', 'np.isin', (['labels', 'train_classes'], {}), '(labels, train_classes)\n', (2574, 2597), True, 'import numpy as np\n'), ((2671, 2700), 'numpy.isin', 'np.isin', (['labels', 'test_classes'], {}), '(labels, test_classes)\n', (2678, 2700), True, 'import numpy as np\n'), ((3759, 3789), 'numpy.isin', 'np.isin', (['labels', 'train_classes'], {}), '(labels, train_classes)\n', (3766, 3789), True, 'import numpy as np\n'), ((1856, 1921), 'pandas.read_csv', 'pd.read_csv', (["('../data/NYTimes_data/' + non_risk_file)"], {'header': 'None'}), "('../data/NYTimes_data/' + non_risk_file, header=None)\n", (1867, 1921), True, 'import pandas as pd\n'), ((2514, 2544), 'numpy.isin', 'np.isin', (['labels', 'train_classes'], {}), '(labels, train_classes)\n', (2521, 2544), True, 'import numpy as np\n'), ((2620, 2649), 'numpy.isin', 'np.isin', (['labels', 'test_classes'], {}), '(labels, test_classes)\n', (2627, 2649), True, 'import numpy as np\n'), ((3706, 3736), 'numpy.isin', 'np.isin', (['labels', 'train_classes'], {}), '(labels, train_classes)\n', (3713, 3736), True, 'import numpy as np\n'), ((1727, 1788), 'pandas.read_csv', 'pd.read_csv', (["('../data/NYTimes_data/' + risk_file)"], {'header': 'None'}), "('../data/NYTimes_data/' + risk_file, header=None)\n", (1738, 1788), True, 'import pandas as pd\n')] |
from PIL import Image
import numpy as np
def getAreaAverage(arr, startRow, startCol, cell=None):
    """Return the average per-pixel channel sum (R+G+B) over one cell.

    Parameters
    ----------
    arr : numpy.ndarray, shape (H, W, C)
        Image pixel data; only the first three channels are read.
    startRow, startCol : int
        Top-left corner of the cell inside ``arr``.
    cell : (rows, cols) pair, optional
        Cell dimensions. Defaults to the module-level ``cell_size``, which
        preserves the original behavior for existing callers.

    Returns
    -------
    int
        Floor of the mean over the cell of (R + G + B) per pixel. The mean
        is always taken over the full ``rows * cols`` area.
    """
    if cell is None:
        cell = cell_size  # backward-compatible module-level default
    rows, cols = cell[0], cell[1]
    # Vectorized replacement of the per-pixel double loop; int64 accumulation
    # avoids uint8 overflow exactly as the original's int() casts did.
    region = np.asarray(arr)[startRow:startRow + rows,
                             startCol:startCol + cols, :3].astype(np.int64)
    return int(region.sum() // (rows * cols))
def changeAreaColor(arr, startRow, staartCol, average, cell=None, grey_step=None):
    """Fill one cell with the quantized grey derived from ``average``.

    Parameters
    ----------
    arr : numpy.ndarray, shape (H, W, C)
        Image pixel data, modified in place (first three channels only).
    startRow, staartCol : int
        Top-left corner of the cell. (``staartCol`` keeps the original
        misspelled name for interface compatibility.)
    average : int
        Average per-pixel channel sum for the cell (see getAreaAverage).
    cell : (rows, cols) pair, optional
        Cell dimensions; defaults to the module-level ``cell_size``.
    grey_step : int, optional
        Quantization step; defaults to the module-level ``step``.
    """
    if cell is None:
        cell = cell_size  # backward-compatible module-level default
    if grey_step is None:
        grey_step = step  # backward-compatible module-level default
    rows, cols = cell[0], cell[1]
    # Quantize down to the nearest grey gradation, then divide by 3 because
    # ``average`` is a sum over three channels.
    value = int(average // grey_step) * grey_step // 3
    # Vectorized replacement of the original per-pixel/per-channel loops.
    arr[startRow:startRow + rows, staartCol:staartCol + cols, :3] = value
def createGreyPixelArtFromImage():
    """Quantize the global `img` into grey cells and save it as 'res.jpg'.

    Walks the image in cell_size steps, replacing each cell with its
    quantized average grey. Reads module-level globals `img` and `cell_size`.
    NOTE(review): images whose dimensions are not multiples of the cell size
    may index past the array edge inside the helpers — confirm inputs.
    """
    arr = np.array(img)
    a = len(arr)
    # NOTE(review): the width is taken from row 1 — assumes >= 2 image rows.
    a1 = len(arr[1])
    image_row = 0
    while image_row < a - 1:
        image_col = 0
        while image_col < a1 - 1:
            average = getAreaAverage(arr, image_row, image_col)
            changeAreaColor(arr, image_row, image_col, average)
            # NOTE(review): the column step uses cell_size[0] while the
            # helpers treat cell_size[0] as the ROW extent — harmless for the
            # square 10x10 default, wrong for non-square cells; verify.
            image_col = image_col + cell_size[0]
        image_row = image_row + cell_size[1]
    res = Image.fromarray(arr)
    res.save('res.jpg')
# Script configuration: input image, grey quantization, and cell size.
img = Image.open("img2.jpg")
grey_gradations = 50
cell_size = [10, 10]  # cell height/width in pixels
step = 255 // grey_gradations  # grey quantization step used by changeAreaColor
createGreyPixelArtFromImage()
| [
"PIL.Image.fromarray",
"numpy.array",
"PIL.Image.open"
] | [((1328, 1350), 'PIL.Image.open', 'Image.open', (['"""img2.jpg"""'], {}), "('img2.jpg')\n", (1338, 1350), False, 'from PIL import Image\n'), ((889, 902), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (897, 902), True, 'import numpy as np\n'), ((1276, 1296), 'PIL.Image.fromarray', 'Image.fromarray', (['arr'], {}), '(arr)\n', (1291, 1296), False, 'from PIL import Image\n')] |
import torch
import torch.utils.data as data_utl
from torch.utils.data.dataloader import default_collate
from PIL import Image
import numpy as np
import json
import csv
import h5py
import os
import os.path
import cv2
import pdb
def video_to_tensor(pic):
    """Convert a ``numpy.ndarray`` video to a torch tensor.

    Rearranges a (T x H x W x C) numpy.ndarray into the channel-first
    (C x T x H x W) layout expected by 3D conv nets.

    Args:
        pic (numpy.ndarray): Video to be converted to tensor.
    Returns:
        Tensor: Converted video (shares memory with ``pic``).
    """
    channels_first = pic.transpose(3, 0, 1, 2)
    return torch.from_numpy(channels_first)
class A3D(data_utl.Dataset):
    '''
    A3D dataset for I3D.

    Training samples are fixed-length clips (``seq_len`` frames) cut from
    each video; evaluation samples span whole videos.  Labels are one-hot
    over ``num_classes`` entries (index 0 = normal, 1-9 = known anomaly
    types; class 10 "unknown" videos are skipped).
    '''
    def __init__(self,
                 split_file,
                 split,
                 root,
                 mode, transforms=None, horizontal_flip=None, save_dir='', seq_len=16, overlap=0):
        # split_file : JSON annotation file (per-video anomaly start/end,
        #              anomaly_class, subset, num_frames)
        # split      : 'train', 'val' or 'test'
        # root       : directory holding one image folder per video id
        # mode       : 'rgb' or 'flow' (the flow loader is commented out below)
        # save_dir   : videos with an existing <vid>.npy here are short-circuited
        #              in __getitem__
        # seq_len    : clip length in frames; overlap: frames shared by
        #              consecutive training clips
        self.split_file = split_file
        self.transforms = transforms
        self.mode = mode
        self.root = root
        self.save_dir = save_dir
        self.seq_len = seq_len
        self.overlap = overlap
        self.fps = 10
        self.num_classes = 10 # 9 known anomaly types plus normal; 0 is normal
        self.name_to_id = {'normal': 0,
                           'start_stop_or_stationary': 1,
                           'moving_ahead_or_waiting': 2,
                           'lateral': 3,
                           'oncoming': 4,
                           'turning': 5,
                           'pedestrian': 6,
                           'obstacle': 7,
                           'leave_to_right': 8,
                           'leave_to_left': 9,
                           'unknown': 10}
        self.id_to_name = {v:k for k, v in self.name_to_id.items()}
        if split == 'train':
            self.data = self.make_train_dataset(split_file, split, root, mode)
            print("Number of used video:", len(self.data))
        elif split in ['val', 'test']:
            self.data = self.make_test_dataset(split_file, split, root, mode)

    def make_train_dataset(self, split_file, split, root, mode):
        """Build the list of {vid, label, start, end} clip samples for training.

        Clips are cut every ``seq_len - overlap`` frames; a clip is labelled
        with the video's anomaly class when at least a third of its frames
        fall inside the annotated anomaly interval, otherwise as normal.
        """
        dataset = []
        with open(split_file, 'r') as f:
            data = json.load(f)
        self.valid_videos = []
        sample_category_stats = {v:0 for v in self.name_to_id.values()}
        for idx, vid in enumerate(data.keys()):
            if data[vid]['video_start'] is None or \
                data[vid]['video_start'] is None or \
                data[vid]['anomaly_start'] is None or \
                data[vid]['anomaly_end'] is None:
                # NOTE: Sep 5, some videos may have null video_start, meaning
                # there is a bug and we skip the video for now
                continue
            if data[vid]['subset'] != split:
                continue
            if not os.path.exists(os.path.join(root, vid)):
                continue
            if int(data[vid]['anomaly_class']) == 10:
                # skip unknown
                continue
            num_frames = data[vid]['num_frames']
            if num_frames < self.seq_len:
                continue
            print("Training videos:", vid)
            self.valid_videos.append(vid)
            # Frame-level one-hot labels over the whole video (class x frame).
            labels = np.zeros([self.num_classes, num_frames], np.float32)
            # normal before the anomaly
            labels[0, :data[vid]['anomaly_start']] = 1
            # anomaly interval
            labels[int(data[vid]['anomaly_class']),
                   data[vid]['anomaly_start']:data[vid]['anomaly_end']] = 1
            # normal after the anomaly
            labels[0, data[vid]['anomaly_end']:] = 1
            assert int(data[vid]['anomaly_class']) > 0
            for t in range(0, num_frames, (self.seq_len - self.overlap)):
                if num_frames - t < self.seq_len:
                    # Last window: anchor to the end of the video so the clip
                    # is still seq_len frames long.
                    seq_start = num_frames - self.seq_len
                    seq_end = num_frames
                else:
                    seq_start = t
                    seq_end = t + self.seq_len
                # NOTE: for original I3D, one clip has only one label
                label = np.zeros(self.num_classes, np.float32)
                # NOTE: assign the accident label if over 1/3 of the frames
                # are abnormal (an earlier "middle frame" scheme was removed)
                if sum(labels[:, seq_start:seq_end].nonzero()[0] > 0) >= self.seq_len/3:
                    label[int(data[vid]['anomaly_class'])] = 1 # abnormal
                    sample_category_stats[int(data[vid]['anomaly_class'])] += 1
                else:
                    label[0] = 1 # normal
                    sample_category_stats[0] += 1
                dataset.append({"vid": vid,
                                "label": label,
                                "start": seq_start, # NOTE: 0-index
                                "end": seq_end,# NOTE: 0-index
                                })
            # NOTE: for over fitting on 10 videos — debug limit left in by
            # the author; only the first 10 annotation entries are used.
            if idx >=9:
                break
        print("Number of samples of all categories:")
        [print('{}:{}'.format(self.id_to_name[k], v)) for k, v in sample_category_stats.items()]
        return dataset

    def make_test_dataset(self, split_file, split, root, mode):
        """Build one whole-video sample per valid video for evaluation.

        Unlike training, each sample spans every frame of the video and the
        label is the video's single anomaly class.  Note the subset filter
        is commented out, so videos from all subsets are included.
        """
        dataset = []
        with open(split_file, 'r') as f:
            data = json.load(f)
        self.valid_videos = []
        for idx, vid in enumerate(data.keys()):
            if data[vid]['video_start'] is None or \
                data[vid]['video_start'] is None or \
                data[vid]['anomaly_start'] is None or \
                data[vid]['anomaly_end'] is None:
                # NOTE: Sep 5, some videos may have null video_start, meaning
                # there is a bug and we skip the video for now
                continue
            # if data[vid]['subset'] != split:
            #     continue
            if not os.path.exists(os.path.join(root, vid)):
                continue
            if int(data[vid]['anomaly_class']) == 10:
                # skip unknown
                continue
            print("Validating videos:", vid)
            num_frames = data[vid]['num_frames']
            self.valid_videos.append(vid)
            # NOTE: for original I3D, one clip has only one label
            label = np.zeros([self.num_classes], np.float32)
            label[int(data[vid]['anomaly_class'])] = 1
            dataset.append({"vid": vid,
                            "label": label,
                            "start": 0, # NOTE: 0-index
                            "end": num_frames# NOTE: 0-index
                            })
            # NOTE(review): debug limit — only the first 10 entries are used.
            if idx >=9:
                break
        return dataset

    def load_rgb_frames(self, image_dir, vid, start, end):
        """Load frames ``start`` .. ``end``-1 of video ``vid`` as PIL images.

        Expects files <image_dir>/<vid>/images/000000.jpg, 000001.jpg, ...
        """
        frames = []
        for i in range(start, end):
            img = Image.open(os.path.join(image_dir, vid, 'images', str(i).zfill(6)+'.jpg'))
            w,h = img.size  # unused; a resize/normalise step was removed here
            frames.append(img)
        return frames #torch.stack(frames, dim=1)

    # NOTE(review): the optical-flow loader (load_flow_frames) is commented
    # out in this revision, so __getitem__ raises AttributeError for any
    # mode other than 'rgb'.

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        data = self.data[index]
        vid = data["vid"]
        label = data["label"]
        start = data["start"]
        end = data["end"]
        if os.path.exists(os.path.join(self.save_dir, vid+'.npy')):
            # NOTE(review): this early return yields a 3-tuple while the
            # normal path returns a 5-tuple — callers must handle both.
            return 0, 0, vid
        if self.mode == 'rgb':
            imgs = self.load_rgb_frames(self.root, vid, start, end)
        else:
            imgs = self.load_flow_frames(self.root, vid, start, end)
        imgs, label = self.transforms(imgs, label)
        return imgs, label, vid, start, end

    def __len__(self):
        # Number of clip/video samples produced by make_*_dataset.
        return len(self.data)
class A3DBinary(data_utl.Dataset):
    '''
    A3D dataset for I3D binary classification.

    Same structure as ``A3D`` but labels collapse to two classes:
    index 0 = normal, index 1 = anomalous.
    '''
    def __init__(self,
                 split_file,
                 split,
                 root,
                 mode, transforms=None, horizontal_flip=None, save_dir='', seq_len=16, overlap=0):
        # Arguments mirror A3D.__init__; see that class for details.
        self.split_file = split_file
        self.transforms = transforms
        self.mode = mode
        self.root = root
        self.save_dir = save_dir
        self.seq_len = seq_len
        self.overlap = overlap
        self.fps = 10
        self.num_classes = 2 # binary
        if split == 'train':
            self.data = self.make_train_dataset(split_file, split, root, mode)
            print("Number of used video:", len(self.data))
        elif split in ['val', 'test']:
            self.data = self.make_test_dataset(split_file, split, root, mode)

    def make_train_dataset(self, split_file, split, root, mode):
        """Build binary-labelled clip samples.

        Same clip-cutting scheme as A3D.make_train_dataset; a clip is
        'abnormal' when at least a third of its frames are anomalous.
        Note this variant does NOT skip class-10 ('unknown') videos.
        """
        dataset = []
        with open(split_file, 'r') as f:
            data = json.load(f)
        self.valid_videos = []
        sample_category_stats = {'normal':0, 'abnormal': 0}
        for idx, vid in enumerate(data.keys()):
            if data[vid]['video_start'] is None or \
                data[vid]['video_start'] is None or \
                data[vid]['anomaly_start'] is None or \
                data[vid]['anomaly_end'] is None:
                # NOTE: Sep 5, some videos may have null video_start, meaning
                # there is a bug and we skip the video for now
                continue
            if data[vid]['subset'] != split:
                continue
            if not os.path.exists(os.path.join(root, vid)):
                continue
            num_frames = data[vid]['num_frames']
            if num_frames < self.seq_len:
                continue
            print("Training videos:", vid)
            self.valid_videos.append(vid)
            # Frame-level binary labels across the whole video.
            labels = np.zeros([2, num_frames], np.float32)
            # normal before the anomaly
            labels[0, :data[vid]['anomaly_start']] = 1
            # anomaly interval
            labels[1, data[vid]['anomaly_start']:data[vid]['anomaly_end']] = 1 # binary classification
            # normal after the anomaly
            labels[0, data[vid]['anomaly_end']:] = 1
            assert int(data[vid]['anomaly_class']) > 0
            for t in range(0, num_frames, (self.seq_len - self.overlap)):
                if num_frames - t < self.seq_len:
                    # Last window: anchor to the end of the video.
                    seq_start = num_frames - self.seq_len
                    seq_end = num_frames
                else:
                    seq_start = t
                    seq_end = t + self.seq_len
                # NOTE: for original I3D, one clip has only one label
                label = np.zeros(2, np.float32)
                # NOTE: assign the accident label if over 1/3 of the frames
                # are abnormal (an earlier "middle frame" scheme was removed)
                if sum(labels[:, seq_start:seq_end].nonzero()[0] > 0) >= self.seq_len/3:
                    label[1] = 1 # abnormal
                    sample_category_stats['abnormal'] += 1
                else:
                    label[0] = 1 # normal
                    sample_category_stats['normal'] += 1
                dataset.append({"vid": vid,
                                "label": label,
                                "start": seq_start, # NOTE: 0-index
                                "end": seq_end,# NOTE: 0-index
                                })
            # NOTE: for over fitting on 10 videos — debug limit.
            if idx >=9:
                break
        print("Number of samples of all categories:")
        print(sample_category_stats)
        return dataset

    def make_test_dataset(self, split_file, split, root, mode):
        """One whole-video sample per valid video.

        NOTE(review): label[1] is set unconditionally below, so every test
        video is labelled 'abnormal' regardless of its annotation — confirm
        this is intended before relying on evaluation metrics.
        """
        dataset = []
        with open(split_file, 'r') as f:
            data = json.load(f)
        self.valid_videos = []
        for idx, vid in enumerate(data.keys()):
            if data[vid]['video_start'] is None or \
                data[vid]['video_start'] is None or \
                data[vid]['anomaly_start'] is None or \
                data[vid]['anomaly_end'] is None:
                # NOTE: Sep 5, some videos may have null video_start, meaning
                # there is a bug and we skip the video for now
                continue
            # if data[vid]['subset'] != split:
            #     continue
            if not os.path.exists(os.path.join(root, vid)):
                continue
            if int(data[vid]['anomaly_class']) == 10:
                # skip unknown
                continue
            print("Validating videos:", vid)
            num_frames = data[vid]['num_frames']
            self.valid_videos.append(vid)
            # NOTE: for original I3D, one clip has only one label
            label = np.zeros(2, np.float32)
            label[1] = 1
            dataset.append({"vid": vid,
                            "label": label,
                            "start": 0, # NOTE: 0-index
                            "end": num_frames# NOTE: 0-index
                            })
            # NOTE(review): debug limit — only the first 10 entries are used.
            if idx >=9:
                break
        return dataset

    def load_rgb_frames(self, image_dir, vid, start, end):
        """Load frames ``start`` .. ``end``-1 of video ``vid`` as PIL images."""
        frames = []
        for i in range(start, end):
            img = Image.open(os.path.join(image_dir, vid, 'images', str(i).zfill(6)+'.jpg'))
            frames.append(img)
        return frames #torch.stack(frames, dim=1)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        data = self.data[index]
        vid = data["vid"]
        label = data["label"]
        start = data["start"]
        end = data["end"]
        if os.path.exists(os.path.join(self.save_dir, vid+'.npy')):
            # NOTE(review): 3-tuple early return vs. 5-tuple normal return.
            return 0, 0, vid
        if self.mode == 'rgb':
            imgs = self.load_rgb_frames(self.root, vid, start, end)
        else:
            # NOTE(review): load_flow_frames is not defined on this class;
            # any mode other than 'rgb' raises AttributeError here.
            imgs = self.load_flow_frames(self.root, vid, start, end)
        imgs, label = self.transforms(imgs, label)
        return imgs, label, vid, start, end

    def __len__(self):
        return len(self.data)
"json.load",
"numpy.zeros",
"os.path.join"
] | [((2219, 2231), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2228, 2231), False, 'import json\n'), ((3331, 3383), 'numpy.zeros', 'np.zeros', (['[self.num_classes, num_frames]', 'np.float32'], {}), '([self.num_classes, num_frames], np.float32)\n', (3339, 3383), True, 'import numpy as np\n'), ((6177, 6189), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6186, 6189), False, 'import json\n'), ((7591, 7631), 'numpy.zeros', 'np.zeros', (['[self.num_classes]', 'np.float32'], {}), '([self.num_classes], np.float32)\n', (7599, 7631), True, 'import numpy as np\n'), ((9992, 10033), 'os.path.join', 'os.path.join', (['self.save_dir', "(vid + '.npy')"], {}), "(self.save_dir, vid + '.npy')\n", (10004, 10033), False, 'import os\n'), ((11415, 11427), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11424, 11427), False, 'import json\n'), ((12406, 12443), 'numpy.zeros', 'np.zeros', (['[2, num_frames]', 'np.float32'], {}), '([2, num_frames], np.float32)\n', (12414, 12443), True, 'import numpy as np\n'), ((15068, 15080), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15077, 15080), False, 'import json\n'), ((16024, 16047), 'numpy.zeros', 'np.zeros', (['(2)', 'np.float32'], {}), '(2, np.float32)\n', (16032, 16047), True, 'import numpy as np\n'), ((17248, 17289), 'os.path.join', 'os.path.join', (['self.save_dir', "(vid + '.npy')"], {}), "(self.save_dir, vid + '.npy')\n", (17260, 17289), False, 'import os\n'), ((4330, 4368), 'numpy.zeros', 'np.zeros', (['self.num_classes', 'np.float32'], {}), '(self.num_classes, np.float32)\n', (4338, 4368), True, 'import numpy as np\n'), ((13340, 13363), 'numpy.zeros', 'np.zeros', (['(2)', 'np.float32'], {}), '(2, np.float32)\n', (13348, 13363), True, 'import numpy as np\n'), ((2859, 2882), 'os.path.join', 'os.path.join', (['root', 'vid'], {}), '(root, vid)\n', (2871, 2882), False, 'import os\n'), ((6735, 6758), 'os.path.join', 'os.path.join', (['root', 'vid'], {}), '(root, vid)\n', (6747, 6758), False, 'import os\n'), ((12043, 12066), 
'os.path.join', 'os.path.join', (['root', 'vid'], {}), '(root, vid)\n', (12055, 12066), False, 'import os\n'), ((15626, 15649), 'os.path.join', 'os.path.join', (['root', 'vid'], {}), '(root, vid)\n', (15638, 15649), False, 'import os\n')] |
"""Plot to test line styles"""
import matplotlib.pyplot as plt
import numpy as np
import mpld3
def create_plot():
    """Build a demo figure whose data contains NaNs (one in x, one in y)."""
    fig, ax = plt.subplots()
    np.random.seed(0)
    num_points = 10
    x_vals = np.arange(num_points, dtype=float)
    y_vals = np.random.normal(size=num_points)
    # Inject the NaNs the figure is meant to exercise.
    x_vals[6] = np.nan
    y_vals[3] = np.nan
    ax.plot(x_vals, y_vals, 'ks-', ms=10, mec='w', mew=3)
    ax.set_xlabel('x has uniform spacing')
    ax.set_ylabel('y includes a nan')
    ax.set_title('NaN test', size=14)
    return fig
def test_nan():
    """Smoke test: the NaN figure converts to HTML without raising."""
    fig = create_plot()
    html = mpld3.fig_to_html(fig)  # result unused; success == no exception
    plt.close(fig)  # release the figure so repeated runs don't accumulate
# Render the interactive demo when executed directly.
if __name__ == "__main__":
    mpld3.show(create_plot())
| [
"numpy.random.seed",
"mpld3.fig_to_html",
"matplotlib.pyplot.close",
"numpy.arange",
"numpy.random.normal",
"matplotlib.pyplot.subplots"
] | [((130, 144), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (142, 144), True, 'import matplotlib.pyplot as plt\n'), ((150, 167), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (164, 167), True, 'import numpy as np\n'), ((197, 230), 'numpy.arange', 'np.arange', (['numPoints'], {'dtype': 'float'}), '(numPoints, dtype=float)\n', (206, 230), True, 'import numpy as np\n'), ((260, 292), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'numPoints'}), '(size=numPoints)\n', (276, 292), True, 'import numpy as np\n'), ((551, 573), 'mpld3.fig_to_html', 'mpld3.fig_to_html', (['fig'], {}), '(fig)\n', (568, 573), False, 'import mpld3\n'), ((578, 592), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (587, 592), True, 'import matplotlib.pyplot as plt\n')] |
import cv2
import numpy as np
def answer1(x):
    """Map a bubble's x-centroid to an option letter (wide-column layout).

    Column boundaries (pixels): a <= 90 < b <= 180 < c <= 270 < d <= 400.

    Parameters
    ----------
    x : number — x-coordinate of the mark's centroid.

    Returns
    -------
    str : one of 'a', 'b', 'c', 'd'.

    Raises
    ------
    ValueError
        If x lies outside [0, 400].  (The original code left ``ans``
        unbound in that case and raised UnboundLocalError instead.)
    """
    if not 0 <= x <= 400:
        raise ValueError("centroid x=%r outside answer-sheet range [0, 400]" % (x,))
    if x <= 90:
        return "a"
    elif x <= 180:
        return "b"
    elif x <= 270:
        return "c"
    return "d"
def answer2(x):
    """Map a bubble's x-centroid to an option letter (narrow-column layout).

    Column boundaries (pixels): a <= 144 < b <= 216 < c <= 288 < d <= 400.

    Parameters
    ----------
    x : number — x-coordinate of the mark's centroid.

    Returns
    -------
    str : one of 'a', 'b', 'c', 'd'.

    Raises
    ------
    ValueError
        If x lies outside [0, 400].  (The original code left ``ans``
        unbound in that case and raised UnboundLocalError instead.)
    """
    if not 0 <= x <= 400:
        raise ValueError("centroid x=%r outside answer-sheet range [0, 400]" % (x,))
    # Upper bound of each column, in order.
    for upper, letter in ((144, "a"), (216, "b"), (288, "c"), (400, "d")):
        if x <= upper:
            return letter
def question(question):
    """Read one answer-sheet column image and return the marked options.

    Parameters
    ----------
    question : 2-D greyscale numpy array (one column of the OMR sheet).

    Returns
    -------
    list of 40 str : per question, '' (blank), a single letter 'a'-'d',
    a comma-separated combination (2-3 marks), or 'a,b,c,d' (4+ marks).
    """
    question_array = [""]*40
    questionno = 0
    h,w = question.shape
    # Trim a 15-px border to drop the sheet's frame lines.
    crop= question[15:h-15,15:w-15]
    th, im_th = cv2.threshold(crop,220,255,cv2.THRESH_BINARY_INV)
    im_th0=im_th
    kernel = np.ones((5,5), np.uint8)
    im_th0=cv2.morphologyEx(im_th0, cv2.MORPH_OPEN, kernel)  # im_th0 unused afterwards
    im_th1=im_th
    im_th2=im_th1
    # im_th1: emphasise horizontal structure (the row bands of the grid).
    kernel = np.ones((1,60), np.uint8)
    im_th1 = cv2.morphologyEx(im_th1, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((5,20), np.uint8)
    im_th = cv2.erode(im_th,kernel,iterations = 1)
    kernel = np.ones((5,100), np.uint8)
    im_th1 = cv2.morphologyEx(im_th1, cv2.MORPH_OPEN, kernel)
    kernel = np.ones((5,w), np.uint8)
    im_th1 = cv2.morphologyEx(im_th1, cv2.MORPH_OPEN, kernel)
    kernel = np.ones((4,4), np.uint8)
    im_th1 = cv2.morphologyEx(im_th1, cv2.MORPH_CLOSE, kernel)
    # im_th2: emphasise vertical structure (the column separators).
    kernel = np.ones((40,5), np.uint8)
    im_th2 = cv2.morphologyEx(im_th2, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((10,10), np.uint8)
    im_th2 = cv2.erode(im_th2,kernel,iterations = 1)
    kernel = np.ones((h,5), np.uint8)
    im_th2 = cv2.morphologyEx(im_th2, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((h,10), np.uint8)
    im_th2 = cv2.morphologyEx(im_th2, cv2.MORPH_OPEN, kernel)
    mycol=255*np.ones([crop.shape[0],crop.shape[1]])
    # cnts22 counts vertical separators; exactly 4 selects the wide
    # (answer1) column geometry, anything else the narrow (answer2) one.
    (_,cnts22, _) = cv2.findContours(im_th2, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # Black out rows with little horizontal ink -> row separators in mycol.
    for z in range(0,crop.shape[0]):
        st=sum(im_th1[z,:])
        if st<=4000:
            mycol[z,:]=np.zeros([crop.shape[1]])
    (_,cnts, _) = cv2.findContours(np.uint8(mycol), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns bottom-up here; reversed() walks rows top-down.
    for x in reversed(cnts):
        # Slice the question row out of the cropped column using the
        # contour's first and third corner points.
        col=crop[x[0][0][1]:x[2][0][1],x[0][0][0]:x[2][0][0]]
        ret,thresh2 = cv2.threshold(col,20,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        kernel = np.ones((18,18), np.uint8)
        im_th4 = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, kernel)
        kernel = np.ones((5,5), np.uint8)
        im_th4 = cv2.morphologyEx(im_th4, cv2.MORPH_CLOSE, kernel)
        # Each remaining blob is one filled bubble in this row.
        (_,cnts, _) = cv2.findContours(im_th4, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        if len(cnts) <= 0 :
            question_array[questionno] = ""
        elif len(cnts) == 1:
            # Single mark: the centroid's x-coordinate decides the letter.
            M = cv2.moments(im_th4)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            if len(cnts22)==4:
                question_array[questionno] = answer1(cX)
            else:
                question_array[questionno] = answer2(cX)
        elif len(cnts) == 2:
            M1 = cv2.moments(cnts[1])
            cX1 = int(M1["m10"] / M1["m00"])
            cY1 = int(M1["m01"] / M1["m00"])
            M2 = cv2.moments(cnts[0])
            cX2 = int(M2["m10"] / M2["m00"])
            cY2 = int(M2["m01"] / M2["m00"])
            if len(cnts22)==4:
                question_array[questionno] = answer1(cX1)+','+answer1(cX2)
            else:
                question_array[questionno] = answer2(cX1)+','+answer2(cX2)
        elif len(cnts) == 3:
            M1 = cv2.moments(cnts[2])
            cX1 = int(M1["m10"] / M1["m00"])
            cY1 = int(M1["m01"] / M1["m00"])
            M2 = cv2.moments(cnts[1])
            cX2 = int(M2["m10"] / M2["m00"])
            cY2 = int(M2["m01"] / M2["m00"])
            M3 = cv2.moments(cnts[0])
            cX3 = int(M3["m10"] / M3["m00"])
            cY3 = int(M3["m01"] / M3["m00"])
            if len(cnts22)==4:
                question_array[questionno] = answer1(cX1)+','+answer1(cX2)+','+answer1(cX3)
            else:
                question_array[questionno] = answer2(cX1)+','+answer2(cX2)+','+answer2(cX3)
        else:
            # 4 or more marks: treat as every option filled.
            question_array[questionno]="a,b,c,d"
        questionno=questionno+1
    return question_array
# thresh2 = cv2.GaussianBlur(col,(5,5),cv2.BORDER_DEFAULT)
###
# binary = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel)
# binary1 = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
# (_,cnts, _) = cv2.findContours(binary, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# orginal=cv2.imread("Aligned/Question.png",0)
#cv2.imshow("Foreground", orginal)
#cv2.waitKey(0)
# th, im_th = cv2.threshold(orginal,127,255,0)
#
# im_th=~im_th
# kernel = np.ones((4,4), np.uint8)
# binary = cv2.erode(im_th, kernel, iterations=2)
#
# h,w = binary.shape
#
# question = 0
# question_array = [""]*201
#
# for y in range(0, w,np.uint(np.floor(w/5))):
#
# if (y +int(w/5-15) > w):
# break
#
# if y== 0:
# column = binary[20:h-20, y+75: y +int(w/5-15)]
# visc= orginal[20:h-20, y+75: y +int(w/5-15)]
#
# else:
# column = binary[20:h-20, y+90: y +int(w/5-15)]
# visc= orginal[20:h-20, y+90: y +int(w/5-15)]
#
# # cv2.imshow("Foreground", visc)
# # cv2.waitKey(0)
# ##column = orginal[15:1696, 0:420]
#
# for x in range(0, column.shape[0],np.uint(np.floor(h/40))):
#
# if (x+40 > h):
# break
#
# question+=1
# row = column[x:x+40,:]
# visr=visc[x:x+40,:]
# # cv2.imshow("Foreground", visr)
# # cv2.waitKey(0)
#
# (_,cnts, _) = cv2.findContours(row, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#
#
# if len(cnts) <= 0 :
#
# question_array[question] = ""
#
# # if cv2.contourArea(cnts[0])>950 or cv2.contourArea(cnts[0]) <600:
# #
# elif len(cnts) == 1:
#
# M = cv2.moments(row)
# cX = int(M["m10"] / M["m00"])
# cY = int(M["m01"] / M["m00"])
#
# question_array[question] = answer(cX)
#
# elif len(cnts) == 2:
#
# M1 = cv2.moments(cnts[1])
# cX1 = int(M1["m10"] / M1["m00"])
# cY1 = int(M1["m01"] / M1["m00"])
#
#
# M2 = cv2.moments(cnts[0])
# cX2 = int(M2["m10"] / M2["m00"])
# cY2 = int(M2["m01"] / M2["m00"])
#
# question_array[question] = answer(cX1)+answer(cX2)
#
# elif len(cnts) == 3:
#
# M1 = cv2.moments(cnts[2])
# cX1 = int(M1["m10"] / M1["m00"])
# cY1 = int(M1["m01"] / M1["m00"])
#
#
# M2 = cv2.moments(cnts[1])
# cX2 = int(M2["m10"] / M2["m00"])
# cY2 = int(M2["m01"] / M2["m00"])
#
# M3 = cv2.moments(cnts[0])
# cX3 = int(M3["m10"] / M3["m00"])
# cY3 = int(M3["m01"] / M3["m00"])
#
# question_array[question] = answer(cX1)+answer(cX2)+answer(cX3)
# else:
# question_array[question]="a,b,c,d"
# # print(question,question_array[question])
#
# return question_array
#
#
# | [
"numpy.uint8",
"cv2.morphologyEx",
"cv2.threshold",
"cv2.moments",
"numpy.zeros",
"numpy.ones",
"cv2.erode",
"cv2.findContours"
] | [((727, 779), 'cv2.threshold', 'cv2.threshold', (['crop', '(220)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(crop, 220, 255, cv2.THRESH_BINARY_INV)\n', (740, 779), False, 'import cv2\n'), ((932, 957), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (939, 957), True, 'import numpy as np\n'), ((969, 1017), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th0', 'cv2.MORPH_OPEN', 'kernel'], {}), '(im_th0, cv2.MORPH_OPEN, kernel)\n', (985, 1017), False, 'import cv2\n'), ((1071, 1097), 'numpy.ones', 'np.ones', (['(1, 60)', 'np.uint8'], {}), '((1, 60), np.uint8)\n', (1078, 1097), True, 'import numpy as np\n'), ((1111, 1160), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th1', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(im_th1, cv2.MORPH_CLOSE, kernel)\n', (1127, 1160), False, 'import cv2\n'), ((1174, 1200), 'numpy.ones', 'np.ones', (['(5, 20)', 'np.uint8'], {}), '((5, 20), np.uint8)\n', (1181, 1200), True, 'import numpy as np\n'), ((1213, 1251), 'cv2.erode', 'cv2.erode', (['im_th', 'kernel'], {'iterations': '(1)'}), '(im_th, kernel, iterations=1)\n', (1222, 1251), False, 'import cv2\n'), ((1370, 1397), 'numpy.ones', 'np.ones', (['(5, 100)', 'np.uint8'], {}), '((5, 100), np.uint8)\n', (1377, 1397), True, 'import numpy as np\n'), ((1602, 1650), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th1', 'cv2.MORPH_OPEN', 'kernel'], {}), '(im_th1, cv2.MORPH_OPEN, kernel)\n', (1618, 1650), False, 'import cv2\n'), ((1664, 1689), 'numpy.ones', 'np.ones', (['(5, w)', 'np.uint8'], {}), '((5, w), np.uint8)\n', (1671, 1689), True, 'import numpy as np\n'), ((1703, 1751), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th1', 'cv2.MORPH_OPEN', 'kernel'], {}), '(im_th1, cv2.MORPH_OPEN, kernel)\n', (1719, 1751), False, 'import cv2\n'), ((1765, 1790), 'numpy.ones', 'np.ones', (['(4, 4)', 'np.uint8'], {}), '((4, 4), np.uint8)\n', (1772, 1790), True, 'import numpy as np\n'), ((1804, 1853), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th1', 'cv2.MORPH_CLOSE', 'kernel'], {}), 
'(im_th1, cv2.MORPH_CLOSE, kernel)\n', (1820, 1853), False, 'import cv2\n'), ((1942, 1968), 'numpy.ones', 'np.ones', (['(40, 5)', 'np.uint8'], {}), '((40, 5), np.uint8)\n', (1949, 1968), True, 'import numpy as np\n'), ((2170, 2219), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th2', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(im_th2, cv2.MORPH_CLOSE, kernel)\n', (2186, 2219), False, 'import cv2\n'), ((2328, 2355), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.uint8'], {}), '((10, 10), np.uint8)\n', (2335, 2355), True, 'import numpy as np\n'), ((2369, 2408), 'cv2.erode', 'cv2.erode', (['im_th2', 'kernel'], {'iterations': '(1)'}), '(im_th2, kernel, iterations=1)\n', (2378, 2408), False, 'import cv2\n'), ((2422, 2447), 'numpy.ones', 'np.ones', (['(h, 5)', 'np.uint8'], {}), '((h, 5), np.uint8)\n', (2429, 2447), True, 'import numpy as np\n'), ((2461, 2510), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th2', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(im_th2, cv2.MORPH_CLOSE, kernel)\n', (2477, 2510), False, 'import cv2\n'), ((2524, 2550), 'numpy.ones', 'np.ones', (['(h, 10)', 'np.uint8'], {}), '((h, 10), np.uint8)\n', (2531, 2550), True, 'import numpy as np\n'), ((2564, 2612), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th2', 'cv2.MORPH_OPEN', 'kernel'], {}), '(im_th2, cv2.MORPH_OPEN, kernel)\n', (2580, 2612), False, 'import cv2\n'), ((2705, 2773), 'cv2.findContours', 'cv2.findContours', (['im_th2', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_th2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2721, 2773), False, 'import cv2\n'), ((2639, 2678), 'numpy.ones', 'np.ones', (['[crop.shape[0], crop.shape[1]]'], {}), '([crop.shape[0], crop.shape[1]])\n', (2646, 2678), True, 'import numpy as np\n'), ((3286, 3301), 'numpy.uint8', 'np.uint8', (['mycol'], {}), '(mycol)\n', (3294, 3301), True, 'import numpy as np\n'), ((3507, 3575), 'cv2.threshold', 'cv2.threshold', (['col', '(20)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(col, 20, 255, 
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (3520, 3575), False, 'import cv2\n'), ((3588, 3615), 'numpy.ones', 'np.ones', (['(18, 18)', 'np.uint8'], {}), '((18, 18), np.uint8)\n', (3595, 3615), True, 'import numpy as np\n'), ((3642, 3691), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh2', 'cv2.MORPH_OPEN', 'kernel'], {}), '(thresh2, cv2.MORPH_OPEN, kernel)\n', (3658, 3691), False, 'import cv2\n'), ((3709, 3734), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (3716, 3734), True, 'import numpy as np\n'), ((3752, 3801), 'cv2.morphologyEx', 'cv2.morphologyEx', (['im_th4', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(im_th4, cv2.MORPH_CLOSE, kernel)\n', (3768, 3801), False, 'import cv2\n'), ((4123, 4191), 'cv2.findContours', 'cv2.findContours', (['im_th4', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(im_th4, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4139, 4191), False, 'import cv2\n'), ((3143, 3168), 'numpy.zeros', 'np.zeros', (['[crop.shape[1]]'], {}), '([crop.shape[1]])\n', (3151, 3168), True, 'import numpy as np\n'), ((4509, 4528), 'cv2.moments', 'cv2.moments', (['im_th4'], {}), '(im_th4)\n', (4520, 4528), False, 'import cv2\n'), ((4894, 4914), 'cv2.moments', 'cv2.moments', (['cnts[1]'], {}), '(cnts[1])\n', (4905, 4914), False, 'import cv2\n'), ((5059, 5079), 'cv2.moments', 'cv2.moments', (['cnts[0]'], {}), '(cnts[0])\n', (5070, 5079), False, 'import cv2\n'), ((5472, 5492), 'cv2.moments', 'cv2.moments', (['cnts[2]'], {}), '(cnts[2])\n', (5483, 5492), False, 'import cv2\n'), ((5622, 5642), 'cv2.moments', 'cv2.moments', (['cnts[1]'], {}), '(cnts[1])\n', (5633, 5642), False, 'import cv2\n'), ((5763, 5783), 'cv2.moments', 'cv2.moments', (['cnts[0]'], {}), '(cnts[0])\n', (5774, 5783), False, 'import cv2\n')] |
import numpy as np
# import comet_ml in the top of your file
from comet_ml import Experiment
# Add the following code anywhere in your machine learning file
experiment = Experiment(api_key="<KEY>")
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPooling1D, Input
from keras.layers import Conv1D, MaxPooling1D
from keras.utils import np_utils
from keras.datasets import mnist
from sklearn.model_selection import train_test_split
def main():
    """Train a small Conv2D classifier on the pysam n3 dataset.

    Loads X of shape (N, 7, 4) and one-hot labels y from .npy files,
    reshapes X to (N, 7, 1, 4), and fits a Conv2D -> Flatten -> Dense
    softmax model for 200 epochs, logging via the module-level comet.ml
    experiment.
    """
    X, y = np.load('pysam-dataset-n3-X.npy'), np.load('pysam-dataset-n3-y.npy')
    # The model wants each (7, 4) sample as (7, 1, 4) with the four original
    # columns as channels.  A single reshape is element-for-element identical
    # to the old per-sample np.dstack loop and avoids the Python-level pass.
    X = X.reshape(-1, 7, 1, 4)
    X_train, X_validate, y_train, y_validate = train_test_split(X, y, test_size=0.10)
    input_layer = Input(shape=(7, 1, 4))
    conv_1 = Conv2D(filters=5, kernel_size=3, padding='same', activation='relu')(input_layer)
    flatten = Flatten()(conv_1)
    predictions = Dense(4, activation='softmax')(flatten)
    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    batch_size = 10000
    epochs = 200
    model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(X_validate, y_validate))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.load",
"sklearn.model_selection.train_test_split",
"keras.layers.Flatten",
"keras.models.Model",
"comet_ml.Experiment",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Conv2D",
"keras.layers.Input"
] | [((172, 199), 'comet_ml.Experiment', 'Experiment', ([], {'api_key': '"""<KEY>"""'}), "(api_key='<KEY>')\n", (182, 199), False, 'from comet_ml import Experiment\n'), ((806, 821), 'numpy.array', 'np.array', (['new_X'], {}), '(new_X)\n', (814, 821), True, 'import numpy as np\n'), ((884, 921), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (900, 921), False, 'from sklearn.model_selection import train_test_split\n'), ((942, 964), 'keras.layers.Input', 'Input', ([], {'shape': '(7, 1, 4)'}), '(shape=(7, 1, 4))\n', (947, 964), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPooling1D, Input\n'), ((1568, 1599), 'keras.models.Model', 'Model', (['input_layer', 'predictions'], {}), '(input_layer, predictions)\n', (1573, 1599), False, 'from keras.models import Model\n'), ((533, 566), 'numpy.load', 'np.load', (['"""pysam-dataset-n3-X.npy"""'], {}), "('pysam-dataset-n3-X.npy')\n", (540, 566), True, 'import numpy as np\n'), ((568, 601), 'numpy.load', 'np.load', (['"""pysam-dataset-n3-y.npy"""'], {}), "('pysam-dataset-n3-y.npy')\n", (575, 601), True, 'import numpy as np\n'), ((978, 1045), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(5)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=5, kernel_size=3, padding='same', activation='relu')\n", (984, 1045), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPooling1D, Input\n'), ((1123, 1132), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1130, 1132), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPooling1D, Input\n'), ((1159, 1189), 'keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (1164, 1189), False, 'from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, 
Conv2D, MaxPooling1D, Input\n')] |
import numpy as np
import pandas as pd
import os
import faiss
from .fast_similarity_matching import FSM
from .utils import m_estimate, dim_estimate, apply_dim_reduct, apply_dim_reduct_inference, preprocess, evaluate_clusters
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
from sklearn.metrics import silhouette_score, adjusted_rand_score, adjusted_mutual_info_score, normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings('ignore')
import OCAT.example as example
from sklearn import svm
##############################################
# In: data_list [(a,n)...(z,n)] -- list of datasets
# m -- num of anchors
# Out:
# anchor list [(m,n)...(m,n)] -- list of anchors
# Description: The function generates the corresponding anchors for the datasets
##############################################
def find_anchors(data_list, m):
    """Generate anchor points for each dataset via faiss k-means.

    Parameters
    ----------
    data_list : list of 2D arrays
        Datasets, one (samples x features) array each.
    m : list of int
        Number of anchors to compute for the corresponding dataset.

    Returns
    -------
    list of 2D arrays
        K-means centroids (the anchors) for each dataset.
    """
    anchors_per_dataset = []
    for num_anchors, dataset in zip(m, data_list):
        # faiss requires float32 input
        samples = dataset.astype(np.float32)
        clusterer = faiss.Kmeans(samples.shape[1], num_anchors, niter=20, verbose=False)
        clusterer.train(samples)
        anchors_per_dataset.append(clusterer.centroids)
    return anchors_per_dataset
##############################################
# In: X (c,n)
# Out:
# S (c,n)
# Description: The function sorts data in descending order and
# picks the first kk th that sums up to 1 and scales them
# to be of range 0 to 1
##############################################
def SimplexPr(X):
    """Project each column of X onto the probability simplex.

    Each column is shifted by a threshold theta (computed from its
    sorted entries) and clipped at zero, so the surviving entries of
    each column sum to 1.

    Parameters
    ----------
    X : 2D array (C, N)
        Input matrix; each of the N columns is projected independently.

    Returns
    -------
    S : 2D array (C, N)
        Column-wise simplex projection of X.
    """
    C, N = X.shape
    # sort each column in descending order
    T = np.sort(X, axis=0)[::-1]
    # np.float was removed in NumPy 1.24; the builtin float is equivalent
    S = np.array(X).astype(float)
    for i in range(N):
        kk = -1
        t = T[:, i]
        # find kk: largest prefix whose thresholded entries stay positive
        for j in range(C):
            tep = t[j] - (np.sum(t[0:j + 1]) - 1) / (j + 1)
            if tep <= 0:
                kk = j - 1
                break
        if kk == -1:
            kk = C - 1
        # shift by theta and clip so that the column sums to 1
        theta = (np.sum(t[0:kk + 1]) - 1) / (kk + 1)
        S[:, i] = (X[:, i] - theta).clip(min=0).flatten()
    return S
####################################################################
# In: X (d,1) -- data from one cell
# U (d,s) -- Anchor data, s is the # of the closest anchors
# cn () -- # of iterations, 5-20
# Out:
# z (s,1) --
# Description: This function updates z cn times to find the best z
# such that x <- U * z
#
###################################################################
def LAE (x,U,cn):
    """Local Anchor Embedding: find simplex weights z minimizing ||x - U z||^2.

    Accelerated projected-gradient descent (momentum extrapolation plus
    a backtracking line search on the step size b), projecting every
    trial step onto the simplex with SimplexPr.

    Parameters
    ----------
    x: (d, 1) array -- data point from one cell.
    U: (d, s) array -- its s closest anchors.
    cn: int -- maximum number of iterations, usually 5-20.

    Returns
    -------
    z: (s, 1) array -- reconstruction weights such that x ~= U z.
    residual: float -- L1 distance between the last two iterates.
    """
    d, s = U.shape
    #print("d, s: ", d, s)
    # start from uniform weights on the simplex
    z0 = np.ones((s,1))/s
    z1 = z0
    # delta: momentum schedule coefficients; beta: accepted step sizes
    delta = np.zeros((1,cn+2))
    delta[0][0] = 0
    delta[0][1] = 1
    beta = np.zeros((1,cn+1))
    beta[0][0] = 1
    for t in range(cn):
        # momentum extrapolation point v
        alpha = (delta[0][t]-1)/delta[0][t+1]
        v = z1 + alpha*(z1-z0)
        # objective g(v) = ||x - U v||^2 / 2 and its gradient at v
        dif = x - np.matmul(U,v)
        gv = np.matmul(dif.transpose(),dif/2)
        dgv = np.matmul(U.transpose(),np.matmul(U,v)-x)
        # backtracking line search: double b until the quadratic upper
        # bound gvz dominates the true objective gz
        for j in range(d+1):
            b = 2**j*beta[0][t]
            z = SimplexPr(v-dgv/b)
            dif = x - np.matmul(U,z)
            gz = np.matmul(dif.transpose(),dif/2)
            dif = z - v
            gvz = gv + np.matmul(dgv.transpose(),dif) + b * np.matmul(dif.transpose(),dif/2)
            if gz <= gvz:
                beta[0][t+1] = b
                z0 = z1
                z1 = z
                break
        # if the line search never accepted a step, keep the last trial
        if beta[0][t+1] == 0:
            beta[0][t+1] = b
            z0 = z1
            z1 = z
        # Nesterov-style momentum schedule update
        delta[0][t+2] = ( 1+np.sqrt(1+4*delta[0][t+1]**2) )/2
        # stop early once the iterates stop moving
        if np.sum(abs(z1-z0)) <= 1e-4:
            break
    z = z1
    return z, np.sum(abs(z1-z0))
####################################################################
# In: TrainData (d,n) -- input data matrix, d dimension, n # of samples
# Anchor (d,m) -- Anchor data, m # of anchors
# s () -- # of closest anchors
# flag () -- 0 gives a Gaussian kernel-defined Z and 1 gives a LAE-optimized Z
# cn () -- # of iterations for LAE, usually set to 5-20
# Out:
# Z (n,m) -- anchor-to-data regression weight matrix
# rL (m,m) -- reduced graph Laplacian matrix
#
###################################################################
def AnchorGraph (TrainData, Anchor, s, flag, cn = 5):
    """Build the anchor-to-data regression weight matrix Z.

    Parameters
    ----------
    TrainData: (d, n) array -- input data, d features, n samples.
    Anchor: (d, m) array -- anchor points, m anchors.
    s: int -- number of closest anchors kept per sample.
    flag: int -- 0 computes Gaussian-kernel weights, 1 row-normalizes
        the raw similarities.
        NOTE(review): any other value (callers here pass 2) falls into
        the `else` branch below and runs the LAE optimization -- and for
        flag == 0 the `else` branch ALSO runs afterwards, overwriting
        the Gaussian weights; confirm this is intended.
    cn: int -- number of LAE iterations, usually 5-20.

    Returns
    -------
    Z: (n, m) float32 array -- anchor-to-data regression weight matrix.
    """
    d,m = Anchor.shape
    _, n = TrainData.shape
    # cosine similarity between every sample and every anchor
    Similarity = cosine_similarity(TrainData.transpose(), Anchor.transpose())
    # keep the s largest similarities (and their anchor indices) per sample
    val = np.sort(Similarity, axis=1)[:, -s:]
    pos = np.argsort(Similarity, axis=1)[:, -s:]
    #flatten matrix and reshape back to nxm
    ind = ((pos)*n + np.repeat(np.array([range(n)]).transpose(),s,1)).astype(int)
    # Gaussian kernel-defined Z
    if flag == 0:
        sigma = np.mean(np.sqrt(val[:,s-1]))
        val = np.exp(-val/(sigma*sigma))
        val = np.repeat(np.array([1/np.sum(val,1)]).transpose(),s,1)*val
    #LAE-optimized Z
    if flag == 1:
        # row-normalize the raw similarities so each row sums to one
        val = val/np.expand_dims(np.sum(val,1), axis=1)
    else:
        # column-normalize the anchors, then solve LAE per sample
        Anchor = np.matmul(Anchor,np.diag(1/np.sqrt(np.sum(Anchor*Anchor,axis=0))))
        for i in range(n):
            x = TrainData[:,i]
            x = (x/np.linalg.norm(x,2)).reshape((len(x),1))
            U = Anchor[:,pos[i,:]]
            a = example.LAE(x,U,cn)
            val[i,:] = a[0].flatten()
    #expand s # closest anchors to m
    Z = np.zeros((n* m))
    Z[ind] = [val]
    Z = Z.reshape((m,n)).transpose().astype(np.float32)
    #Z = np.matmul(Z, np.diag(np.sqrt(np.sum(Z,0)**-1)))
    return Z
####################################################################
# In: Z (n,m) -- normalized anchor-to-data regression weight matrix
# zW (n,m) -- unnormalized anchor-to-data regression weight matrix
# Out: ZW (n,m) -- approximation for ZW, where W=(n,n) similarity matrix
###################################################################
def Z_to_ZW(Z):
    """Approximate Z @ W (W the n-by-n similarity matrix) via the anchor graph.

    Parameters
    ----------
    Z: (n, m) array -- anchor-to-data regression weight matrix.

    Returns
    -------
    ZW: (n, m) array -- approximation of Z times the similarity matrix.
    W_anchor: (m, m) array -- degree-normalized anchor similarity matrix.
    """
    # anchor-space Gram matrix of the regression weights
    gram = np.matmul(Z.T, Z)
    # symmetric degree normalization: D^{-1/2} G D^{-1/2}
    deg_inv_sqrt = np.diag(np.sqrt(np.sum(gram, 0) ** -1))
    W_anchor = np.linalg.multi_dot([deg_inv_sqrt, gram, deg_inv_sqrt])
    return np.matmul(Z, W_anchor), W_anchor
####################################################################
# In: Z (n,m) -- input regression weight matrix
# Out:
# Z (n,m) -- matrix norm normalized regression weight matrix
###################################################################
def norm(Z):
    """L2-normalize each row of Z.

    Rows with zero norm divide to NaN and are mapped back to zeros by
    nan_to_num (infinities become large finite numbers, as before).

    Parameters
    ----------
    Z: (n, m) array -- input regression weight matrix.

    Returns
    -------
    (n, m) array -- row-normalized matrix.
    """
    row_norms = np.linalg.norm(Z, axis=1)[:, np.newaxis]
    return np.nan_to_num(np.divide(Z, row_norms))
####################################################################
# In: data_list [(a,dim)...(z,dim)] -- list of datasets (dim PCs)
# m_list -- num of anchors
# s_list -- num of anchors to be selected
# p -- percentage of NNs to consider
# cn -- rounds of optimization
# if_inference -- flag for cell inference
# Out: ZW (a+...+z, m) -- OCAT feature matrix
###################################################################
def sparse_encoding_integration(data_list, m_list, s_list=None, p=0.3, cn=5, if_inference=True):
    """Compute the OCAT feature matrix ZW for a list of datasets.

    Parameters
    ----------
    data_list -- list of datasets (samples x reduced dims)
    m_list -- number of anchors per dataset
    s_list -- number of closest anchors per dataset
    p -- fraction of nearest neighbours to consider
    cn -- rounds of LAE optimization
    if_inference -- if True, also return reference info for inference

    Returns
    -------
    ZW -- (total samples, m) OCAT feature matrix; with `if_inference`,
    additionally anchor_list, s_list and W_anchor.
    """
    # anchors for every dataset
    anchor_list = find_anchors(data_list, m_list)
    # sparse anchor graph: each dataset against every anchor set
    Z_blocks = []
    for dataset in data_list:
        per_anchor = [AnchorGraph(dataset.transpose(), anchor.transpose(), s_list[j], 2, cn)
                      for j, anchor in enumerate(anchor_list)]
        Z_blocks.append(np.concatenate(per_anchor, axis=1))
    Z = np.nan_to_num(np.concatenate(Z_blocks, axis=0))
    ZW, W_anchor = Z_to_ZW(Z)
    ZW = norm(np.nan_to_num(ZW))
    if if_inference:
        return ZW, anchor_list, s_list, W_anchor
    return ZW
####################################################################
# In: data_list [(a,dim)...(z,dim)] -- list of datasets (dim PCs)
# m_list -- num of anchors
# s_list -- num of anchors to be selected
# dim -- num of dimensions after dim reduct
# p -- percentage of NNs to consider
# log_norm -- if apply log norm
# l2_norm -- if apply l2 norm
# if_inference -- if prepare for cell inference
# random_seed -- random seed
# cn -- rounds of optimization
# Out: ZW (a+...+z, m) -- OCAT feature matrix
# db_list -- anchor_list, s_list, W_anchor, Wm
# from reference dataset for cell inference
###################################################################
def run_OCAT(data_list, m_list=None, s_list=None, dim=None, p=0.3, log_norm=True, l2_norm=True, if_inference=False, random_seed=42):
    """Run the full OCAT pipeline: preprocess, reduce dims, sparse-encode.

    Parameters
    ----------
    data_list -- list of raw datasets
    m_list -- number of anchors per dataset (estimated if None)
    s_list -- closest anchors per dataset (round(p*m) if None)
    dim -- dimensionality after reduction (estimated if None)
    p -- fraction of anchors used as nearest neighbours
    log_norm, l2_norm -- pre-processing flags
    if_inference -- if True, also return reference info for inference
    random_seed -- seed for the FSM dimensionality reduction

    Returns
    -------
    ZW -- OCAT feature matrix; with `if_inference`, also
    db_list = [anchor_list, s_list, W_anchor, Wm].
    """
    # identity checks: '== None' is fragile for array-like arguments
    if m_list is None:
        m_list = m_estimate(data_list)
    if s_list is None:
        s_list = [round(p * m) for m in m_list]
    if dim is None:
        dim = dim_estimate(data_list)
    data_list = preprocess(data_list, log_norm=log_norm, l2_norm=l2_norm)
    if if_inference:
        data_list, Wm = apply_dim_reduct(data_list, dim=dim, mode='FSM', random_seed=random_seed)
        ZW, anchor_list, s_list, W_anchor = sparse_encoding_integration(data_list, m_list=m_list, s_list=s_list, p=p, cn=5, if_inference=True)
        # everything the inference step needs from the reference data
        db_list = [anchor_list, s_list, W_anchor, Wm]
        return ZW, db_list
    else:
        data_list, _ = apply_dim_reduct(data_list, dim=dim, mode='FSM', random_seed=random_seed)
        ZW = sparse_encoding_integration(data_list, m_list=m_list, s_list=s_list, p=p, cn=5, if_inference=False)
        return ZW
####################################################################
# In: data_list [(a,dim)...(z,dim)] -- list of inference datasets (dim PCs)
# ZW_db -- OCAT features of the reference dataset
# labels_db -- cell type annotations from reference dataset
# db_list -- reference db info returned from run_OCAT
# log_norm -- if apply log norm
# l2_norm -- if apply l2 norm
# cn -- rounds of optimization
# Out: ZW (a+...+z, m) -- OCAT features of the inference dataset
# labels -- inferred cell type labels from inference dataset
###################################################################
def run_cell_inference(data_list, ZW_db, labels_db, db_list, log_norm=True, l2_norm=True, cn=5):
    """Embed inference datasets with the reference anchors and infer labels.

    Parameters
    ----------
    data_list -- list of inference datasets
    ZW_db -- OCAT features of the reference dataset
    labels_db -- cell type annotations of the reference dataset
    db_list -- [anchor_list, s_list, W_anchor, Wm] from run_OCAT
    log_norm, l2_norm -- pre-processing flags
    cn -- rounds of LAE optimization

    Returns
    -------
    ZW -- OCAT features of the inference dataset
    labels -- inferred cell type labels (SVM trained on the reference)
    """
    anchor_list, s_list, W_anchor, Wm = db_list
    data_list = preprocess(data_list, log_norm=log_norm, l2_norm=l2_norm)
    # project onto the reference FSM basis
    data_list = apply_dim_reduct_inference(data_list, Wm)
    Z_blocks = []
    for dataset in data_list:
        per_anchor = [AnchorGraph(dataset.transpose(), anchor.transpose(), s_list[j], 2, cn)
                      for j, anchor in enumerate(anchor_list)]
        Z_blocks.append(np.concatenate(per_anchor, axis=1))
    Z = np.nan_to_num(np.concatenate(Z_blocks, axis=0))
    ZW = norm(np.nan_to_num(np.matmul(Z, W_anchor)))
    # classify the new cells with an SVM fit on the reference features
    classifier = SVC(random_state=42)
    classifier.fit(ZW_db, labels_db)
    return ZW, classifier.predict(ZW)
def post_processing_pca(Z, topk=20):
    """Standard-scale the feature matrix and return its top-k principal
    components (ARPACK solver).

    Parameters
    ----------
    Z -- (n, m) feature matrix
    topk -- number of principal components to keep

    Returns
    -------
    (n, topk) array of principal-component scores.
    """
    # center and scale each feature before PCA
    standardized = StandardScaler().fit_transform(Z)
    # project onto the leading `topk` components
    return PCA(n_components=topk, svd_solver='arpack').fit_transform(standardized)
def tune_hyperparameters(data_list, if_tune_m=True, m_range=None, if_tune_dim=True, dim_range=None, if_tune_p=False, p_range=None, log_norm=True, l2_norm=True, true_labels=None, verbose=True):
    """Grid-search the OCAT hyperparameters m (anchors), dim and p.

    Every combination in the candidate ranges is evaluated by running
    `run_OCAT` and scoring the resulting embedding:
    * with `true_labels`: NMI / AMI / ARI of the predicted clusters;
    * without: silhouette score of the estimated clusters.

    Parameters
    ----------
    data_list -- list of datasets to integrate
    if_tune_m, if_tune_dim, if_tune_p -- whether to sweep each parameter
    m_range, dim_range, p_range -- candidate values (estimated if None)
    log_norm, l2_norm -- pre-processing flags
    true_labels -- ground-truth annotations (optional)
    verbose -- if True, print the ranges and the final table

    Returns
    -------
    df -- pandas DataFrame, one row per evaluated combination
    """
    # Specify data normalization
    data_list = preprocess(data_list, log_norm=log_norm, l2_norm=l2_norm)
    num_datasets = len(data_list)
    # Impute m if None ('is None', not '== None': equality comparison is
    # fragile for array-like arguments)
    if m_range is None:
        m_est = max(m_estimate(data_list))
        if if_tune_m:
            m_range = [m_est+i*5 for i in range(-3, 3)]
        else:
            m_range = [m_est]
            print('WARNING no value of m is given, default m={} for the dataset(s) from estimation.'.format(m_est))
    # Impute dim if None
    if dim_range is None:
        dim_est = dim_estimate(data_list)
        if if_tune_dim:
            dim_range = [dim_est+i*10 for i in range(-2, 2)]
        else:
            dim_range = [dim_est]
            print('WARNING no value of dim is given, default dim={} for the dataset(s) from estimation.'.format(dim_est))
    # Impute p if None
    if p_range is None:
        if if_tune_p:
            p_range = [0.1, 0.3, 0.5]
        else:
            p_range = [0.3]
            print('WARNING no value of p is given, default p=0.3 for the dataset(s) from estimation.')
    # If ground truth given, fix the number of clusters once
    if true_labels is not None:
        n_clusters = len(np.unique(true_labels))
    out = []
    if verbose:
        print('Testing hyperparameters in the range below:')
        print('Range for m: {}'.format(m_range))
        print('Range for dim: {}'.format(dim_range))
        print('Range for p: {}'.format(p_range))
    for m in m_range:
        for n_dim in dim_range:
            for p in p_range:
                # need at least 3 ghost cells (closest anchors) per sample
                if m*p < 3:
                    print('Skip m={} and p={} as the number of ghost cells is smaller than 3.'.format(m, p))
                    continue
                ZW = run_OCAT(data_list=data_list, m_list=[m]*num_datasets, dim=n_dim, p=p, log_norm=False, l2_norm=False)
                if true_labels is None:
                    labels_pred, n_clusters = evaluate_clusters(ZW, return_num_cluster=True)
                    sil_score = silhouette_score(ZW, labels_pred)
                    out.append([m, n_dim, p, n_clusters, sil_score])
                else:
                    labels_pred = evaluate_clusters(ZW, num_cluster=n_clusters)
                    NMI_cell = normalized_mutual_info_score(true_labels, labels_pred)
                    AMI_cell = adjusted_mutual_info_score(true_labels, labels_pred)
                    ARI_cell = adjusted_rand_score(true_labels, labels_pred)
                    out.append([m, n_dim, p, NMI_cell, AMI_cell, ARI_cell])
    out = np.array(out)
    if true_labels is not None:
        df = pd.DataFrame(data=out, columns=['m', 'n_dim', 'p', 'NMI_score', 'AMI_score', 'ARI_score'])
    else:
        df = pd.DataFrame(data=out, columns=['m', 'n_dim', 'p', 'n_clusters', 'silhoutte_score'])
    if verbose:
        print(df)
    return df
| [
"sklearn.preprocessing.StandardScaler",
"numpy.nan_to_num",
"numpy.sum",
"sklearn.metrics.adjusted_mutual_info_score",
"numpy.ones",
"numpy.argsort",
"numpy.linalg.norm",
"numpy.exp",
"sklearn.svm.SVC",
"sklearn.metrics.adjusted_rand_score",
"numpy.unique",
"pandas.DataFrame",
"faiss.Kmeans"... | [((591, 624), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (614, 624), False, 'import warnings\n'), ((2893, 2914), 'numpy.zeros', 'np.zeros', (['(1, cn + 2)'], {}), '((1, cn + 2))\n', (2901, 2914), True, 'import numpy as np\n'), ((2963, 2984), 'numpy.zeros', 'np.zeros', (['(1, cn + 1)'], {}), '((1, cn + 1))\n', (2971, 2984), True, 'import numpy as np\n'), ((5613, 5628), 'numpy.zeros', 'np.zeros', (['(n * m)'], {}), '(n * m)\n', (5621, 5628), True, 'import numpy as np\n'), ((6189, 6206), 'numpy.matmul', 'np.matmul', (['Z.T', 'Z'], {}), '(Z.T, Z)\n', (6198, 6206), True, 'import numpy as np\n'), ((6276, 6323), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[W_diag, W_anchor, W_diag]'], {}), '([W_diag, W_anchor, W_diag])\n', (6295, 6323), True, 'import numpy as np\n'), ((6333, 6355), 'numpy.matmul', 'np.matmul', (['Z', 'W_anchor'], {}), '(Z, W_anchor)\n', (6342, 6355), True, 'import numpy as np\n'), ((6688, 6713), 'numpy.linalg.norm', 'np.linalg.norm', (['Z'], {'axis': '(1)'}), '(Z, axis=1)\n', (6702, 6713), True, 'import numpy as np\n'), ((6727, 6757), 'numpy.expand_dims', 'np.expand_dims', (['Z_norm'], {'axis': '(1)'}), '(Z_norm, axis=1)\n', (6741, 6757), True, 'import numpy as np\n'), ((6766, 6786), 'numpy.divide', 'np.divide', (['Z', 'Z_norm'], {}), '(Z, Z_norm)\n', (6775, 6786), True, 'import numpy as np\n'), ((6795, 6811), 'numpy.nan_to_num', 'np.nan_to_num', (['Z'], {}), '(Z)\n', (6808, 6811), True, 'import numpy as np\n'), ((11940, 11962), 'numpy.matmul', 'np.matmul', (['Z', 'W_anchor'], {}), '(Z, W_anchor)\n', (11949, 11962), True, 'import numpy as np\n'), ((12007, 12027), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (12010, 12027), False, 'from sklearn.svm import SVC\n'), ((12196, 12212), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12210, 12212), False, 'from sklearn.preprocessing import StandardScaler\n'), ((12289, 
12332), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'topk', 'svd_solver': '"""arpack"""'}), "(n_components=topk, svd_solver='arpack')\n", (12292, 12332), False, 'from sklearn.decomposition import PCA\n'), ((15095, 15108), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (15103, 15108), True, 'import numpy as np\n'), ((1196, 1242), 'faiss.Kmeans', 'faiss.Kmeans', (['d', 'm[i]'], {'niter': '(20)', 'verbose': '(False)'}), '(d, m[i], niter=20, verbose=False)\n', (1208, 1242), False, 'import faiss\n'), ((1733, 1751), 'numpy.sort', 'np.sort', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1740, 1751), True, 'import numpy as np\n'), ((2852, 2867), 'numpy.ones', 'np.ones', (['(s, 1)'], {}), '((s, 1))\n', (2859, 2867), True, 'import numpy as np\n'), ((4732, 4759), 'numpy.sort', 'np.sort', (['Similarity'], {'axis': '(1)'}), '(Similarity, axis=1)\n', (4739, 4759), True, 'import numpy as np\n'), ((4778, 4808), 'numpy.argsort', 'np.argsort', (['Similarity'], {'axis': '(1)'}), '(Similarity, axis=1)\n', (4788, 4808), True, 'import numpy as np\n'), ((5052, 5082), 'numpy.exp', 'np.exp', (['(-val / (sigma * sigma))'], {}), '(-val / (sigma * sigma))\n', (5058, 5082), True, 'import numpy as np\n'), ((7965, 8003), 'numpy.concatenate', 'np.concatenate', (['dataset_Z_list'], {'axis': '(1)'}), '(dataset_Z_list, axis=1)\n', (7979, 8003), True, 'import numpy as np\n'), ((8059, 8089), 'numpy.concatenate', 'np.concatenate', (['Z_list'], {'axis': '(0)'}), '(Z_list, axis=0)\n', (8073, 8089), True, 'import numpy as np\n'), ((8135, 8152), 'numpy.nan_to_num', 'np.nan_to_num', (['ZW'], {}), '(ZW)\n', (8148, 8152), True, 'import numpy as np\n'), ((11805, 11843), 'numpy.concatenate', 'np.concatenate', (['dataset_Z_list'], {'axis': '(1)'}), '(dataset_Z_list, axis=1)\n', (11819, 11843), True, 'import numpy as np\n'), ((11899, 11929), 'numpy.concatenate', 'np.concatenate', (['Z_list'], {'axis': '(0)'}), '(Z_list, axis=0)\n', (11913, 11929), True, 'import numpy as np\n'), ((11977, 11994), 
'numpy.nan_to_num', 'np.nan_to_num', (['ZW'], {}), '(ZW)\n', (11990, 11994), True, 'import numpy as np\n'), ((15154, 15248), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'out', 'columns': "['m', 'n_dim', 'p', 'NMI_score', 'AMI_score', 'ARI_score']"}), "(data=out, columns=['m', 'n_dim', 'p', 'NMI_score', 'AMI_score',\n 'ARI_score'])\n", (15166, 15248), True, 'import pandas as pd\n'), ((15268, 15356), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'out', 'columns': "['m', 'n_dim', 'p', 'n_clusters', 'silhoutte_score']"}), "(data=out, columns=['m', 'n_dim', 'p', 'n_clusters',\n 'silhoutte_score'])\n", (15280, 15356), True, 'import pandas as pd\n'), ((1766, 1777), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1774, 1777), True, 'import numpy as np\n'), ((3120, 3135), 'numpy.matmul', 'np.matmul', (['U', 'v'], {}), '(U, v)\n', (3129, 3135), True, 'import numpy as np\n'), ((5017, 5039), 'numpy.sqrt', 'np.sqrt', (['val[:, s - 1]'], {}), '(val[:, s - 1])\n', (5024, 5039), True, 'import numpy as np\n'), ((5510, 5531), 'OCAT.example.LAE', 'example.LAE', (['x', 'U', 'cn'], {}), '(x, U, cn)\n', (5521, 5531), True, 'import OCAT.example as example\n'), ((13752, 13774), 'numpy.unique', 'np.unique', (['true_labels'], {}), '(true_labels)\n', (13761, 13774), True, 'import numpy as np\n'), ((2164, 2183), 'numpy.sum', 'np.sum', (['t[0:kk + 1]'], {}), '(t[0:kk + 1])\n', (2170, 2183), True, 'import numpy as np\n'), ((3219, 3234), 'numpy.matmul', 'np.matmul', (['U', 'v'], {}), '(U, v)\n', (3228, 3234), True, 'import numpy as np\n'), ((3355, 3370), 'numpy.matmul', 'np.matmul', (['U', 'z'], {}), '(U, z)\n', (3364, 3370), True, 'import numpy as np\n'), ((3791, 3828), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * delta[0][t + 1] ** 2)'], {}), '(1 + 4 * delta[0][t + 1] ** 2)\n', (3798, 3828), True, 'import numpy as np\n'), ((5224, 5238), 'numpy.sum', 'np.sum', (['val', '(1)'], {}), '(val, 1)\n', (5230, 5238), True, 'import numpy as np\n'), ((6236, 6255), 'numpy.sum', 'np.sum', (['W_anchor', 
'(0)'], {}), '(W_anchor, 0)\n', (6242, 6255), True, 'import numpy as np\n'), ((14555, 14588), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['ZW', 'labels_pred'], {}), '(ZW, labels_pred)\n', (14571, 14588), False, 'from sklearn.metrics import silhouette_score, adjusted_rand_score, adjusted_mutual_info_score, normalized_mutual_info_score\n'), ((14791, 14845), 'sklearn.metrics.normalized_mutual_info_score', 'normalized_mutual_info_score', (['true_labels', 'labels_pred'], {}), '(true_labels, labels_pred)\n', (14819, 14845), False, 'from sklearn.metrics import silhouette_score, adjusted_rand_score, adjusted_mutual_info_score, normalized_mutual_info_score\n'), ((14878, 14930), 'sklearn.metrics.adjusted_mutual_info_score', 'adjusted_mutual_info_score', (['true_labels', 'labels_pred'], {}), '(true_labels, labels_pred)\n', (14904, 14930), False, 'from sklearn.metrics import silhouette_score, adjusted_rand_score, adjusted_mutual_info_score, normalized_mutual_info_score\n'), ((14963, 15008), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['true_labels', 'labels_pred'], {}), '(true_labels, labels_pred)\n', (14982, 15008), False, 'from sklearn.metrics import silhouette_score, adjusted_rand_score, adjusted_mutual_info_score, normalized_mutual_info_score\n'), ((1977, 1995), 'numpy.sum', 'np.sum', (['t[0:j + 1]'], {}), '(t[0:j + 1])\n', (1983, 1995), True, 'import numpy as np\n'), ((5309, 5340), 'numpy.sum', 'np.sum', (['(Anchor * Anchor)'], {'axis': '(0)'}), '(Anchor * Anchor, axis=0)\n', (5315, 5340), True, 'import numpy as np\n'), ((5418, 5438), 'numpy.linalg.norm', 'np.linalg.norm', (['x', '(2)'], {}), '(x, 2)\n', (5432, 5438), True, 'import numpy as np\n'), ((5115, 5129), 'numpy.sum', 'np.sum', (['val', '(1)'], {}), '(val, 1)\n', (5121, 5129), True, 'import numpy as np\n')] |
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import export_graphviz
import pydot
import json
# Load the training data; the 'hired' column is the regression target.
features = pd.read_csv("dataset2.csv")
labels = np.array(features['hired'])
features= features.drop('hired', axis = 1)
feature_list = list(features.columns)
features = np.array(features)
# Hold out 25% of the rows for evaluation (fixed seed for reproducibility).
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size = 0.25, random_state = 42)
# Train a 1000-tree random forest regressor on the training split.
rf = RandomForestRegressor(n_estimators = 1000, random_state = 42)
rf.fit(train_features, train_labels);
# Predict for the sample(s) passed as a JSON array on the command line
# and print the result so the calling process can parse it.
arr = json.loads(sys.argv[1])
predictions = rf.predict(arr)
print(predictions.tolist())
# errors = abs(predictions - test_labels)
# print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
#
# tree = rf.estimators_[5]
# export_graphviz(tree, out_file = 'tree.dot', feature_names = feature_list, rounded = True, precision = 1)
# (graph, ) = pydot.graph_from_dot_file('tree.dot')
# graph.write_png('tree.png')
#
# importances = list(rf.feature_importances_)
# feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
# feature_importances = sorted(feature_importances, key = lambda x: x[1], reverse = True)
# [print('Variable: {:20} Importance: {}'.format(*pair)) for pair in feature_importances];
| [
"json.loads",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestRegressor",
"numpy.array"
] | [((232, 259), 'pandas.read_csv', 'pd.read_csv', (['"""dataset2.csv"""'], {}), "('dataset2.csv')\n", (243, 259), True, 'import pandas as pd\n'), ((270, 297), 'numpy.array', 'np.array', (["features['hired']"], {}), "(features['hired'])\n", (278, 297), True, 'import numpy as np\n'), ((390, 408), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (398, 408), True, 'import numpy as np\n'), ((468, 535), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(features, labels, test_size=0.25, random_state=42)\n', (484, 535), False, 'from sklearn.model_selection import train_test_split\n'), ((546, 603), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(1000)', 'random_state': '(42)'}), '(n_estimators=1000, random_state=42)\n', (567, 603), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((653, 676), 'json.loads', 'json.loads', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (663, 676), False, 'import json\n')] |
"""
Collection of I/O functions to post-process the numerical solution from a
PetIBM simulation.
"""
import os
import sys
import struct
import numpy
sys.path.append(os.path.join(os.environ['PETSC_DIR'], 'bin'))
import PetscBinaryIO
# reduce is no longer a builtin in Python 3
# but has been added to the functools package
if sys.version_info[0] >= 3:
from functools import reduce
class Field(object):
    """
    Holds a scalar field: its grid coordinates and its nodal values.
    """

    def __init__(self, x=None, y=None, z=None, values=None):
        """
        Stores the grid coordinates and the nodal values of the field.

        Parameters
        ----------
        x, y, z: 3 1D arrays of floats, optional
            Coordinates of the grid-nodes in each direction;
            default: None, None, None.
        values: 1D array of floats, optional
            Nodal values of the field;
            default: None.
        """
        self.x = x
        self.y = y
        self.z = z
        self.values = values
def get_time_steps(time_steps_range=None, directory=None):
    """
    Returns the time-steps to post-process.

    If the range is not provided, lists the time-step folders present in
    the directory (folders whose name starts with '0').

    Parameters
    ----------
    time_steps_range: 3-list of integers, optional
        Initial, final and stride of the time-steps to consider;
        default: None (all saved time-steps).
    directory: string, optional
        Directory containing the saved time-step folders;
        default: None (the current working directory, resolved at call
        time -- an `os.getcwd()` default would be frozen at import time).
    """
    if directory is None:
        directory = os.getcwd()
    if time_steps_range:
        return range(time_steps_range[0],
                     time_steps_range[1] + 1,
                     time_steps_range[2])
    else:
        return sorted(int(folder) for folder in os.listdir(directory)
                      if folder[0] == '0')
def read_grid(directory=None, file_name='grid.txt'):
    """
    Reads the coordinates from the grid file.

    The file may be either binary (per direction: the cell count as an
    int followed by the node coordinates as doubles) or plain text
    (first line: number of cells per direction, then all node
    coordinates, one per line).

    Parameters
    ----------
    directory: string, optional
        Directory of the simulation;
        default: None (the current working directory, resolved at call
        time rather than frozen at import time).
    file_name: string, optional
        Name of the file containing the grid points;
        default: 'grid.txt'.

    Returns
    -------
    grid: list of 1D array of floats
        Coordinates of the grid-nodes in each direction.
    """
    if directory is None:
        directory = os.getcwd()
    print('[info] reading the grid ...')
    file_path = os.path.join(directory, file_name)
    # sniff the first bytes to decide binary vs text; use a context
    # manager so the sniffing handle is not leaked
    textchars = bytearray({7, 8, 9, 10, 12, 13, 27}
                          | set(range(0x20, 0x100)) - {0x7f})
    is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))
    with open(file_path, 'rb') as infile:
        binary_format = is_binary_string(infile.read(1024))
    if binary_format:
        with open(file_path, 'rb') as infile:
            # x-direction
            nx = struct.unpack('i', infile.read(4))[0]
            x = numpy.array(struct.unpack('d' * (nx + 1),
                                        infile.read(8 * (nx + 1))))
            # y-direction
            ny = struct.unpack('i', infile.read(4))[0]
            y = numpy.array(struct.unpack('d' * (ny + 1),
                                        infile.read(8 * (ny + 1))))
        return x, y
    else:
        with open(file_path, 'r') as infile:
            n_cells = numpy.array([int(n)
                                  for n in infile.readline().strip().split()])
            coords = numpy.loadtxt(infile, dtype=numpy.float64)
        # split the flat coordinate list into one array per direction
        # (each direction has n_cells + 1 nodes)
        return numpy.array(numpy.split(coords, numpy.cumsum(n_cells[:-1] + 1)))
def read_velocity(time_step, coords, periodic=None, directory=None):
    """
    Reads the velocity field at a given time-step.

    Parameters
    ----------
    time_step: integer
        Time-step at which the field will be read.
    coords: list of 1D arrays of floats
        Coordinates in each direction.
    periodic: list of strings, optional
        List of directions with periodic boundary conditions;
        default: None (no periodic direction -- a `[]` default would be
        a shared mutable default argument).
    directory: string, optional
        Directory of the simulation;
        default: None (the current working directory, resolved at call
        time rather than frozen at import time).

    Returns
    -------
    field_vector: list of Field objects
        List containing the velocity field in each direction.
    """
    # resolve defaults at call time
    if periodic is None:
        periodic = []
    if directory is None:
        directory = os.getcwd()
    print('Read the velocity field at time-step {} ...'.format(time_step))
    dim3 = (True if len(coords) == 3 else False)
    x, y, z = coords[0], coords[1], (None if not dim3 else coords[2])
    # compute cell-widths
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dz = (None if not dim3 else z[1:] - z[:-1])
    # number of cells
    nx, ny, nz = dx.size, dy.size, (None if not dim3 else dz.size)
    # folder with numerical solution
    time_step_directory = os.path.join(directory, '{:0>7}'.format(time_step))
    # read x-flux
    flux_path = os.path.join(time_step_directory, 'qx.dat')
    qx = PetscBinaryIO.PetscBinaryIO().readBinaryFile(flux_path)[0]
    # read y-flux
    flux_path = os.path.join(time_step_directory, 'qy.dat')
    qy = PetscBinaryIO.PetscBinaryIO().readBinaryFile(flux_path)[0]
    # get velocity nodes coordinates (staggered grid)
    xu, yu = x[1:-1], 0.5 * (y[:-1] + y[1:])
    xv, yv = 0.5 * (x[:-1] + x[1:]), y[1:-1]
    if dim3:
        # get third-dimension coordinate of x-velocity nodes
        zu = 0.5 * (z[:-1] + z[1:])
        # compute x-velocity field (flux divided by the face area)
        qx = qx.reshape((nz, ny, (nx if 'x' in periodic else nx - 1)))
        u = (qx[:, :, :(-1 if 'x' in periodic else None)]
             / reduce(numpy.multiply, numpy.ix_(dz, dy, numpy.ones(nx - 1))))
        # get third-dimension coordinate of y-velocity nodes
        zv = 0.5 * (z[:-1] + z[1:])
        # compute y-velocity field
        qy = qy.reshape((nz, (ny if 'y' in periodic else ny - 1), nx))
        v = (qy[:, :(-1 if 'y' in periodic else None), :]
             / reduce(numpy.multiply, numpy.ix_(dz, numpy.ones(ny - 1), dx)))
        # read z-flux
        flux_path = os.path.join(time_step_directory, 'qz.dat')
        qz = PetscBinaryIO.PetscBinaryIO().readBinaryFile(flux_path)[0]
        # get coordinates of z-velocity nodes
        xw, yw, zw = 0.5 * (x[:-1] + x[1:]), 0.5 * (y[:-1] + y[1:]), z[1:-1]
        # compute z-velocity field
        qz = qz.reshape(((nz if 'z' in periodic else nz - 1), ny, nx))
        w = (qz[:(-1 if 'z' in periodic else None), :, :]
             / reduce(numpy.multiply, numpy.ix_(numpy.ones(nz - 1), dy, dx)))
        # tests
        assert (zu.size, yu.size, xu.size) == u.shape
        assert (zv.size, yv.size, xv.size) == v.shape
        assert (zw.size, yw.size, xw.size) == w.shape
        return [Field(x=xu, y=yu, z=zu, values=u),
                Field(x=xv, y=yv, z=zv, values=v),
                Field(x=xw, y=yw, z=zw, values=w)]
    else:
        # compute x-velocity field
        qx = qx.reshape((ny, (nx if 'x' in periodic else nx - 1)))
        u = (qx[:, :(-1 if 'x' in periodic else None)]
             / numpy.outer(dy, numpy.ones(nx - 1)))
        # compute y-velocity field
        qy = qy.reshape(((ny if 'y' in periodic else ny - 1), nx))
        v = (qy[:(-1 if 'y' in periodic else None), :]
             / numpy.outer(numpy.ones(ny - 1), dx))
        # tests
        assert (yu.size, xu.size) == u.shape
        assert (yv.size, xv.size) == v.shape
        return [Field(x=xu, y=yu, values=u),
                Field(x=xv, y=yv, values=v)]
def read_pressure(time_step, coords, directory=None):
    """
    Reads the pressure field from file given the time-step.

    Parameters
    ----------
    time_step: integer
        Time-step at which the field will be read.
    coords: list of 1D arrays of floats
        Grid coordinates in each direction.
    directory: string, optional
        Directory of the simulation;
        default: None (the current working directory, resolved at call
        time rather than frozen at import time).

    Returns
    -------
    pressure: Field object
        The pressure field.
    """
    if directory is None:
        directory = os.getcwd()
    print('Read the pressure field at time-step {} ...'.format(time_step))
    dim3 = (True if len(coords) == 3 else False)
    x, y, z = coords[0], coords[1], (None if not dim3 else coords[2])
    # folder with numerical solution
    time_step_directory = os.path.join(directory, '{:0>7}'.format(time_step))
    # pressure
    pressure_path = os.path.join(time_step_directory, 'phi.dat')
    p = PetscBinaryIO.PetscBinaryIO().readBinaryFile(pressure_path)[0]
    # get pressure nodes coordinates (cell centers)
    xp, yp = 0.5 * (x[:-1] + x[1:]), 0.5 * (y[:-1] + y[1:])
    nx, ny = xp.size, yp.size
    if dim3:
        # get third-dimension coordinates of pressure nodes
        zp = 0.5 * (z[:-1] + z[1:])
        nz = zp.size
        # compute pressure field
        p = p.reshape((nz, ny, nx))
        # tests
        assert (zp.size, yp.size, xp.size) == p.shape
        return Field(x=xp, y=yp, z=zp, values=p)
    else:
        # compute pressure field
        p = p.reshape((ny, nx))
        # tests
        assert (yp.size, xp.size) == p.shape
        return Field(x=xp, y=yp, values=p)
def write_vtk(field, time_step, name,
directory=os.getcwd(),
view=[[float('-inf'), float('-inf'), float('-inf')],
[float('inf'), float('inf'), float('inf')]],
stride=1):
"""
Writes the field in a .vtk file.
Parameters
----------
field: Field object
Field to write.
time_step: integer
Time-step to write.
name: string
Name of the field.
directory: string, optional
Directory of the simulation;
default: '<current-working-directory>'.
view: list of floats, optional
Bottom-left and top-right coordinates of the rectangular view to write;
default: the whole domain is written.
stride: integer, optional
Stride at which the field is written;
default: 1.
"""
print('Write the {} field into .vtk file ...'.format(name))
if type(field) is not list:
field = [field]
try:
dim3 = field[0].z.all()
except:
dim3 = False
scalar = (True if len(field) == 1 else False)
# get mask for the view
mx = numpy.where(numpy.logical_and(field[0].x > view[0][0],
field[0].x < view[1][0]))[0][::stride]
my = numpy.where(numpy.logical_and(field[0].y > view[0][1],
field[0].y < view[1][1]))[0][::stride]
if dim3:
mz = numpy.where(numpy.logical_and(field[0].z > view[0][2],
field[0].z < view[1][2]))[0][::stride]
# create directory where .vtk file will be saved
vtk_directory = os.path.join(directory, 'vtk_files', name)
if not os.path.isdir(vtk_directory):
print('Make directory: {}'.format(vtk_directory))
os.makedirs(vtk_directory)
vtk_file_path = os.path.join(vtk_directory,
name + '{:0>7}'.format(time_step))
# get coordinates within the view
x = field[0].x[mx]
y = field[0].y[my]
z = (None if not dim3 else field[0].z[mz])
nx, ny, nz = x.size, y.size, (1 if not dim3 else z.size)
# write .vtk file
with open(vtk_file_path, 'w') as outfile:
outfile.write('# vtk DataFile Version 3.0\n')
outfile.write('contains {} field\n'.format(name))
outfile.write('ASCII\n')
outfile.write('DATASET RECTILINEAR_GRID\n')
outfile.write('DIMENSIONS {} {} {}\n'.format(nx, ny, nz))
outfile.write('X_COORDINATES {} double\n'.format(nx))
numpy.savetxt(outfile, x, fmt='%f')
outfile.write('Y_COORDINATES {} double\n'.format(ny))
numpy.savetxt(outfile, y, fmt='%f')
outfile.write('Z_COORDINATES {} double\n'.format(nz))
if dim3:
numpy.savetxt(outfile, z, fmt='%f')
else:
outfile.write('0.0\n')
outfile.write('POINT_DATA {}\n'.format(nx * ny * nz))
if scalar:
outfile.write('\nSCALARS {} double 1\nLOOKUP_TABLE default\n'
''.format(name))
if dim3:
values = field[0].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
else:
values = field[0].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile, values.flatten(),
fmt='%.6f', delimiter='\t')
else:
outfile.write('\nVECTORS {} double\n'.format(name))
if dim3:
values_x = field[0].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_y = field[1].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_z = field[2].values[mz[0]:mz[-1] + 1,
my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile,
numpy.c_[values_x.flatten(),
values_y.flatten(),
values_z.flatten()],
fmt='%.6f', delimiter='\t')
else:
values_x = field[0].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
values_y = field[1].values[my[0]:my[-1] + 1,
mx[0]:mx[-1] + 1]
numpy.savetxt(outfile, numpy.c_[values_x.flatten(),
values_y.flatten()],
fmt='%6f', delimiter='\t')
| [
"os.makedirs",
"numpy.logical_and",
"os.getcwd",
"os.path.isdir",
"numpy.savetxt",
"numpy.ones",
"numpy.cumsum",
"PetscBinaryIO.PetscBinaryIO",
"numpy.loadtxt",
"os.path.join",
"os.listdir"
] | [((167, 211), 'os.path.join', 'os.path.join', (["os.environ['PETSC_DIR']", '"""bin"""'], {}), "(os.environ['PETSC_DIR'], 'bin')\n", (179, 211), False, 'import os\n'), ((986, 997), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (995, 997), False, 'import os\n'), ((1805, 1816), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1814, 1816), False, 'import os\n'), ((2302, 2336), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (2314, 2336), False, 'import os\n'), ((3437, 3448), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3446, 3448), False, 'import os\n'), ((4548, 4591), 'os.path.join', 'os.path.join', (['time_step_directory', '"""qx.dat"""'], {}), "(time_step_directory, 'qx.dat')\n", (4560, 4591), False, 'import os\n'), ((4688, 4731), 'os.path.join', 'os.path.join', (['time_step_directory', '"""qy.dat"""'], {}), "(time_step_directory, 'qy.dat')\n", (4700, 4731), False, 'import os\n'), ((6969, 6980), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6978, 6980), False, 'import os\n'), ((7734, 7778), 'os.path.join', 'os.path.join', (['time_step_directory', '"""phi.dat"""'], {}), "(time_step_directory, 'phi.dat')\n", (7746, 7778), False, 'import os\n'), ((8474, 8485), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8483, 8485), False, 'import os\n'), ((9933, 9975), 'os.path.join', 'os.path.join', (['directory', '"""vtk_files"""', 'name'], {}), "(directory, 'vtk_files', name)\n", (9945, 9975), False, 'import os\n'), ((5594, 5637), 'os.path.join', 'os.path.join', (['time_step_directory', '"""qz.dat"""'], {}), "(time_step_directory, 'qz.dat')\n", (5606, 5637), False, 'import os\n'), ((9985, 10013), 'os.path.isdir', 'os.path.isdir', (['vtk_directory'], {}), '(vtk_directory)\n', (9998, 10013), False, 'import os\n'), ((10073, 10099), 'os.makedirs', 'os.makedirs', (['vtk_directory'], {}), '(vtk_directory)\n', (10084, 10099), False, 'import os\n'), ((10763, 10798), 'numpy.savetxt', 'numpy.savetxt', (['outfile', 'x'], {'fmt': '"""%f"""'}), 
"(outfile, x, fmt='%f')\n", (10776, 10798), False, 'import numpy\n'), ((10861, 10896), 'numpy.savetxt', 'numpy.savetxt', (['outfile', 'y'], {'fmt': '"""%f"""'}), "(outfile, y, fmt='%f')\n", (10874, 10896), False, 'import numpy\n'), ((3256, 3298), 'numpy.loadtxt', 'numpy.loadtxt', (['infile'], {'dtype': 'numpy.float64'}), '(infile, dtype=numpy.float64)\n', (3269, 3298), False, 'import numpy\n'), ((10974, 11009), 'numpy.savetxt', 'numpy.savetxt', (['outfile', 'z'], {'fmt': '"""%f"""'}), "(outfile, z, fmt='%f')\n", (10987, 11009), False, 'import numpy\n'), ((3342, 3372), 'numpy.cumsum', 'numpy.cumsum', (['(n_cells[:-1] + 1)'], {}), '(n_cells[:-1] + 1)\n', (3354, 3372), False, 'import numpy\n'), ((4599, 4628), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO.PetscBinaryIO', ([], {}), '()\n', (4626, 4628), False, 'import PetscBinaryIO\n'), ((4739, 4768), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO.PetscBinaryIO', ([], {}), '()\n', (4766, 4768), False, 'import PetscBinaryIO\n'), ((6530, 6548), 'numpy.ones', 'numpy.ones', (['(nx - 1)'], {}), '(nx - 1)\n', (6540, 6548), False, 'import numpy\n'), ((6719, 6737), 'numpy.ones', 'numpy.ones', (['(ny - 1)'], {}), '(ny - 1)\n', (6729, 6737), False, 'import numpy\n'), ((7785, 7814), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO.PetscBinaryIO', ([], {}), '()\n', (7812, 7814), False, 'import PetscBinaryIO\n'), ((9454, 9521), 'numpy.logical_and', 'numpy.logical_and', (['(field[0].x > view[0][0])', '(field[0].x < view[1][0])'], {}), '(field[0].x > view[0][0], field[0].x < view[1][0])\n', (9471, 9521), False, 'import numpy\n'), ((9592, 9659), 'numpy.logical_and', 'numpy.logical_and', (['(field[0].y > view[0][1])', '(field[0].y < view[1][1])'], {}), '(field[0].y > view[0][1], field[0].y < view[1][1])\n', (9609, 9659), False, 'import numpy\n'), ((1718, 1739), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (1728, 1739), False, 'import os\n'), ((5223, 5241), 'numpy.ones', 'numpy.ones', (['(nx - 1)'], {}), '(nx - 1)\n', 
(5233, 5241), False, 'import numpy\n'), ((5534, 5552), 'numpy.ones', 'numpy.ones', (['(ny - 1)'], {}), '(ny - 1)\n', (5544, 5552), False, 'import numpy\n'), ((5647, 5676), 'PetscBinaryIO.PetscBinaryIO', 'PetscBinaryIO.PetscBinaryIO', ([], {}), '()\n', (5674, 5676), False, 'import PetscBinaryIO\n'), ((6017, 6035), 'numpy.ones', 'numpy.ones', (['(nz - 1)'], {}), '(nz - 1)\n', (6027, 6035), False, 'import numpy\n'), ((9743, 9810), 'numpy.logical_and', 'numpy.logical_and', (['(field[0].z > view[0][2])', '(field[0].z < view[1][2])'], {}), '(field[0].z > view[0][2], field[0].z < view[1][2])\n', (9760, 9810), False, 'import numpy\n')] |
from pathlib import Path
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib
from gym_recording_modified.playback import get_recordings
import seaborn as sns
from scipy.stats import sem
def plot_timesteps(params: np.array, data: np.array, row=None, col=None, plot=None, xlabel='', ylabel='', rowdict={}, coldict={}):
"""
General-use function for plotting a variable over time
args:
params : np.array shape (num_runs, num_params) with parameters for each run
data : np.array shape (num_runs, num_timesteps) with data
row : index of parameters, this function will generate a subplot for each unique setting
col : index of parameters, this function will generate a subplot for each unique setting
plot : index of parameters, for which a new line will be added to a subplot for each unique value
"""
nrows = np.unique(params[:, row]).shape[0]
ncols = np.unique(params[:, col]).shape[0]
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False)
row_idx = 0
for row_param in np.unique(params[:, row]):
col_idx = 0
for col_param in np.unique(params[:, col]):
for plot_param in np.unique(params[:, plot]):
inds = np.where(
(params[:, row]==row_param) &
(params[:, col]==col_param) &
(params[:, plot]==plot_param))
data_ = np.mean(data[inds], axis=0) # data to plot
ax = axs[row_idx, col_idx]
ax.plot(data_)
ax.set_title('%s, %s' % (rowdict.get(row_param, row_param), coldict.get(col_param, col_param)))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
col_idx += 1
row_idx += 1
fig.tight_layout()
def plot_episode_len(params: np.array, data: np.array, row=None, col=None, plot=None, xlabel='', ylabel='', rowdict={}, coldict={}):
"""
General-use function for plotting a episode length
args:
params : np.array shape (num_runs, num_params) with parameters for each run
data : np.array shape (num_runs, num_timesteps) with data
row : index of parameters, this function will generate a subplot for each unique setting
col : index of parameters, this function Will generate a subplot for each unique setting
plot : index of parameters, for which a new line will be added to a subplot for each unique value
"""
nrows = np.unique(params[:, row]).shape[0]
ncols = np.unique(params[:, col]).shape[0]
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=False, figsize=(10,10))
row_idx = 0
for row_param in np.unique(params[:, row]):
col_idx = 0
for col_param in np.unique(params[:, col]):
for plot_param in np.unique(params[:, plot]):
inds = np.where(
(params[:, row]==row_param) &
(params[:, col]==col_param) &
(params[:, plot]==plot_param))
ax = axs[row_idx, col_idx]
for run_data in data[inds]:
if len(run_data) > 1:
ax.plot(np.arange(len(run_data)), run_data)
else:
ax.scatter([0], run_data)
ax.set_title('%s, %s' % (rowdict.get(row_param, row_param), coldict.get(col_param, col_param)))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_yscale('log')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
col_idx += 1
row_idx += 1
fig.tight_layout()
def plot_avg_episode_length(params: np.array, data: np.array, categories=[], shapes=[], plot=None, xlabel='', ylabel='', title='', rowdict={}, coldict={}, tick_fmt='', shape_fmt='', scale='linear'):
"""
General-use function for plotting a episode length
args:
params : np.array shape (num_runs, num_params) with parameters for each run
data : np.array shape (num_runs, num_timesteps) with data
categories : list of indices of parameters. This will determine the categorical variables plotted
shapes : index of parameters, will plot variables in this column with different shapes
"""
font = {'size' : 14}
matplotlib.rc('font', **font)
param_categories = np.unique(params[:, categories], axis=0)
shape_categories = np.unique(params[:, shapes], axis=0)
bar_width = (1 / len(shape_categories))*0.9
colours = sns.cubehelix_palette(len(shape_categories), start=.5, rot=-.75).as_hex()
colour_dict = {}
for i in range(len(shape_categories)):
colour_dict[ shape_fmt % tuple(shape_categories[i, :]) ] = colours[i]
x_pos = 0 # center for a collection of bars
labels = []
for param_cat in param_categories:
bar_positions = [(x_pos-0.5)+ (i*bar_width) for i in range(len(shape_categories))]
for i in range(len(shape_categories)):
bar_pos = bar_positions[i]
shape_cat = shape_categories[i]
inds = np.where((params[:, categories]==param_cat).all(axis=1) & (params[:, shapes]==shape_cat).all(axis=1) )
if inds[0].shape[0] ==0:
break
data_ = np.hstack(data[inds])
plt.bar(bar_pos, np.mean(data_), align='edge', width = bar_width, color=colour_dict[shape_fmt % tuple(shape_cat)])
plt.errorbar(bar_pos+(bar_width/2), np.mean(data_),yerr=sem(data_), ecolor='black', capsize=3 )
labels.append(tick_fmt % tuple(param_cat))
x_pos += 1
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
if 'DQN' in colour_dict.keys():
colour_dict['DQN'] = colour_dict.pop('FFN, 10')
plt.legend(handles=[mpatches.Patch(color=v, label=k.replace('RNN', 'DRQN')) for (k, v) in colour_dict.items()])
plt.xticks(np.arange(len(param_categories)), labels=labels, rotation=290)
if scale != 'linear':
plt.yscale('log', base=2)
plt.tight_layout()
def rolling_sem(data,window):
sems = []
for i in range(0, data.shape[1]-window+1):
sems.append(sem( data[:, i:i+window],axis=None ) )
sems = np.array(sems)
return sems
def plot_rewards(params: np.array, data: np.array, row=None, col=None, plot=None, xlabel='', ylabel='', title='', rowdict={}, coldict={}, plot_fmt='',window=1, sample=1):
"""
"""
plot_params = np.unique(params[:, plot], axis=0)
colours = sns.cubehelix_palette(len(plot_params), start=.5, rot=-.75).as_hex()
colour_dict = {}
for i in range(len(plot_params)):
colour_dict[plot_fmt % tuple(plot_params[i, :]) ] = colours[i]
for plot_param in plot_params:
inds = np.where((params[:, plot]==plot_param).all(axis=1))
data_ = np.mean(data[inds], axis=0) # data to plot
# if window == 1:
# error = sem(data[inds], axis=0)
# else:
# error = rolling_sem(data,window)
error = sem(data[inds], axis=0)
data_ = np.convolve(data_, np.ones(window)/window, mode='same')
error_up = data_ + error/2
error_low = data_ - error/2
data_ = data_[0::sample]
error_up = error_up[0::sample]
error_low = error_low[0::sample]
plt.plot(data_, color=colour_dict[plot_fmt % tuple(plot_param)])
plt.fill_between(range(data_.shape[0]), error_low, error_up, color=colour_dict[plot_fmt % tuple(plot_param)], alpha=0.4)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
colour_dict['DQN'] = colour_dict.pop('FFN, 10')
plt.legend(handles=[mpatches.Patch(color=v, label=k.replace('RNN,', 'DRQN, L=')) for (k, v) in colour_dict.items()])
plt.tight_layout() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.rc",
"matplotlib.pyplot.yscale",
"numpy.ones",
"numpy.hstack",
"numpy.mean",
"numpy.array",
"scipy.stats.sem",
"numpy.where",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotli... | [((1052, 1138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharex': '(False)', 'sharey': '(False)', 'squeeze': '(False)'}), '(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=\n False)\n', (1064, 1138), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1197), 'numpy.unique', 'np.unique', (['params[:, row]'], {}), '(params[:, row])\n', (1181, 1197), True, 'import numpy as np\n'), ((2766, 2870), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharex': '(False)', 'sharey': '(False)', 'squeeze': '(False)', 'figsize': '(10, 10)'}), '(nrows=nrows, ncols=ncols, sharex=False, sharey=False, squeeze=\n False, figsize=(10, 10))\n', (2778, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2903, 2928), 'numpy.unique', 'np.unique', (['params[:, row]'], {}), '(params[:, row])\n', (2912, 2928), True, 'import numpy as np\n'), ((4663, 4692), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (4676, 4692), False, 'import matplotlib\n'), ((4717, 4757), 'numpy.unique', 'np.unique', (['params[:, categories]'], {'axis': '(0)'}), '(params[:, categories], axis=0)\n', (4726, 4757), True, 'import numpy as np\n'), ((4781, 4817), 'numpy.unique', 'np.unique', (['params[:, shapes]'], {'axis': '(0)'}), '(params[:, shapes], axis=0)\n', (4790, 4817), True, 'import numpy as np\n'), ((6009, 6027), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (6019, 6027), True, 'import matplotlib.pyplot as plt\n'), ((6032, 6050), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (6042, 6050), True, 'import matplotlib.pyplot as plt\n'), ((6055, 6071), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6064, 6071), True, 'import matplotlib.pyplot as plt\n'), ((6536, 6554), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6552, 6554), True, 'import matplotlib.pyplot as plt\n'), 
((6722, 6736), 'numpy.array', 'np.array', (['sems'], {}), '(sems)\n', (6730, 6736), True, 'import numpy as np\n'), ((6962, 6996), 'numpy.unique', 'np.unique', (['params[:, plot]'], {'axis': '(0)'}), '(params[:, plot], axis=0)\n', (6971, 6996), True, 'import numpy as np\n'), ((8124, 8142), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (8134, 8142), True, 'import matplotlib.pyplot as plt\n'), ((8147, 8165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (8157, 8165), True, 'import matplotlib.pyplot as plt\n'), ((8170, 8186), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8179, 8186), True, 'import matplotlib.pyplot as plt\n'), ((8365, 8383), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8381, 8383), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1269), 'numpy.unique', 'np.unique', (['params[:, col]'], {}), '(params[:, col])\n', (1253, 1269), True, 'import numpy as np\n'), ((2975, 3000), 'numpy.unique', 'np.unique', (['params[:, col]'], {}), '(params[:, col])\n', (2984, 3000), True, 'import numpy as np\n'), ((6505, 6530), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'base': '(2)'}), "('log', base=2)\n", (6515, 6530), True, 'import matplotlib.pyplot as plt\n'), ((7335, 7362), 'numpy.mean', 'np.mean', (['data[inds]'], {'axis': '(0)'}), '(data[inds], axis=0)\n', (7342, 7362), True, 'import numpy as np\n'), ((7538, 7561), 'scipy.stats.sem', 'sem', (['data[inds]'], {'axis': '(0)'}), '(data[inds], axis=0)\n', (7541, 7561), False, 'from scipy.stats import sem\n'), ((954, 979), 'numpy.unique', 'np.unique', (['params[:, row]'], {}), '(params[:, row])\n', (963, 979), True, 'import numpy as np\n'), ((1001, 1026), 'numpy.unique', 'np.unique', (['params[:, col]'], {}), '(params[:, col])\n', (1010, 1026), True, 'import numpy as np\n'), ((1301, 1327), 'numpy.unique', 'np.unique', (['params[:, plot]'], {}), '(params[:, plot])\n', (1310, 1327), True, 'import 
numpy as np\n'), ((2668, 2693), 'numpy.unique', 'np.unique', (['params[:, row]'], {}), '(params[:, row])\n', (2677, 2693), True, 'import numpy as np\n'), ((2715, 2740), 'numpy.unique', 'np.unique', (['params[:, col]'], {}), '(params[:, col])\n', (2724, 2740), True, 'import numpy as np\n'), ((3032, 3058), 'numpy.unique', 'np.unique', (['params[:, plot]'], {}), '(params[:, plot])\n', (3041, 3058), True, 'import numpy as np\n'), ((5643, 5664), 'numpy.hstack', 'np.hstack', (['data[inds]'], {}), '(data[inds])\n', (5652, 5664), True, 'import numpy as np\n'), ((6668, 6705), 'scipy.stats.sem', 'sem', (['data[:, i:i + window]'], {'axis': 'None'}), '(data[:, i:i + window], axis=None)\n', (6671, 6705), False, 'from scipy.stats import sem\n'), ((1352, 1462), 'numpy.where', 'np.where', (['((params[:, row] == row_param) & (params[:, col] == col_param) & (params[:,\n plot] == plot_param))'], {}), '((params[:, row] == row_param) & (params[:, col] == col_param) & (\n params[:, plot] == plot_param))\n', (1360, 1462), True, 'import numpy as np\n'), ((1557, 1584), 'numpy.mean', 'np.mean', (['data[inds]'], {'axis': '(0)'}), '(data[inds], axis=0)\n', (1564, 1584), True, 'import numpy as np\n'), ((3083, 3193), 'numpy.where', 'np.where', (['((params[:, row] == row_param) & (params[:, col] == col_param) & (params[:,\n plot] == plot_param))'], {}), '((params[:, row] == row_param) & (params[:, col] == col_param) & (\n params[:, plot] == plot_param))\n', (3091, 3193), True, 'import numpy as np\n'), ((5706, 5720), 'numpy.mean', 'np.mean', (['data_'], {}), '(data_)\n', (5713, 5720), True, 'import numpy as np\n'), ((5852, 5866), 'numpy.mean', 'np.mean', (['data_'], {}), '(data_)\n', (5859, 5866), True, 'import numpy as np\n'), ((7597, 7612), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (7604, 7612), True, 'import numpy as np\n'), ((5872, 5882), 'scipy.stats.sem', 'sem', (['data_'], {}), '(data_)\n', (5875, 5882), False, 'from scipy.stats import sem\n'), ((6077, 6086), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6084, 6086), True, 'import matplotlib.pyplot as plt\n'), ((6124, 6133), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6131, 6133), True, 'import matplotlib.pyplot as plt\n'), ((8028, 8037), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8035, 8037), True, 'import matplotlib.pyplot as plt\n'), ((8075, 8084), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8082, 8084), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from traceback import format_exc
import warnings
_additional_convert = []
# Only add conversions if the modules exist.
# This way, we do not add unnecessary dependencies
try:
import numpy as np
def _np_convert(obj):
if isinstance(obj, np.ndarray):
return obj.tolist(), True
if isinstance(obj, np.generic):
return np.asscalar(obj), True
return None, False
_additional_convert.append(_np_convert)
except ImportError:
pass
class SchedyJSONEncoder(json.JSONEncoder):
def default(self, obj):
for convert in _additional_convert:
try:
val, converted = convert(obj)
if converted:
return val
except Exception:
warnings.warn(format_exc())
return super(SchedyJSONEncoder, self).default(obj)
| [
"numpy.asscalar",
"traceback.format_exc"
] | [((484, 500), 'numpy.asscalar', 'np.asscalar', (['obj'], {}), '(obj)\n', (495, 500), True, 'import numpy as np\n'), ((907, 919), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (917, 919), False, 'from traceback import format_exc\n')] |
import json
import numpy as np
from keras import optimizers
import keras_custom
import netw_models
def model_train(file_train, file_val, model_name='u_net_model',
act_func='elu', regularizer='dropout', dropoutrate=0.1,
weighted_loss=True, class_weights=[1, 1, 1],
batch_size=8, n_epochs=50,
save_model=False, save_weights=True):
'''
Function for model training
file_train: file that contains all training (training set) instances (.npz-file)
file_val: file that contains all validation (dev set) instances (.npz-file)
model_name: name of network model
act_func: activation function (see Keras documentation for valid inputs)
regularizer: 'dropout' or 'batchnorm'
dropoutrate: dropoutrate (float between 0.0 and 1.0),
not considered when 'batchnorm' is used
weighted_loss: True or False
class_weights: if 'weighted_loss' is set to True, a list with the class weights
must be provided, the length of the list must correspond with
the number of classes (ground truth), the position of a certain
class (weighting) must correspond with the ground truth
batch_size: batch size to be used for training (must be smaller
than number of training instances)
n_epochs: number of epochs the network is trained for (in this example,
the training would be stopped after 50 epochs)
save_model: True or False, saves the whole model (including training
history etc.), may consume a lot of space on disk
save_weights: True or False, saves the weights only
'''
# Load data files
with np.load(file_train + '.npz') as data:
train_X = data['data_X']
train_Y = data['data_Y']
with np.load(file_val + '.npz') as data:
val_X = data['data_X']
val_Y = data['data_Y']
_, height, width, channels = train_X.shape
n_classes = train_Y.shape[-1]
# Definition of various input parameters
model_args = (height, width, channels, n_classes)
model_kwargs = {'act_func': act_func,
'regularizer': regularizer,
'dropoutrate': dropoutrate}
if weighted_loss == True:
loss_function = keras_custom.custom_categorical_crossentropy(class_weights)
else:
loss_function = 'categorical_crossentropy'
# Build model
model = netw_models.u_net_model(*model_args, **model_kwargs)
# Compile model
model.compile(loss=loss_function,
optimizer=optimizers.RMSprop(lr=1e-4, rho=0.9),
metrics=['acc'])
# Model training
model_fit = model.fit(train_X, train_Y, batch_size, n_epochs,
validation_data=(val_X, val_Y), shuffle=True)
if save_model == True:
model.save(model_name + '.h5')
if save_weights == True:
model.save_weights(model_name + '_weights.h5')
with open(model_name + '_init.json', 'w') as file:
json.dump(model_kwargs, file)
# Plot averaged overall accuracy and loss
keras_custom.plot_acc_loss(model_fit.history['acc'],
model_fit.history['val_acc'],
model_fit.history['loss'],
model_fit.history['val_loss'],
model_name)
del train_X, train_Y, val_X, val_Y
if __name__ == "__main__":
import sys
model_train(*sys.argv[1:])
| [
"json.dump",
"numpy.load",
"keras_custom.plot_acc_loss",
"netw_models.u_net_model",
"keras_custom.custom_categorical_crossentropy",
"keras.optimizers.RMSprop"
] | [((2532, 2584), 'netw_models.u_net_model', 'netw_models.u_net_model', (['*model_args'], {}), '(*model_args, **model_kwargs)\n', (2555, 2584), False, 'import netw_models\n'), ((3237, 3398), 'keras_custom.plot_acc_loss', 'keras_custom.plot_acc_loss', (["model_fit.history['acc']", "model_fit.history['val_acc']", "model_fit.history['loss']", "model_fit.history['val_loss']", 'model_name'], {}), "(model_fit.history['acc'], model_fit.history[\n 'val_acc'], model_fit.history['loss'], model_fit.history['val_loss'],\n model_name)\n", (3263, 3398), False, 'import keras_custom\n'), ((1762, 1790), 'numpy.load', 'np.load', (["(file_train + '.npz')"], {}), "(file_train + '.npz')\n", (1769, 1790), True, 'import numpy as np\n'), ((1878, 1904), 'numpy.load', 'np.load', (["(file_val + '.npz')"], {}), "(file_val + '.npz')\n", (1885, 1904), True, 'import numpy as np\n'), ((2371, 2430), 'keras_custom.custom_categorical_crossentropy', 'keras_custom.custom_categorical_crossentropy', (['class_weights'], {}), '(class_weights)\n', (2415, 2430), False, 'import keras_custom\n'), ((2680, 2718), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(0.0001)', 'rho': '(0.9)'}), '(lr=0.0001, rho=0.9)\n', (2698, 2718), False, 'from keras import optimizers\n'), ((3149, 3178), 'json.dump', 'json.dump', (['model_kwargs', 'file'], {}), '(model_kwargs, file)\n', (3158, 3178), False, 'import json\n')] |
import json
from collections import defaultdict
import numpy as np
from habitat import Env, logger
from habitat.config.default import Config
from habitat.core.agent import Agent
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from tqdm import tqdm
from robo_vln_baselines.common.continuous_path_follower import (
ContinuousPathFollower,
track_waypoint
)
import shutil
import os
import random
from fastdtw import fastdtw
import habitat_sim
import gzip
from robo_vln_baselines.common.env_utils import construct_env
from habitat.utils.visualizations import maps
from habitat.utils.visualizations.utils import (
append_text_to_image,
images_to_video,
)
def draw_top_down_map(info, output_size):
return maps.colorize_draw_agent_and_fit_to_height(
info["top_down_map"], output_size
)
from habitat.utils.visualizations.utils import observations_to_image
def save_map(observations, info, images):
im = observations_to_image(observations,info )
top_down_map = draw_top_down_map(
info, im.shape[0]
)
output_im = im
output_im = append_text_to_image(
output_im, observations["instruction"]["text"]
)
images.append(output_im)
def euclidean_distance(position_a, position_b):
return np.linalg.norm(np.array(position_b) - np.array(position_a), ord=2)
def evaluate_agent(config: Config):
split = config.EVAL.SPLIT
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = split
config.TASK_CONFIG.TASK.NDTW.SPLIT = split
config.TASK_CONFIG.TASK.SDTW.SPLIT = split
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = True
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
config.freeze()
logger.info(config)
env = construct_env(config)
gt_path = config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=config.TASK_CONFIG.DATASET.SPLIT)
with gzip.open(gt_path, "rt") as f:
gt_json = json.load(f)
assert config.EVAL.NONLEARNING.AGENT in [
"RandomAgent",
"HandcraftedAgent",
], "EVAL.NONLEARNING.AGENT must be either RandomAgent or HandcraftedAgent."
if config.EVAL.NONLEARNING.AGENT == "RandomAgent":
agent = RandomContinuousAgent()
else:
agent = HandcraftedAgent()
obs = env.reset()
agent.reset()
steps =0
is_done = False
stats_episodes = {} # dict of dicts that stores stats per episode
ep_count =0
locations=[]
vel_control = habitat_sim.physics.VelocityControl()
vel_control.controlling_lin_vel = True
vel_control.lin_vel_is_local = True
vel_control.controlling_ang_vel = True
vel_control.ang_vel_is_local = True
images = []
IMAGE_DIR = os.path.join("examples", "images")
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
while (len(stats_episodes) < config.EVAL.EPISODE_COUNT):
current_episode = env.habitat_env.current_episode
actions = agent.act()
vel_control.linear_velocity = np.array([0, 0, -actions[0]])
vel_control.angular_velocity = np.array([0, actions[1], 0])
observations, _, done, info = env.step(vel_control)
episode_over, success = done
episode_success = success and (actions[0]<0.25)
is_done = episode_over or episode_success
steps+=1
locations.append(env.habitat_env._sim.get_agent_state().position.tolist())
save_map(observations, info, images)
dirname = os.path.join(
IMAGE_DIR, "icra_video", "%02d" % env.habitat_env.current_episode.episode_id
)
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
if is_done or steps==config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS:
gt_locations = gt_json[str(current_episode.episode_id)]["locations"]
dtw_distance = fastdtw(locations, gt_locations, dist=euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_locations) * config.TASK_CONFIG.TASK.NDTW.SUCCESS_DISTANCE))
locations=[]
is_done = False
ep_count+=1
steps=0
print("dones:", done)
stats_episodes[current_episode.episode_id] = info
stats_episodes[current_episode.episode_id]['ndtw'] = nDTW
print("len stats episodes",len(stats_episodes))
print("Current episode ID:", current_episode.episode_id)
print("Episode Completed:", ep_count)
print(" Episode done---------------------------------------------")
obs = env.reset()
print(stats_episodes[current_episode.episode_id])
time_step = 1.0/30
images_to_video(images, dirname, str(current_episode.episode_id), fps = int (1.0/time_step))
images = []
env.close()
aggregated_stats = {}
num_episodes = len(stats_episodes)
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum([v[stat_key] for v in stats_episodes.values()]) / num_episodes
)
with open(f"stats_complete_{config.EVAL.NONLEARNING.AGENT}_{split}.json", "w") as f:
json.dump(aggregated_stats, f, indent=4)
class RandomContinuousAgent(Agent):
    r"""Continuous-control agent that emits a uniformly random velocity
    command at every step: linear velocity in [0, 2), angular velocity
    in [-1, 1).
    """
    def __init__(self):
        # Last commanded linear / angular velocity.
        self.vel = 0
        self.omega = 0
    def reset(self):
        # No per-episode state beyond the last command; nothing to clear.
        pass
    def act(self):
        lin = random.random() * 2.0
        ang = (random.random() - 0.5) * 2.0
        self.vel, self.omega = lin, ang
        return (self.vel, self.omega)
class HandcraftedAgentContinuous(Agent):
    r"""Agent picks a random heading and takes 37 forward actions (average
    oracle path length) before calling stop.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        # 9.27m avg oracle path length in Train.
        # Fwd step size: 0.25m. 9.25m/0.25m = 37
        # NOTE(review): forward_steps is 30 here although the docstring and
        # the calculation above say 37 — confirm whether 30 is intentional
        # for the continuous variant.
        self.forward_steps = 30
        # Random number of initial right turns; presumably 15 degrees per
        # TURN_RIGHT action — verify against the sim config.
        self.turns = np.random.randint(0, int(360 / 15) + 1)
    def act(self, observations):
        # Phase 1: spin in place for the randomly drawn number of turns.
        if self.turns > 0:
            self.turns -= 1
            return {"action": HabitatSimActions.TURN_RIGHT}
        # Phase 2: walk forward the fixed number of steps.
        if self.forward_steps > 0:
            self.forward_steps -= 1
            return {"action": HabitatSimActions.MOVE_FORWARD}
        # Phase 3: done.
        return {"action": HabitatSimActions.STOP}
class HandcraftedAgent(Agent):
    r"""Agent picks a random heading and takes 37 forward actions (average
    oracle path length) before calling stop.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        # 9.27m average oracle path length in Train; forward step size is
        # 0.25m, so 9.25m / 0.25m = 37 forward steps.
        self.forward_steps = 37
        self.turns = np.random.randint(0, int(360 / 15) + 1)
    def act(self, observations):
        # Spin first, then walk forward, then stop.
        if self.turns > 0:
            self.turns -= 1
            action = HabitatSimActions.TURN_RIGHT
        elif self.forward_steps > 0:
            self.forward_steps -= 1
            action = HabitatSimActions.MOVE_FORWARD
        else:
            action = HabitatSimActions.STOP
        return {"action": action}
| [
"json.dump",
"robo_vln_baselines.common.env_utils.construct_env",
"json.load",
"habitat.logger.info",
"os.path.join",
"gzip.open",
"os.makedirs",
"os.path.exists",
"habitat.utils.visualizations.maps.colorize_draw_agent_and_fit_to_height",
"habitat_sim.physics.VelocityControl",
"random.random",
... | [((747, 824), 'habitat.utils.visualizations.maps.colorize_draw_agent_and_fit_to_height', 'maps.colorize_draw_agent_and_fit_to_height', (["info['top_down_map']", 'output_size'], {}), "(info['top_down_map'], output_size)\n", (789, 824), False, 'from habitat.utils.visualizations import maps\n'), ((962, 1003), 'habitat.utils.visualizations.utils.observations_to_image', 'observations_to_image', (['observations', 'info'], {}), '(observations, info)\n', (983, 1003), False, 'from habitat.utils.visualizations.utils import observations_to_image\n'), ((1109, 1177), 'habitat.utils.visualizations.utils.append_text_to_image', 'append_text_to_image', (['output_im', "observations['instruction']['text']"], {}), "(output_im, observations['instruction']['text'])\n", (1129, 1177), False, 'from habitat.utils.visualizations.utils import append_text_to_image, images_to_video\n'), ((1747, 1766), 'habitat.logger.info', 'logger.info', (['config'], {}), '(config)\n', (1758, 1766), False, 'from habitat import Env, logger\n'), ((1778, 1799), 'robo_vln_baselines.common.env_utils.construct_env', 'construct_env', (['config'], {}), '(config)\n', (1791, 1799), False, 'from robo_vln_baselines.common.env_utils import construct_env\n'), ((2485, 2522), 'habitat_sim.physics.VelocityControl', 'habitat_sim.physics.VelocityControl', ([], {}), '()\n', (2520, 2522), False, 'import habitat_sim\n'), ((2721, 2755), 'os.path.join', 'os.path.join', (['"""examples"""', '"""images"""'], {}), "('examples', 'images')\n", (2733, 2755), False, 'import os\n'), ((1908, 1932), 'gzip.open', 'gzip.open', (['gt_path', '"""rt"""'], {}), "(gt_path, 'rt')\n", (1917, 1932), False, 'import gzip\n'), ((1957, 1969), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1966, 1969), False, 'import json\n'), ((2767, 2792), 'os.path.exists', 'os.path.exists', (['IMAGE_DIR'], {}), '(IMAGE_DIR)\n', (2781, 2792), False, 'import os\n'), ((2802, 2824), 'os.makedirs', 'os.makedirs', (['IMAGE_DIR'], {}), '(IMAGE_DIR)\n', (2813, 2824), 
False, 'import os\n'), ((3013, 3042), 'numpy.array', 'np.array', (['[0, 0, -actions[0]]'], {}), '([0, 0, -actions[0]])\n', (3021, 3042), True, 'import numpy as np\n'), ((3082, 3110), 'numpy.array', 'np.array', (['[0, actions[1], 0]'], {}), '([0, actions[1], 0])\n', (3090, 3110), True, 'import numpy as np\n'), ((3479, 3574), 'os.path.join', 'os.path.join', (['IMAGE_DIR', '"""icra_video"""', "('%02d' % env.habitat_env.current_episode.episode_id)"], {}), "(IMAGE_DIR, 'icra_video', '%02d' % env.habitat_env.\n current_episode.episode_id)\n", (3491, 3574), False, 'import os\n'), ((3603, 3626), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (3617, 3626), False, 'import os\n'), ((3671, 3691), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (3682, 3691), False, 'import os\n'), ((5199, 5239), 'json.dump', 'json.dump', (['aggregated_stats', 'f'], {'indent': '(4)'}), '(aggregated_stats, f, indent=4)\n', (5208, 5239), False, 'import json\n'), ((1297, 1317), 'numpy.array', 'np.array', (['position_b'], {}), '(position_b)\n', (1305, 1317), True, 'import numpy as np\n'), ((1320, 1340), 'numpy.array', 'np.array', (['position_a'], {}), '(position_a)\n', (1328, 1340), True, 'import numpy as np\n'), ((3640, 3662), 'shutil.rmtree', 'shutil.rmtree', (['dirname'], {}), '(dirname)\n', (3653, 3662), False, 'import shutil\n'), ((5542, 5557), 'random.random', 'random.random', ([], {}), '()\n', (5555, 5557), False, 'import random\n'), ((3880, 3937), 'fastdtw.fastdtw', 'fastdtw', (['locations', 'gt_locations'], {'dist': 'euclidean_distance'}), '(locations, gt_locations, dist=euclidean_distance)\n', (3887, 3937), False, 'from fastdtw import fastdtw\n'), ((5585, 5600), 'random.random', 'random.random', ([], {}), '()\n', (5598, 5600), False, 'import random\n')] |
#! /home/lyjslay/py3env/bin python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# File name : detector.py
# Author : <NAME>
# E-Mail : <EMAIL>
# Description : Object detection based on deeplearning
#
#================================================================
import cv2
import time
import numpy as np
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
from multiprocessing import Process, Value, Array
from shared_ram import *
class Detector(Process):
    '''Object-detection subprocess.

    Runs SSD inference in a loop on the shared input frame, publishes the
    selected enemy bounding box / direction through the shared variables,
    and flips the detect/init-tracker/track state flags.
    '''
    def __init__(self, name, detecting, tracking, initracker, boundingbox, image_in, direction, ENEMY_COLOR):
        '''
        :param name: process name.
        :param detecting, tracking, initracker: shared state flags (objects
            with a ``.value``) owned by the main process.
        :param boundingbox: shared sequence receiving [xmin, ymin, w, h].
        :param image_in: shared buffer holding the latest camera frame.
        :param direction: shared direction indicator.
        :param ENEMY_COLOR: class id of the enemy armor color (1 or 2, per
            ``cls_dict`` below).
        '''
        super().__init__()
        self.name = name # process name
        # Defined in main process and shared in all processes
        self.detecting = detecting
        self.tracking = tracking
        self.initracker = initracker
        self.boundingbox = boundingbox
        self.image_in = image_in
        self.direction = direction
        # inference concerned param
        self.cls_dict = {1:'blue',2:'red',3:'front',4:'back',5:'left',6:'right',7:'tracking'}
        self.pb_path = './model_data/robomaster_trt.pb'
        self.enemy_color = ENEMY_COLOR
        self.conf_th = 0.5

    def run(self):
        # The TF session is created inside the child process; TF state is
        # not safely shareable across process boundaries.
        trt_graph = load_trt(self.pb_path)
        tf_sess = create_tfsess(trt_graph)
        while True:
            img = self.image_in[:].copy()
            box_list, cls_list, score_list = detect(img, tf_sess, self.conf_th)
            # BUG FIX: was `cls_lsit` — a NameError on every iteration.
            if len(cls_list) != 0:
                box, direc = select_target(box_list, cls_list, score_list, self.enemy_color)
                # direc is [box, cls]; publish the direction class id.
                self.direction = direc[1]
                self.boundingbox[:] = [box[1], box[0], box[3]-box[1], box[2]-box[0]] # xmin,ymin,width,height
                # first start init_tracker.
                self.detecting.value = False
                self.initracker.value = True
                self.tracking.value = False
            else:
                # NOTE(review): assigning None into a full slice raises for a
                # multiprocessing.Array — confirm the boundingbox type.
                self.boundingbox[:] = None
                self.direction = 7
                # BUG FIX: `rospy` is never imported in this module, so the
                # original rospy.loginfo call raised NameError; log via print.
                print('no enemy detected')
                continue
def load_trt(pb_path):
    '''Deserialize a frozen TensorRT-optimized graph from *pb_path*,
    pin every op to the CPU, and import it into a fresh tf.Graph.
    '''
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_path, 'rb') as handle:
        serialized = handle.read()
    graph_def.ParseFromString(serialized)
    # Force CPU placement for every node of the imported graph.
    for node in graph_def.node:
        node.device = '/device:CPU:0'
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name='')
    return graph
def load_pb(pb_path):
    '''Load a plain frozen inference graph (.pb) into a new tf.Graph.'''
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(pb_path, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        # name='' keeps the original tensor names (no import prefix).
        tf.import_graph_def(graph_def, name='')
    return graph
def create_tfsess(trt_graph):
    '''Create a TF session bound to *trt_graph* with incremental GPU memory
    allocation, soft placement, and device-placement logging enabled.
    '''
    config = tf.ConfigProto(allow_soft_placement=True,
                           log_device_placement=True)
    config.gpu_options.allow_growth = True
    return tf.Session(config=config, graph=trt_graph)
def preprocess(src, shape=None, to_rgb=True):
    '''Cast *src* to uint8, optionally resize it to *shape*, and optionally
    flip the channel order (BGR -> RGB).
    '''
    out = src.astype(np.uint8)
    if shape:
        out = cv2.resize(out, shape)
    # Reversing the last axis swaps BGR to RGB without copying channels
    # individually.
    return out[..., ::-1] if to_rgb else out
def postprocess(img, boxes, scores, classes, conf_thre):
    '''Scale the first batch of normalized boxes to pixel coordinates and
    keep only detections whose confidence reaches *conf_thre*.

    Returns (boxes, classes, scores) filtered in parallel.
    '''
    height, width, _ = img.shape
    scale = np.array([height, width, height, width])
    pix_boxes = (boxes[0] * scale).astype(np.int32)
    confidences = scores[0]
    labels = classes[0].astype(np.int32)
    keep = np.where(confidences >= conf_thre)
    return pix_boxes[keep], labels[keep], confidences[keep]
def select_target(box_list, cls_list, score_list, ENEMY_COLOR):
    '''Select the best-scoring enemy armor box and the direction box that
    encloses it.

    Parameters
    ----------
    box_list, cls_list, score_list : parallel sequences of detections.
    ENEMY_COLOR : class id of the enemy armor color.

    Returns
    -------
    armor_box : the highest-scoring box with cls == ENEMY_COLOR, or None
        if no enemy armor was detected.
    direction : [box, cls] of the highest-scoring direction box (cls >= 3)
        whose extent along box[0]/box[2] encloses the armor box, or None.
    '''
    # BUG FIX: the score accumulators were reset to 0 INSIDE the loops, so
    # every detection was compared against 0 and the last (not the best)
    # match won. They must live outside the loops.
    best_armor_score = 0
    armor_box = None
    for box, cls, score in zip(box_list, cls_list, score_list):
        if cls == ENEMY_COLOR and score > best_armor_score:
            best_armor_score = score
            armor_box = box
    if armor_box is None:
        # No enemy armor at all (previously raised UnboundLocalError).
        return None, None
    best_direc_score = 0
    direction = None
    for box, cls, score in zip(box_list, cls_list, score_list):
        if (cls >= 3 and score > best_direc_score
                and box[0] < armor_box[0] and box[2] > armor_box[2]):
            best_direc_score = score
            direction = [box, cls]
    return armor_box, direction
def detect(origimg, tf_sess, conf):
    '''Run one inference pass on *origimg* and return filtered detections.

    :param origimg: input frame (numpy array); resized to 300x300 for the net.
    :param tf_sess: TensorFlow session holding the loaded graph.
    :param conf: confidence threshold forwarded to postprocess().
    :return: tuple (boxes, classes, scores) above the threshold, with boxes
        scaled back to *origimg* pixel coordinates.

    NOTE(review): relies on the module-level globals ``image_tensor``,
    ``tf_boxes``, ``tf_scores`` and ``tf_classes`` being defined before the
    call (they are created in the ``__main__`` block) — confirm they are
    also set up on the Detector.run() code path.
    '''
    img = preprocess(origimg, (300, 300))
    boxes_out, scores_out, classes_out = tf_sess.run(
        [tf_boxes, tf_scores, tf_classes],
        feed_dict={image_tensor: img[None, ...]})
    # process outputs
    box, cls, score = postprocess(origimg, boxes_out, scores_out, classes_out, conf)
    return (box, cls, score)
# Standalone test of the detector
if __name__ == '__main__':
    ENEMY_COLOR = 2 #blue
    MODEL_PATH = './model_data/robomaster_trt.pb'
    # NOTE(review): this mapping (1->'red', 2->'blue') contradicts the one in
    # Detector.__init__ (1->'blue', 2->'red') — confirm which is correct.
    cls_dict = {1:'red',2:'blue',3:'front',4:'back',5:'left',6:'right',7:'tracking'}
    color_dict = {1:(255,0,0),2:(0,255,0),3:(0,0,255),4:(255,255,0),5:(255,0,255),6:(0,255,255),7:(255,255,255)}
    detection_graph = load_pb(MODEL_PATH)
    tf_sess = create_tfsess(detection_graph)
    # Module-level tensor handles; detect() reads these as globals.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    tf_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    tf_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    tf_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    video = cv2.VideoCapture('test.avi')
    while(video.isOpened()):
        ret, frame = video.read()
        start = time.time()
        box,cls,score = detect(frame,tf_sess,0.2)
        end = time.time()
        print(box,cls,score)
        # Wall-clock inference time in milliseconds.
        infer_time = round((end-start)*1000,2)
        text = 'inference time: '+str(infer_time)+' ms'
        if len(box) != 0:
            # Draw every surviving detection with its class label and score.
            for b,c,s in zip(box,cls,score):
                box_text = str(cls_dict[c])+' '+str(round(s,3))
                color = color_dict[c]
                cv2.rectangle(frame,(b[1],b[0]), (b[3],b[2]), color, 2)
                cv2.putText(frame, box_text, (b[1], b[0]-15), cv2.FONT_HERSHEY_PLAIN, 1.4, color,1, cv2.LINE_AA)
        # Timing text is drawn twice (dark outline, then light fill) for
        # contrast on any background.
        cv2.putText(frame, text, (11, 20), cv2.FONT_HERSHEY_PLAIN, 1.4, (32,32,32),4, cv2.LINE_AA)
        cv2.putText(frame, text, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.4, (240,240,240),1, cv2.LINE_AA)
        cv2.imshow('tensorrt detector', frame)
        if cv2.waitKey(1) == ord('q'):
            break
    video.release()
    cv2.destroyAllWindows()
| [
"cv2.putText",
"cv2.waitKey",
"tensorflow.Session",
"cv2.imshow",
"time.time",
"cv2.VideoCapture",
"tensorflow.ConfigProto",
"numpy.where",
"tensorflow.gfile.GFile",
"numpy.array",
"tensorflow.Graph",
"cv2.rectangle",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"cv2.destroyAllW... | [((1982, 1995), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1993, 1995), True, 'import tensorflow as tf\n'), ((2297, 2307), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2305, 2307), True, 'import tensorflow as tf\n'), ((2617, 2633), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2631, 2633), True, 'import tensorflow as tf\n'), ((2762, 2807), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config', 'graph': 'trt_graph'}), '(config=tf_config, graph=trt_graph)\n', (2772, 2807), True, 'import tensorflow as tf\n'), ((3281, 3312), 'numpy.where', 'np.where', (['(out_conf >= conf_thre)'], {}), '(out_conf >= conf_thre)\n', (3289, 3312), True, 'import numpy as np\n'), ((4993, 5021), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""test.avi"""'], {}), "('test.avi')\n", (5009, 5021), False, 'import cv2\n'), ((5863, 5886), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5884, 5886), False, 'import cv2\n'), ((2002, 2031), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (2016, 2031), True, 'import tensorflow as tf\n'), ((2193, 2236), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['trt_graph_def'], {'name': '""""""'}), "(trt_graph_def, name='')\n", (2212, 2236), True, 'import tensorflow as tf\n'), ((2361, 2374), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2372, 2374), True, 'import tensorflow as tf\n'), ((2942, 2964), 'cv2.resize', 'cv2.resize', (['img', 'shape'], {}), '(img, shape)\n', (2952, 2964), False, 'import cv2\n'), ((3153, 3175), 'numpy.array', 'np.array', (['[h, w, h, w]'], {}), '([h, w, h, w])\n', (3161, 3175), True, 'import numpy as np\n'), ((5086, 5097), 'time.time', 'time.time', ([], {}), '()\n', (5095, 5097), False, 'import time\n'), ((5150, 5161), 'time.time', 'time.time', ([], {}), '()\n', (5159, 5161), False, 'import time\n'), ((5764, 5802), 'cv2.imshow', 'cv2.imshow', (['"""tensorrt detector"""', 'frame'], {}), 
"('tensorrt detector', frame)\n", (5774, 5802), False, 'import cv2\n'), ((2382, 2411), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (2396, 2411), True, 'import tensorflow as tf\n'), ((2506, 2548), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2525, 2548), True, 'import tensorflow as tf\n'), ((5574, 5672), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(11, 20)', 'cv2.FONT_HERSHEY_PLAIN', '(1.4)', '(32, 32, 32)', '(4)', 'cv2.LINE_AA'], {}), '(frame, text, (11, 20), cv2.FONT_HERSHEY_PLAIN, 1.4, (32, 32, 32\n ), 4, cv2.LINE_AA)\n', (5585, 5672), False, 'import cv2\n'), ((5668, 5769), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(10, 20)', 'cv2.FONT_HERSHEY_PLAIN', '(1.4)', '(240, 240, 240)', '(1)', 'cv2.LINE_AA'], {}), '(frame, text, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.4, (240, 240, \n 240), 1, cv2.LINE_AA)\n', (5679, 5769), False, 'import cv2\n'), ((5808, 5822), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5819, 5822), False, 'import cv2\n'), ((2153, 2163), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2161, 2163), True, 'import tensorflow as tf\n'), ((5414, 5472), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(b[1], b[0])', '(b[3], b[2])', 'color', '(2)'], {}), '(frame, (b[1], b[0]), (b[3], b[2]), color, 2)\n', (5427, 5472), False, 'import cv2\n'), ((5474, 5577), 'cv2.putText', 'cv2.putText', (['frame', 'box_text', '(b[1], b[0] - 15)', 'cv2.FONT_HERSHEY_PLAIN', '(1.4)', 'color', '(1)', 'cv2.LINE_AA'], {}), '(frame, box_text, (b[1], b[0] - 15), cv2.FONT_HERSHEY_PLAIN, 1.4,\n color, 1, cv2.LINE_AA)\n', (5485, 5577), False, 'import cv2\n')] |
# BSD 2-Clause License
# Copyright (c) 2022, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
def reconstruct_interface_log_msg(msg, Nd):
    """Rebuild the operator-interface log from a std_msgs/Float64MultiArray.

    The flat ``msg.data`` vector is interpreted column-major as (Nd + 1)
    rows — one time row followed by Nd signal rows — by N samples.

    Input
    -----
    msg [std_msgs/Float64MultiArray]
        ROS message received from an operator_interface_logger node.
    Nd [int]
        Number of dimensions of the operator signal.

    Output
    ------
    t [numpy.ndarray]
        An N-array of time stamps.
    h [numpy.ndarray]
        An Nd-by-N array containing N operator signals.
    """
    rows = Nd + 1  # time row plus Nd signal rows
    samples = len(msg.data) // rows
    table = numpy.array(msg.data).reshape(rows, samples, order='F')
    return table[0, :], table[1:, :]
| [
"numpy.array"
] | [((1907, 1928), 'numpy.array', 'numpy.array', (['msg.data'], {}), '(msg.data)\n', (1918, 1928), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 18:08:30 2020
@author: mohammad
"""
import numpy as np
from tensorflow.keras.utils import to_categorical
import tensorflow.keras
from tensorflow.keras.optimizers import Adam
import os
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import StratifiedKFold
import functions
# Root of the UCR time-series archive, resolved relative to the CWD.
original_path = os.path.realpath('') + '/UCRArchive_2018/'
list_of_folder = os.listdir(original_path)
list_of_folder = sorted(list_of_folder)
#be aware, whenever you run the code, the previous results will be deleted.
try:
    os.remove('results_ResNet')
    print("results_ResNet removed successfully")
except:
    # The results file did not exist (or could not be deleted).
    print("results_ResNet can not be removed")
#=================================================================================================
# One train/evaluate round per dataset folder in the archive.
for counter, folder_name in enumerate(list_of_folder):
    number_of_mul = 5  # augmentation multiplier passed to aug_dataset
#=================================================================================================
    #Reading the train samples
    train_samples, train_labels, num_classes = functions.utils.read_train_data(functions.utils, original_path, folder_name)
#=================================================================================================
    #augmentation
    sample_label_list = []
    for (h, k) in zip(train_samples, train_labels):
        sample_label_list.append([h, k])
    augmented_dataset = functions.utils.aug_dataset(functions.utils, train_samples, train_labels, sample_label_list, number_of_mul = number_of_mul)
    augmented_dataset = np.array(augmented_dataset)
    train_labels = np.array(train_labels)
#=================================================================================================
    #splitting train data
    skf = StratifiedKFold(n_splits=4, shuffle=True)
    # NOTE(review): this loop rebinds augmented_dataset/train_labels on every
    # iteration, so only the LAST fold's split survives and later folds index
    # into the already-shrunken arrays — confirm whether a single train/val
    # split was intended here instead of full cross-validation.
    for train_index, val_index in skf.split(augmented_dataset, train_labels):
        X_train, X_val = augmented_dataset[train_index], augmented_dataset[val_index]
        y_train, y_val = train_labels[train_index], train_labels[val_index]
        augmented_dataset = np.array(X_train)
        train_labels = np.array(y_train)
        X_val = np.array(X_val)
        y_val = np.array(y_val)
#=================================================================================================
    #Reading test samples
    test_samples, test_labels = functions.utils.read_test_data(functions.utils, original_path, folder_name)
#=================================================================================================
    #create and training network
    # Reshape flat series to (samples, length, 1) for the 1-D conv model.
    _, shape2 = np.shape(augmented_dataset)
    augmented_dataset = np.reshape(augmented_dataset, (-1, shape2 , 1))
    test_samples = np.reshape(test_samples, (-1, shape2 , 1))
    input_shape = (shape2, 1)
    X_val = np.reshape(X_val, (-1, shape2 , 1))
    model = functions.utils.build_model(input_shape = input_shape, num_classes = num_classes)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(),
                  metrics=['acc'])
    one_hot_encode = to_categorical(train_labels)
    one_hot_encode_test = to_categorical(test_labels)
    one_hot_encode_val = to_categorical(y_val)
    reduce_lr = tensorflow.keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5,
                              patience=50, min_lr=0.0001)
    # Keep two checkpoints: best validation loss and best training loss.
    checkpoint1 = ModelCheckpoint('val_loss.hdf5', save_best_only=True, save_weights_only=True, monitor='val_loss', mode='min')
    checkpoint2 = ModelCheckpoint('train_loss.hdf5', save_best_only=True, save_weights_only=True, monitor='loss', mode='min')
    history = model.fit(augmented_dataset, one_hot_encode, epochs=10, batch_size=64, validation_data = (X_val ,one_hot_encode_val), callbacks = [reduce_lr, checkpoint1, checkpoint2])
#=================================================================================================
    #saving the results
    # Evaluate test accuracy under both checkpoint criteria and append a
    # CSV-style row per dataset to the results file.
    model.load_weights('val_loss.hdf5')
    _, test_acc_min_val_loss = model.evaluate(test_samples, one_hot_encode_test)
    model.load_weights('train_loss.hdf5')
    _, test_acc_min_train_loss = model.evaluate(test_samples, one_hot_encode_test)
    with open("results_ResNet", "a+") as f:
        f.write("%d, %s, %f, %f\n" % (counter, folder_name, test_acc_min_val_loss, test_acc_min_train_loss))
| [
"os.remove",
"tensorflow.keras.utils.to_categorical",
"functions.utils.aug_dataset",
"os.path.realpath",
"functions.utils.build_model",
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.shape",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"functions.utils.read_test_data",
"numpy.... | [((460, 485), 'os.listdir', 'os.listdir', (['original_path'], {}), '(original_path)\n', (470, 485), False, 'import os\n'), ((400, 420), 'os.path.realpath', 'os.path.realpath', (['""""""'], {}), "('')\n", (416, 420), False, 'import os\n'), ((610, 637), 'os.remove', 'os.remove', (['"""results_ResNet"""'], {}), "('results_ResNet')\n", (619, 637), False, 'import os\n'), ((1089, 1165), 'functions.utils.read_train_data', 'functions.utils.read_train_data', (['functions.utils', 'original_path', 'folder_name'], {}), '(functions.utils, original_path, folder_name)\n', (1120, 1165), False, 'import functions\n'), ((1413, 1538), 'functions.utils.aug_dataset', 'functions.utils.aug_dataset', (['functions.utils', 'train_samples', 'train_labels', 'sample_label_list'], {'number_of_mul': 'number_of_mul'}), '(functions.utils, train_samples, train_labels,\n sample_label_list, number_of_mul=number_of_mul)\n', (1440, 1538), False, 'import functions\n'), ((1565, 1592), 'numpy.array', 'np.array', (['augmented_dataset'], {}), '(augmented_dataset)\n', (1573, 1592), True, 'import numpy as np\n'), ((1611, 1633), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1619, 1633), True, 'import numpy as np\n'), ((1769, 1810), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(4)', 'shuffle': '(True)'}), '(n_splits=4, shuffle=True)\n', (1784, 1810), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2060, 2077), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2068, 2077), True, 'import numpy as np\n'), ((2096, 2113), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2104, 2113), True, 'import numpy as np\n'), ((2127, 2142), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (2135, 2142), True, 'import numpy as np\n'), ((2152, 2167), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (2160, 2167), True, 'import numpy as np\n'), ((2327, 2402), 'functions.utils.read_test_data', 
'functions.utils.read_test_data', (['functions.utils', 'original_path', 'folder_name'], {}), '(functions.utils, original_path, folder_name)\n', (2357, 2402), False, 'import functions\n'), ((2553, 2580), 'numpy.shape', 'np.shape', (['augmented_dataset'], {}), '(augmented_dataset)\n', (2561, 2580), True, 'import numpy as np\n'), ((2603, 2649), 'numpy.reshape', 'np.reshape', (['augmented_dataset', '(-1, shape2, 1)'], {}), '(augmented_dataset, (-1, shape2, 1))\n', (2613, 2649), True, 'import numpy as np\n'), ((2667, 2708), 'numpy.reshape', 'np.reshape', (['test_samples', '(-1, shape2, 1)'], {}), '(test_samples, (-1, shape2, 1))\n', (2677, 2708), True, 'import numpy as np\n'), ((2746, 2780), 'numpy.reshape', 'np.reshape', (['X_val', '(-1, shape2, 1)'], {}), '(X_val, (-1, shape2, 1))\n', (2756, 2780), True, 'import numpy as np\n'), ((2792, 2869), 'functions.utils.build_model', 'functions.utils.build_model', ([], {'input_shape': 'input_shape', 'num_classes': 'num_classes'}), '(input_shape=input_shape, num_classes=num_classes)\n', (2819, 2869), False, 'import functions\n'), ((2999, 3027), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['train_labels'], {}), '(train_labels)\n', (3013, 3027), False, 'from tensorflow.keras.utils import to_categorical\n'), ((3051, 3078), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['test_labels'], {}), '(test_labels)\n', (3065, 3078), False, 'from tensorflow.keras.utils import to_categorical\n'), ((3101, 3122), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_val'], {}), '(y_val)\n', (3115, 3122), False, 'from tensorflow.keras.utils import to_categorical\n'), ((3281, 3395), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""val_loss.hdf5"""'], {'save_best_only': '(True)', 'save_weights_only': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""'}), "('val_loss.hdf5', save_best_only=True, save_weights_only=\n True, monitor='val_loss', mode='min')\n", (3296, 3395), False, 'from 
tensorflow.keras.callbacks import ModelCheckpoint\n'), ((3406, 3518), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""train_loss.hdf5"""'], {'save_best_only': '(True)', 'save_weights_only': '(True)', 'monitor': '"""loss"""', 'mode': '"""min"""'}), "('train_loss.hdf5', save_best_only=True, save_weights_only=\n True, monitor='loss', mode='min')\n", (3421, 3518), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2933, 2939), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (2937, 2939), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import pandas as pd
import numpy as np
from keras import Input, layers, regularizers, losses
from keras.models import Model
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping
import STRING
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plot
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score)
import seaborn as sns
class DeepAutoencoder(object):
    """Symmetric deep autoencoder built from stacked Dense layers.

    The encoder shrinks the input in equal steps of ``n_cols /
    dimension_node`` units down to the bottleneck; the decoder mirrors the
    same steps back up to ``n_cols``.
    """
    def __init__(self, n_cols, activation, prob_dropout, dimension_node, encoding_dim=None, final_activation='relu'):
        """
        :param n_cols: Number of columns of the dataset (input width)
        :param activation: Activation function for hidden layers
        :param prob_dropout: Proportion to dropout after each encoder layer
            (None disables dropout)
        :param dimension_node: Depth factor of encoder/decoder; the layer
            step size is n_cols / dimension_node
        :param encoding_dim: Optional explicit bottleneck size
        :param final_activation: Activation for bottleneck and output layers
        """
        self.n_cols = n_cols
        self.activation = activation
        self.prob_dropout = prob_dropout
        self.dimension_node = dimension_node
        self.encoding_dim = encoding_dim
        self.final_activation = final_activation
    def encoded(self, input_layer, sparsity_const=10e-5, change_encode_name=None):
        """
        Generate the encode layers
        :param input_layer: The input layer
        :param sparsity_const: L1 activity-regularization constant applied to
            every encoder layer, restricting active nodes (sparse autoencoder);
            None disables it. Note 10e-5 == 1e-4.
        :param change_encode_name: Define a different layer name
        :return: tuple (encode layer, size of the last/bottleneck layer)
        """
        dim = self.dimension_node
        cols = self.n_cols
        node_size = int(cols / dim)
        # Layer widths shrink by node_size each step down to the bottleneck.
        nodes_range = range(cols - node_size, node_size - 1, -node_size)
        for nodes in nodes_range:
            # `&` acts as boolean AND here (both operands are bools).
            if (nodes == nodes_range[-1]) & (self.encoding_dim is not None):
                last_node = self.encoding_dim
            else:
                last_node = nodes
            if nodes == nodes_range[-1]:
                last_activation = self.final_activation
            else:
                last_activation = self.activation
            layer_name = 'Encoded_model_' + str(last_node)
            if change_encode_name is not None:
                layer_name = 'Encoded_model_' + change_encode_name + '_' + str(last_node)
            if sparsity_const is not None:
                input_layer = layers.Dense(last_node, activation=last_activation, name=layer_name, activity_regularizer=
                regularizers.l1(sparsity_const))(input_layer)
            else:
                input_layer = layers.Dense(last_node, activation=last_activation, name=layer_name)(input_layer)
            if self.prob_dropout is not None:
                input_layer = layers.Dropout(self.prob_dropout)(input_layer)
        encode_layer = input_layer
        return encode_layer, last_node
    def decoded(self, encode_layer, change_decode_name=None):
        """
        Generate the decoded model
        :param encode_layer: The input layer to the decode
        :param change_decode_name: Define a different layer name
        :return: decoded layer
        """
        dim = self.dimension_node
        cols = self.n_cols
        node_size = int(cols / dim)
        # Mirror of the encoder: widths grow by node_size back toward n_cols.
        nodes_range = range(node_size * 2, cols, node_size)
        name = 'Decoded_Model'
        for nodes in nodes_range:
            layer_name = 'Decoded_model_' + str(nodes)
            if change_decode_name is not None:
                layer_name = 'Encoded_model_' + change_decode_name + '_' + str(nodes)
                name = 'Decoded_Model_' + change_decode_name
            encode_layer = layers.Dense(nodes, activation=self.activation, name=layer_name)(encode_layer)
        # Final layer restores the original input width.
        encode_layer = layers.Dense(self.n_cols, activation=self.final_activation, name=name)(encode_layer)
        decode_layer = encode_layer
        return decode_layer
    def autoencoder(self, input_tensor):
        """
        Generate the autoencoder model
        :param input_tensor: the original input tensor
        :return: autoencoder model (encoder chained into decoder)
        """
        encode_layer, _ = DeepAutoencoder.encoded(self, input_tensor)
        decode_layer = DeepAutoencoder.decoded(self, encode_layer)
        autoencoder = Model(input_tensor, decode_layer)
        print(autoencoder.summary())
        return autoencoder
    def fit(self, x, x_valid, learning_rate=0.001, loss_function=losses.mean_squared_error, epochs=500,
            batch_size=500, verbose=True, callback_list=[]):
        """Train the autoencoder to reconstruct *x* and plot the loss curves.

        :param x: training data (used as both input and target).
        :param x_valid: validation data passed to Keras ``validation_data``.
        :param learning_rate: Adam learning rate.
        :param loss_function: reconstruction loss.
        :param epochs: maximum training epochs.
        :param batch_size: mini-batch size.
        :param callback_list: Keras callbacks. NOTE(review): mutable default
            argument; harmless here since it is never mutated, but fragile.
        """
        input_tensor = Input(shape=(self.n_cols,), name='Input')
        autoencoder = DeepAutoencoder.autoencoder(self, input_tensor)
        optimizer = Adam(lr=learning_rate)
        autoencoder.compile(optimizer=optimizer, loss=loss_function)
        history = autoencoder.fit(x, x, epochs=epochs, batch_size=batch_size, verbose=verbose, callbacks=callback_list,
                                  shuffle=True, validation_data=x_valid).history
        # Visualize train vs validation reconstruction loss.
        plot.plot(history['loss'])
        plot.plot(history['val_loss'])
        plot.title('model loss')
        plot.ylabel('loss')
        plot.xlabel('epoch')
        plot.legend(['train', 'valid'], loc='upper right')
        plot.show()
if __name__ == '__main__':
    import os

    seed = 42
    np.random.seed(seed)
    os.chdir(STRING.path_db)
    # LOAD FILE
    normal = pd.read_csv('normal.csv', sep=';', encoding='latin1')
    anormal = pd.read_csv('anormal.csv', sep=';', encoding='latin1')
    # NORMALIZE: label the two sets, standardize every feature column jointly
    normal['CONTROL'] = pd.Series(0, index=normal.index)
    anormal['CONTROL'] = pd.Series(1, index=anormal.index)
    normalize = pd.concat([normal, anormal], axis=0)
    for i in normalize.drop(['oferta_id', 'target', 'CONTROL'], axis=1).columns.values.tolist():
        normalize[i] = normalize[i].map(float)
        normalize[i] = StandardScaler().fit_transform(normalize[i].values.reshape(-1, 1))
    normal = normalize[normalize['CONTROL'] == 0]
    anormal = normalize[normalize['CONTROL'] == 1]
    del normal['CONTROL']
    del anormal['CONTROL']
    # VARIANCE REDUCTION: drop zero-variance features, fitted on normal data only
    selection = VarianceThreshold(threshold=0.0)
    selection.fit(normal.drop(['oferta_id', 'target'], axis=1))
    features = selection.get_support(indices=True)
    features = list(normal.columns[features]) + ['oferta_id', 'target']
    normal = normal[features]
    test_anormal = anormal[features]
    # Hold out a normal test set the same size as the anomaly set
    train, valid, _, _ = train_test_split(normal, normal, test_size=0.30, random_state=42)
    valid, test_normal, _, _ = train_test_split(valid, valid, test_size=len(anormal.index), random_state=42)
    valid = valid.drop(['oferta_id', 'target'], axis=1)
    # INPUT COLS
    cols = train.drop(['oferta_id', 'target'], axis=1).shape[1]
    ae = DeepAutoencoder(n_cols=cols, activation='tanh', prob_dropout=0.2, dimension_node=4, encoding_dim=14)
    early_stopping_monitor = EarlyStopping(patience=2)
    # BUG FIX: the keyword was spelled ``X_valid``, which is not a parameter of
    # DeepAutoencoder.fit (the parameter is ``x_valid``) and raised a TypeError.
    ae.fit(train.drop(['oferta_id', 'target'], axis=1), x_valid=[valid, valid], callback_list=[early_stopping_monitor],
           batch_size=200, epochs=1000,
           learning_rate=0.001)
    # After watching the plot where train and valid have to converge (the reconstruction error)
    # we look if it is enough low
    # NOTE(review): this builds a FRESH autoencoder — it does not reuse the
    # weights trained inside ae.fit(); verify whether that is intended.
    input_tensor = Input(shape=(cols,))
    autoencoder = ae.autoencoder(input_tensor)
    prediction_true = autoencoder.predict(valid)
    prediction_test = autoencoder.predict(test_normal.drop(['oferta_id', 'target'], axis=1))
    prediction_anormal = autoencoder.predict(test_anormal.drop(['oferta_id', 'target'], axis=1))
    # Per-row reconstruction MSE for each split
    mse_true = np.mean(np.power(valid - prediction_true, 2), axis=1)
    mse_test = np.mean(np.power(test_normal.drop(['oferta_id', 'target'], axis=1) - prediction_test, 2), axis=1)
    mse_anormal = np.mean(np.power(test_anormal.drop(['oferta_id', 'target'], axis=1) - prediction_anormal, 2), axis=1)
    mse_true = pd.DataFrame(mse_true, columns=['reconstruction_error'])
    mse_test = pd.DataFrame(mse_test, columns=['reconstruction_error'])
    mse_anormal = pd.DataFrame(mse_anormal, columns=['reconstruction_error'])
    mse_true['target'] = pd.Series(0, index=mse_true.index)
    mse_test['target'] = pd.Series(0, index=mse_test.index)
    mse_anormal['target'] = pd.Series(1, index=mse_anormal.index)
    error_df = pd.concat([mse_test, mse_anormal], axis=0)
    print(error_df.describe())
    # PLOT ERROR WITHOUT ANOMALIES
    fig = plot.figure()
    ax = fig.add_subplot(111)
    normal_error_df = error_df[(error_df['target'] == 0) & (error_df['reconstruction_error'] < 10)]
    _ = ax.hist(normal_error_df.reconstruction_error.values, bins=10)
    plot.show()
    plot.close()
    # PLOT ERROR WITH ANOMALIES
    fig = plot.figure()
    ax = fig.add_subplot(111)
    fraud_error_df = error_df[error_df['target'] == 1]
    _ = ax.hist(fraud_error_df.reconstruction_error.values, bins=10)
    plot.show()
    plot.close()
    # RECALL-PRECISION
    precision, recall, th = precision_recall_curve(error_df.target, error_df.reconstruction_error)
    plot.plot(recall, precision, 'b', label='Precision-Recall curve')
    plot.title('Recall vs Precision')
    plot.xlabel('Recall')
    plot.ylabel('Precision')
    plot.show()
    plot.plot(th, precision[1:], 'b', label='Threshold-Precision curve')
    plot.plot(th, recall[1:], 'g', label='Threshold-Recall curve')
    plot.title('Precision-Recall for different threshold values')
    plot.xlabel('Threshold')
    plot.ylabel('Precision-Recall')
    plot.legend(['precision', 'recall'], loc='upper right')
    plot.show()
    # OUTLIER DETECTION
    # We define a threshold for the reconstruction error. It will be based on the error plot
    thresholds = np.linspace(0.1, 10.0, 200)
    scores = []
    for threshold in thresholds:
        y_hat = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
        scores.append([
            recall_score(y_pred=y_hat, y_true=error_df.target.values),
            precision_score(y_pred=y_hat, y_true=error_df.target.values),
            fbeta_score(y_pred=y_hat, y_true=error_df.target.values,
                        beta=1)])
    scores = np.array(scores)
    # Keep the threshold that maximises F1 (column 2 of scores)
    threshold = thresholds[scores[:, 2].argmax()]
    print('final Threshold ', threshold)
    predicted = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
    print('PRECISION ', precision_score(error_df.target.values, predicted))
    print('RECALL ', recall_score(error_df.target.values, predicted))
    print('FBSCORE ', fbeta_score(error_df.target.values, predicted, beta=1))
    groups = error_df.groupby('target')
    fig, ax = plot.subplots()
    for name, group in groups:
        ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
                label="Anomaly" if name == 1 else "Normal")
    ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
    ax.legend()
    plot.title("Reconstruction error for different classes")
    plot.ylabel("Reconstruction error")
    plot.xlabel("Data point index")
    plot.show()
    conf_matrix = confusion_matrix(error_df.target, predicted)
    plot.figure(figsize=(12, 12))
    sns.heatmap(conf_matrix, xticklabels=['Normal', 'Anomaly'], yticklabels=['Normal', 'Anomaly'], annot=True, fmt="d")
    plot.title("Confusion matrix")
    plot.ylabel('True class')
    plot.xlabel('Predicted class')
    plot.show()
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"seaborn.heatmap",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"keras.models.Model",
"matplotlib.pyplot.figure",
"keras.regularizers.l1",
"os.chdir",
"pandas.DataFrame",
"sklearn.metrics.... | [((5395, 5415), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5409, 5415), True, 'import numpy as np\n'), ((5421, 5445), 'os.chdir', 'os.chdir', (['STRING.path_db'], {}), '(STRING.path_db)\n', (5429, 5445), False, 'import os\n'), ((5479, 5532), 'pandas.read_csv', 'pd.read_csv', (['"""normal.csv"""'], {'sep': '""";"""', 'encoding': '"""latin1"""'}), "('normal.csv', sep=';', encoding='latin1')\n", (5490, 5532), True, 'import pandas as pd\n'), ((5548, 5602), 'pandas.read_csv', 'pd.read_csv', (['"""anormal.csv"""'], {'sep': '""";"""', 'encoding': '"""latin1"""'}), "('anormal.csv', sep=';', encoding='latin1')\n", (5559, 5602), True, 'import pandas as pd\n'), ((5647, 5679), 'pandas.Series', 'pd.Series', (['(0)'], {'index': 'normal.index'}), '(0, index=normal.index)\n', (5656, 5679), True, 'import pandas as pd\n'), ((5706, 5739), 'pandas.Series', 'pd.Series', (['(1)'], {'index': 'anormal.index'}), '(1, index=anormal.index)\n', (5715, 5739), True, 'import pandas as pd\n'), ((5759, 5795), 'pandas.concat', 'pd.concat', (['[normal, anormal]'], {'axis': '(0)'}), '([normal, anormal], axis=0)\n', (5768, 5795), True, 'import pandas as pd\n'), ((6240, 6272), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': '(0.0)'}), '(threshold=0.0)\n', (6257, 6272), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((6562, 6626), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal', 'normal'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(normal, normal, test_size=0.3, random_state=42)\n', (6578, 6626), False, 'from sklearn.model_selection import train_test_split\n'), ((7023, 7048), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(2)'}), '(patience=2)\n', (7036, 7048), False, 'from keras.callbacks import EarlyStopping\n'), ((7398, 7418), 'keras.Input', 'Input', ([], {'shape': '(cols,)'}), '(shape=(cols,))\n', (7403, 7418), False, 'from keras import 
Input, layers, regularizers, losses\n'), ((8036, 8092), 'pandas.DataFrame', 'pd.DataFrame', (['mse_true'], {'columns': "['reconstruction_error']"}), "(mse_true, columns=['reconstruction_error'])\n", (8048, 8092), True, 'import pandas as pd\n'), ((8109, 8165), 'pandas.DataFrame', 'pd.DataFrame', (['mse_test'], {'columns': "['reconstruction_error']"}), "(mse_test, columns=['reconstruction_error'])\n", (8121, 8165), True, 'import pandas as pd\n'), ((8185, 8244), 'pandas.DataFrame', 'pd.DataFrame', (['mse_anormal'], {'columns': "['reconstruction_error']"}), "(mse_anormal, columns=['reconstruction_error'])\n", (8197, 8244), True, 'import pandas as pd\n'), ((8273, 8307), 'pandas.Series', 'pd.Series', (['(0)'], {'index': 'mse_true.index'}), '(0, index=mse_true.index)\n', (8282, 8307), True, 'import pandas as pd\n'), ((8334, 8368), 'pandas.Series', 'pd.Series', (['(0)'], {'index': 'mse_test.index'}), '(0, index=mse_test.index)\n', (8343, 8368), True, 'import pandas as pd\n'), ((8398, 8435), 'pandas.Series', 'pd.Series', (['(1)'], {'index': 'mse_anormal.index'}), '(1, index=mse_anormal.index)\n', (8407, 8435), True, 'import pandas as pd\n'), ((8452, 8494), 'pandas.concat', 'pd.concat', (['[mse_test, mse_anormal]'], {'axis': '(0)'}), '([mse_test, mse_anormal], axis=0)\n', (8461, 8494), True, 'import pandas as pd\n'), ((8576, 8589), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (8587, 8589), True, 'import matplotlib.pyplot as plot\n'), ((8798, 8809), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (8807, 8809), True, 'import matplotlib.pyplot as plot\n'), ((8815, 8827), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', (8825, 8827), True, 'import matplotlib.pyplot as plot\n'), ((8874, 8887), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (8885, 8887), True, 'import matplotlib.pyplot as plot\n'), ((9050, 9061), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (9059, 9061), True, 'import matplotlib.pyplot as plot\n'), 
((9067, 9079), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', (9077, 9079), True, 'import matplotlib.pyplot as plot\n'), ((9135, 9205), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['error_df.target', 'error_df.reconstruction_error'], {}), '(error_df.target, error_df.reconstruction_error)\n', (9157, 9205), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((9211, 9276), 'matplotlib.pyplot.plot', 'plot.plot', (['recall', 'precision', '"""b"""'], {'label': '"""Precision-Recall curve"""'}), "(recall, precision, 'b', label='Precision-Recall curve')\n", (9220, 9276), True, 'import matplotlib.pyplot as plot\n'), ((9282, 9315), 'matplotlib.pyplot.title', 'plot.title', (['"""Recall vs Precision"""'], {}), "('Recall vs Precision')\n", (9292, 9315), True, 'import matplotlib.pyplot as plot\n'), ((9321, 9342), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (9332, 9342), True, 'import matplotlib.pyplot as plot\n'), ((9348, 9372), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (9359, 9372), True, 'import matplotlib.pyplot as plot\n'), ((9378, 9389), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (9387, 9389), True, 'import matplotlib.pyplot as plot\n'), ((9397, 9465), 'matplotlib.pyplot.plot', 'plot.plot', (['th', 'precision[1:]', '"""b"""'], {'label': '"""Threshold-Precision curve"""'}), "(th, precision[1:], 'b', label='Threshold-Precision curve')\n", (9406, 9465), True, 'import matplotlib.pyplot as plot\n'), ((9471, 9533), 'matplotlib.pyplot.plot', 'plot.plot', (['th', 'recall[1:]', '"""g"""'], {'label': '"""Threshold-Recall curve"""'}), "(th, recall[1:], 'g', label='Threshold-Recall curve')\n", (9480, 9533), True, 'import matplotlib.pyplot as plot\n'), ((9539, 9600), 'matplotlib.pyplot.title', 'plot.title', (['"""Precision-Recall for different threshold values"""'], {}), 
"('Precision-Recall for different threshold values')\n", (9549, 9600), True, 'import matplotlib.pyplot as plot\n'), ((9606, 9630), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (9617, 9630), True, 'import matplotlib.pyplot as plot\n'), ((9636, 9667), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Precision-Recall"""'], {}), "('Precision-Recall')\n", (9647, 9667), True, 'import matplotlib.pyplot as plot\n'), ((9673, 9728), 'matplotlib.pyplot.legend', 'plot.legend', (["['precision', 'recall']"], {'loc': '"""upper right"""'}), "(['precision', 'recall'], loc='upper right')\n", (9684, 9728), True, 'import matplotlib.pyplot as plot\n'), ((9734, 9745), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (9743, 9745), True, 'import matplotlib.pyplot as plot\n'), ((9885, 9912), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10.0)', '(200)'], {}), '(0.1, 10.0, 200)\n', (9896, 9912), True, 'import numpy as np\n'), ((10354, 10370), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (10362, 10370), True, 'import numpy as np\n'), ((10842, 10857), 'matplotlib.pyplot.subplots', 'plot.subplots', ([], {}), '()\n', (10855, 10857), True, 'import matplotlib.pyplot as plot\n'), ((11170, 11226), 'matplotlib.pyplot.title', 'plot.title', (['"""Reconstruction error for different classes"""'], {}), "('Reconstruction error for different classes')\n", (11180, 11226), True, 'import matplotlib.pyplot as plot\n'), ((11232, 11267), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Reconstruction error"""'], {}), "('Reconstruction error')\n", (11243, 11267), True, 'import matplotlib.pyplot as plot\n'), ((11273, 11304), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Data point index"""'], {}), "('Data point index')\n", (11284, 11304), True, 'import matplotlib.pyplot as plot\n'), ((11310, 11321), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (11319, 11321), True, 'import matplotlib.pyplot as plot\n'), ((11343, 11387), 
'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['error_df.target', 'predicted'], {}), '(error_df.target, predicted)\n', (11359, 11387), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((11393, 11422), 'matplotlib.pyplot.figure', 'plot.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (11404, 11422), True, 'import matplotlib.pyplot as plot\n'), ((11428, 11548), 'seaborn.heatmap', 'sns.heatmap', (['conf_matrix'], {'xticklabels': "['Normal', 'Anomaly']", 'yticklabels': "['Normal', 'Anomaly']", 'annot': '(True)', 'fmt': '"""d"""'}), "(conf_matrix, xticklabels=['Normal', 'Anomaly'], yticklabels=[\n 'Normal', 'Anomaly'], annot=True, fmt='d')\n", (11439, 11548), True, 'import seaborn as sns\n'), ((11550, 11580), 'matplotlib.pyplot.title', 'plot.title', (['"""Confusion matrix"""'], {}), "('Confusion matrix')\n", (11560, 11580), True, 'import matplotlib.pyplot as plot\n'), ((11586, 11611), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""True class"""'], {}), "('True class')\n", (11597, 11611), True, 'import matplotlib.pyplot as plot\n'), ((11617, 11647), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Predicted class"""'], {}), "('Predicted class')\n", (11628, 11647), True, 'import matplotlib.pyplot as plot\n'), ((11653, 11664), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (11662, 11664), True, 'import matplotlib.pyplot as plot\n'), ((4347, 4380), 'keras.models.Model', 'Model', (['input_tensor', 'decode_layer'], {}), '(input_tensor, decode_layer)\n', (4352, 4380), False, 'from keras.models import Model\n'), ((4642, 4683), 'keras.Input', 'Input', ([], {'shape': '(self.n_cols,)', 'name': '"""Input"""'}), "(shape=(self.n_cols,), name='Input')\n", (4647, 4683), False, 'from keras import Input, layers, regularizers, losses\n'), ((4776, 4798), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (4780, 4798), False, 'from 
keras.optimizers import SGD, Adam\n'), ((5083, 5109), 'matplotlib.pyplot.plot', 'plot.plot', (["history['loss']"], {}), "(history['loss'])\n", (5092, 5109), True, 'import matplotlib.pyplot as plot\n'), ((5119, 5149), 'matplotlib.pyplot.plot', 'plot.plot', (["history['val_loss']"], {}), "(history['val_loss'])\n", (5128, 5149), True, 'import matplotlib.pyplot as plot\n'), ((5159, 5183), 'matplotlib.pyplot.title', 'plot.title', (['"""model loss"""'], {}), "('model loss')\n", (5169, 5183), True, 'import matplotlib.pyplot as plot\n'), ((5193, 5212), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""loss"""'], {}), "('loss')\n", (5204, 5212), True, 'import matplotlib.pyplot as plot\n'), ((5222, 5242), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (5233, 5242), True, 'import matplotlib.pyplot as plot\n'), ((5252, 5302), 'matplotlib.pyplot.legend', 'plot.legend', (["['train', 'valid']"], {'loc': '"""upper right"""'}), "(['train', 'valid'], loc='upper right')\n", (5263, 5302), True, 'import matplotlib.pyplot as plot\n'), ((5312, 5323), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (5321, 5323), True, 'import matplotlib.pyplot as plot\n'), ((7737, 7773), 'numpy.power', 'np.power', (['(valid - prediction_true)', '(2)'], {}), '(valid - prediction_true, 2)\n', (7745, 7773), True, 'import numpy as np\n'), ((10582, 10632), 'sklearn.metrics.precision_score', 'precision_score', (['error_df.target.values', 'predicted'], {}), '(error_df.target.values, predicted)\n', (10597, 10632), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((10656, 10703), 'sklearn.metrics.recall_score', 'recall_score', (['error_df.target.values', 'predicted'], {}), '(error_df.target.values, predicted)\n', (10668, 10703), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((10728, 10782), 
'sklearn.metrics.fbeta_score', 'fbeta_score', (['error_df.target.values', 'predicted'], {'beta': '(1)'}), '(error_df.target.values, predicted, beta=1)\n', (10739, 10782), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((3830, 3900), 'keras.layers.Dense', 'layers.Dense', (['self.n_cols'], {'activation': 'self.final_activation', 'name': 'name'}), '(self.n_cols, activation=self.final_activation, name=name)\n', (3842, 3900), False, 'from keras import Input, layers, regularizers, losses\n'), ((3725, 3789), 'keras.layers.Dense', 'layers.Dense', (['nodes'], {'activation': 'self.activation', 'name': 'layer_name'}), '(nodes, activation=self.activation, name=layer_name)\n', (3737, 3789), False, 'from keras import Input, layers, regularizers, losses\n'), ((5966, 5982), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5980, 5982), False, 'from sklearn.preprocessing import StandardScaler\n'), ((10099, 10156), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_pred': 'y_hat', 'y_true': 'error_df.target.values'}), '(y_pred=y_hat, y_true=error_df.target.values)\n', (10111, 10156), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((10171, 10231), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_pred': 'y_hat', 'y_true': 'error_df.target.values'}), '(y_pred=y_hat, y_true=error_df.target.values)\n', (10186, 10231), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((10246, 10310), 'sklearn.metrics.fbeta_score', 'fbeta_score', ([], {'y_pred': 'y_hat', 'y_true': 'error_df.target.values', 'beta': '(1)'}), '(y_pred=y_hat, y_true=error_df.target.values, beta=1)\n', (10257, 10310), False, 'from sklearn.metrics import confusion_matrix, precision_recall_curve, recall_score, precision_score, fbeta_score\n'), ((2644, 
2712), 'keras.layers.Dense', 'layers.Dense', (['last_node'], {'activation': 'last_activation', 'name': 'layer_name'}), '(last_node, activation=last_activation, name=layer_name)\n', (2656, 2712), False, 'from keras import Input, layers, regularizers, losses\n'), ((2804, 2837), 'keras.layers.Dropout', 'layers.Dropout', (['self.prob_dropout'], {}), '(self.prob_dropout)\n', (2818, 2837), False, 'from keras import Input, layers, regularizers, losses\n'), ((2548, 2579), 'keras.regularizers.l1', 'regularizers.l1', (['sparsity_const'], {}), '(sparsity_const)\n', (2563, 2579), False, 'from keras import Input, layers, regularizers, losses\n')] |
# import appropriate python modules to the program
import numpy as np
import cv2
from matplotlib import pyplot as plt
import freenect
# capturing video from Kinect Xbox 360
def get_video():
    """Grab one RGB frame from the Kinect and return it in BGR channel order."""
    rgb_frame, _ = freenect.sync_get_video()
    # OpenCV works in BGR, freenect delivers RGB — convert before use.
    return cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
# callback function for selecting object by clicking 4-corner-points of the object
def select_object(event, x, y, flags, param):
    """Mouse callback: while in input mode, collect up to four clicked corner
    points of the reference object and mark each click on the global frame."""
    global box_pts, frame
    # Ignore clicks unless we are in input mode and still need corners.
    if not input_mode or event != cv2.EVENT_LBUTTONDOWN or len(box_pts) >= 4:
        return
    box_pts.append([x, y])
    frame = cv2.circle(frame, (x, y), 4, (0, 255, 0), 2)
# selecting object by clicking 4-corner-points
def select_object_mode():
    """Block until the user has clicked the four corner points of the object.

    Sets ``input_mode`` so the mouse callback accepts clicks, then polls
    until ``box_pts`` holds four points; ``cv2.waitKey`` keeps the HighGUI
    event loop pumping so clicks are delivered.
    """
    global input_mode, initialize_mode
    input_mode = True
    # BUG FIX: removed ``frame_static = frame.copy()`` — it was computed but
    # never used (the loop below displays the live global ``frame``).
    while len(box_pts) < 4:
        cv2.imshow("frame", frame)
        cv2.waitKey(1)
    initialize_mode = True
    input_mode = False
# setting the boundary of reference object
def set_boundary_of_reference(box_pts):
    """Compute the axis-aligned bounding box of the four clicked corners.

    :param box_pts: four [x, y] points ordered upper-left, upper-right,
                    lower-left, lower-right
    :return: (pts2, right_bound, left_bound, lower_bound, upper_bound) where
             pts2 is a float32 array of the box corners translated to the
             origin, suitable for ``cv2.getPerspectiveTransform``
    """
    # min/max replace the original if/else ladders, same tie behavior.
    upper_bound = min(box_pts[0][1], box_pts[1][1])
    lower_bound = max(box_pts[2][1], box_pts[3][1])
    left_bound = min(box_pts[0][0], box_pts[2][0])
    right_bound = max(box_pts[1][0], box_pts[3][0])
    width = right_bound - left_bound
    height = lower_bound - upper_bound
    pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    # display dimension of reference object image to terminal
    # (BUG FIX: the original Python-2 style bare ``print`` statement followed
    # by ``pts2`` on its own line printed nothing under Python 3)
    print(pts2)
    return pts2, right_bound, left_bound, lower_bound, upper_bound
# doing perspective transform to reference object
def input_perspective_transform(box_pts, pts2, right_bound, left_bound, lower_bound, upper_bound):
    """Warp the clicked quadrilateral into a fronto-parallel grayscale template.

    :param box_pts: four clicked [x, y] corners in the live frame
    :param pts2: destination corners from ``set_boundary_of_reference``
    :param right_bound/left_bound/lower_bound/upper_bound: bounding-box edges
    :return: grayscale reference-object image of the bounding-box size
    """
    # BUG FIX: removed ``global object_orb`` — the name was declared but never
    # defined or used anywhere in the script.
    pts1 = np.float32(box_pts)
    M = cv2.getPerspectiveTransform(pts1, pts2)
    img_object = cv2.warpPerspective(frame, M, ((right_bound - left_bound), (lower_bound - upper_bound)))
    return cv2.cvtColor(img_object, cv2.COLOR_BGR2GRAY)
# feature detection and description using ORB
def orb_feature_descriptor(img_object):
    """Detect ORB keypoints and descriptors on both the reference template
    and the current global frame."""
    template_kp, template_des = orb.detectAndCompute(img_object, None)
    frame_kp, frame_des = orb.detectAndCompute(frame, None)
    return template_kp, template_des, frame_kp, frame_des
# feature matching using Brute Force
def brute_force_feature_matcher(kp1, des1, kp2, des2):
    """Match the two descriptor sets by brute force (Hamming distance,
    cross-checked) and return the matches sorted best-first."""
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    return sorted(matcher.match(des1, des2), key=lambda m: m.distance)
# finding homography matrix between reference and image frame
def find_homography_object(kp1, kp2, matches):
    """Estimate the template->frame homography from matched keypoints.

    Returns the RANSAC homography matrix and the inlier mask."""
    template_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    scene_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # cv2.findHomography already returns the (M, mask) pair.
    return cv2.findHomography(template_pts, scene_pts, cv2.RANSAC, 5.0)
# applying homography matrix as inference of perpective transformation
def output_perspective_transform(img_object, M):
    """Project the template's corners and center into the camera frame via M.

    :param img_object: grayscale reference template (used only for its h x w shape)
    :param M: 3x3 homography mapping template coordinates to frame coordinates
    :return: (corner points in frame coords, center point in frame coords,
              corners as z=0 3-D object points centered at the origin,
              template-plane center point)
    """
    h, w = img_object.shape
    # 2-D template corners (x, y), ordered top-left, bottom-left,
    # bottom-right, top-right.
    corner_pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    center_pts = np.float32([[w / 2, h / 2]]).reshape(-1, 1, 2)
    # Same corners as 3-D object points on the z=0 plane with the origin at
    # the template center, in the SAME order as corner_pts — solvePnP pairs
    # object and image points element-wise.
    corner_pts_3d = np.float32(
        [[-w / 2, -h / 2, 0], [-w / 2, (h - 1) / 2, 0], [(w - 1) / 2, (h - 1) / 2, 0], [(w - 1) / 2, -h / 2, 0]])
    corner_camera_coord = cv2.perspectiveTransform(corner_pts, M)
    center_camera_coord = cv2.perspectiveTransform(center_pts, M)
    return corner_camera_coord, center_camera_coord, corner_pts_3d, center_pts
# solving pnp using iterative LMA algorithm
def iterative_solve_pnp(object_points, image_points):
    """Recover the object's rotation and translation vectors from 3D-2D point
    correspondences using OpenCV's default iterative PnP solver and the
    global Kinect calibration parameters."""
    flat_image_points = image_points.reshape(-1, 2)
    _, rotation, translation = cv2.solvePnP(object_points, flat_image_points,
                                              kinect_intrinsic_param, kinect_distortion_param)
    return rotation, translation
# drawing box around object
def draw_box_around_object(dst):
    """Draw a closed outline through the projected corner points on the
    global frame and return the annotated image."""
    outline = np.int32(dst)
    return cv2.polylines(frame, [outline], True, 255, 3)
# recording sample data
def record_samples_data(translation, rotation):
    """Append the current pose sample to the global accumulator lists
    (t1..t3 for translation components, r1..r3 for rotation components)."""
    samples = translation.tolist() + rotation.tolist()
    for bucket, value in zip((t1, t2, t3, r1, r2, r3), samples):
        bucket.append(value)
# computing and showing recorded data to terminal
def showing_recorded_data_to_terminal(t1, t2, t3, r1, r2, r3):
    """Print mean and std of the recorded translation/rotation samples.

    BUG FIX: the original body consisted of Python-2 style bare ``print``
    statements followed by the values on separate lines; under Python 3
    those lines evaluated the expressions and printed nothing.

    :param t1, t2, t3: recorded translation components (sequences of floats)
    :param r1, r2, r3: recorded rotation components (sequences of floats)
    """
    labels = ('t1', 't2', 't3', 'r1', 'r2', 'r3')
    for label, samples in zip(labels, (t1, t2, t3, r1, r2, r3)):
        data = np.array(samples)
        print('mean ' + label, np.mean(data))
        print('std ' + label, np.std(data))
        print('')
    print('#####################')
    print('')
# showing object position and orientation value to frame
def put_position_orientation_value_to_frame(translation, rotation):
    """Overlay the pose estimate (position in cm at y=30, orientation in
    degrees at y=60) onto the global frame and return the annotated frame."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    rows = (('position(cm)', translation, 30), ('orientation(degree)', rotation, 60))
    for title, values, row_y in rows:
        # Green row label on the left, red per-axis values to the right.
        cv2.putText(frame, title, (10, row_y), font, 0.7, (0, 255, 0), 1, cv2.LINE_AA)
        for axis, value, col_x in zip('xyz', values, (250, 350, 450)):
            text = axis + ':' + str(round(value, 2))
            cv2.putText(frame, text, (col_x, row_y), font, 0.7, (0, 0, 255), 2, cv2.LINE_AA)
    return frame
############
### Main ###
############
# initialization
input_mode = False  # True while the user is clicking the 4 corner points
initialize_mode = False  # set once a reference object has been selected
track_mode = False  # tracking runs right after object selection
box_pts = []  # clicked corner points of the reference object
record_num = 0  # samples recorded so far in the current run
record_mode = False  # True while a 50-sample recording run is in progress
t1, t2, t3, r1, r2, r3 = [], [], [], [], [], []
# Camera intrinsics/distortion — presumably from a prior Kinect calibration; TODO confirm
kinect_intrinsic_param = np.array([[514.04093664, 0., 320], [0., 514.87476583, 240], [0., 0., 1.]])
kinect_distortion_param = np.array([2.68661165e-01, -1.31720458e+00, -3.22098653e-03, -1.11578383e-03, 2.44470018e+00])
orb = cv2.ORB_create()
cv2.namedWindow("frame")
cv2.setMouseCallback("frame", select_object)
while True:
    frame = get_video()
    k = cv2.waitKey(1) & 0xFF
    # press i to enter input mode
    if k == ord('i'):
        # select object by clicking 4-corner-points
        select_object_mode()
        # set the boundary of reference object
        pts2, right_bound, left_bound, lower_bound, upper_bound = set_boundary_of_reference(box_pts)
        # do perspective transform to reference object
        img_object = input_perspective_transform(box_pts, pts2, right_bound, left_bound, lower_bound, upper_bound)
        track_mode = True
    # track mode is run immediately after user selects 4-corner-points of object
    if track_mode is True:
        # feature detection and description
        kp1, des1, kp2, des2 = orb_feature_descriptor(img_object)
        # feature matching
        matches = brute_force_feature_matcher(kp1, des1, kp2, des2)
        # find homography matrix
        M, mask = find_homography_object(kp1, kp2, matches)
        # apply homography matrix using perspective transformation
        corner_camera_coord, center_camera_coord, object_points_3d, center_pts = output_perspective_transform(
            img_object, M)
        # solve pnp using iterative LMA algorithm
        rotation, translation = iterative_solve_pnp(object_points_3d, corner_camera_coord)
        # convert to centimeters (40/53 scale presumably from manual calibration — TODO confirm)
        translation = (40. / 53.) * translation * .1
        # convert to degree
        rotation = rotation * 180. / np.pi
        # press r to record 50 sample data and calculate its mean and std
        if k == ord("r"):
            record_mode = True
        if record_mode is True:
            record_num = record_num + 1
            # record 50 data
            record_samples_data(translation, rotation)
            if record_num == 50:
                record_mode = False
                record_num = 0
                # compute and show recorded data
                showing_recorded_data_to_terminal(t1, t2, t3, r1, r2, r3)
                # reset the data after 50 iterations
                t1, t2, t3, r1, r2, r3 = [], [], [], [], [], []
        # BUG FIX: the two frame-annotation calls below were at loop level,
        # outside this ``track_mode`` guard, so the very first frame raised
        # NameError on ``corner_camera_coord`` before any object was selected.
        # draw box around object
        frame = draw_box_around_object(corner_camera_coord)
        # show object position and orientation value to frame
        frame = put_position_orientation_value_to_frame(translation, rotation)
    cv2.imshow("frame", frame)
    # break when user pressing ESC
    if k == 27:
        break
cv2.destroyAllWindows() | [
"cv2.getPerspectiveTransform",
"cv2.solvePnP",
"numpy.mean",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.cvtColor",
"numpy.std",
"cv2.BFMatcher",
"cv2.setMouseCallback",
"numpy.int32",
"cv2.destroyAllWindows",
"cv2.circle",
"cv2.waitKey",
"cv2.ORB_create",
"cv2.putText",
"numpy.float32",... | [((7035, 7114), 'numpy.array', 'np.array', (['[[514.04093664, 0.0, 320], [0.0, 514.87476583, 240], [0.0, 0.0, 1.0]]'], {}), '([[514.04093664, 0.0, 320], [0.0, 514.87476583, 240], [0.0, 0.0, 1.0]])\n', (7043, 7114), True, 'import numpy as np\n'), ((7136, 7221), 'numpy.array', 'np.array', (['[0.268661165, -1.31720458, -0.00322098653, -0.00111578383, 2.44470018]'], {}), '([0.268661165, -1.31720458, -0.00322098653, -0.00111578383, 2.44470018]\n )\n', (7144, 7221), True, 'import numpy as np\n'), ((7237, 7253), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (7251, 7253), False, 'import cv2\n'), ((7255, 7279), 'cv2.namedWindow', 'cv2.namedWindow', (['"""frame"""'], {}), "('frame')\n", (7270, 7279), False, 'import cv2\n'), ((7280, 7324), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""frame"""', 'select_object'], {}), "('frame', select_object)\n", (7300, 7324), False, 'import cv2\n'), ((9763, 9786), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9784, 9786), False, 'import cv2\n'), ((207, 232), 'freenect.sync_get_video', 'freenect.sync_get_video', ([], {}), '()\n', (230, 232), False, 'import freenect\n'), ((245, 283), 'cv2.cvtColor', 'cv2.cvtColor', (['array', 'cv2.COLOR_RGB2BGR'], {}), '(array, cv2.COLOR_RGB2BGR)\n', (257, 283), False, 'import cv2\n'), ((1830, 1920), 'numpy.float32', 'np.float32', (['[upper_left_point, upper_right_point, lower_left_point, lower_right_point]'], {}), '([upper_left_point, upper_right_point, lower_left_point,\n lower_right_point])\n', (1840, 1920), True, 'import numpy as np\n'), ((2251, 2270), 'numpy.float32', 'np.float32', (['box_pts'], {}), '(box_pts)\n', (2261, 2270), True, 'import numpy as np\n'), ((2279, 2318), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (2306, 2318), False, 'import cv2\n'), ((2336, 2424), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'M', '(right_bound - left_bound, lower_bound - 
upper_bound)'], {}), '(frame, M, (right_bound - left_bound, lower_bound -\n upper_bound))\n', (2355, 2424), False, 'import cv2\n'), ((2436, 2480), 'cv2.cvtColor', 'cv2.cvtColor', (['img_object', 'cv2.COLOR_BGR2GRAY'], {}), '(img_object, cv2.COLOR_BGR2GRAY)\n', (2448, 2480), False, 'import cv2\n'), ((2809, 2857), 'cv2.BFMatcher', 'cv2.BFMatcher', (['cv2.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv2.NORM_HAMMING, crossCheck=True)\n', (2822, 2857), False, 'import cv2\n'), ((3235, 3288), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, cv2.RANSAC, 5.0)\n', (3253, 3288), False, 'import cv2\n'), ((3638, 3758), 'numpy.float32', 'np.float32', (['[[-w / 2, -h / 2, 0], [-w / 2, (h - 1) / 2, 0], [(w - 1) / 2, (h - 1) / 2, \n 0], [(w - 1) / 2, -h / 2, 0]]'], {}), '([[-w / 2, -h / 2, 0], [-w / 2, (h - 1) / 2, 0], [(w - 1) / 2, (h -\n 1) / 2, 0], [(w - 1) / 2, -h / 2, 0]])\n', (3648, 3758), True, 'import numpy as np\n'), ((3795, 3834), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['corner_pts', 'M'], {}), '(corner_pts, M)\n', (3819, 3834), False, 'import cv2\n'), ((3866, 3905), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['center_pts', 'M'], {}), '(center_pts, M)\n', (3890, 3905), False, 'import cv2\n'), ((4168, 4262), 'cv2.solvePnP', 'cv2.solvePnP', (['object_points', 'image_points', 'kinect_intrinsic_param', 'kinect_distortion_param'], {}), '(object_points, image_points, kinect_intrinsic_param,\n kinect_distortion_param)\n', (4180, 4262), False, 'import cv2\n'), ((4979, 4991), 'numpy.array', 'np.array', (['t1'], {}), '(t1)\n', (4987, 4991), True, 'import numpy as np\n'), ((5001, 5013), 'numpy.array', 'np.array', (['t2'], {}), '(t2)\n', (5009, 5013), True, 'import numpy as np\n'), ((5023, 5035), 'numpy.array', 'np.array', (['t3'], {}), '(t3)\n', (5031, 5035), True, 'import numpy as np\n'), ((5046, 5058), 'numpy.array', 'np.array', (['r1'], {}), '(r1)\n', (5054, 5058), True, 'import 
numpy as np\n'), ((5068, 5080), 'numpy.array', 'np.array', (['r2'], {}), '(r2)\n', (5076, 5080), True, 'import numpy as np\n'), ((5090, 5102), 'numpy.array', 'np.array', (['r3'], {}), '(r3)\n', (5098, 5102), True, 'import numpy as np\n'), ((5927, 6016), 'cv2.putText', 'cv2.putText', (['frame', '"""position(cm)"""', '(10, 30)', 'font', '(0.7)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'position(cm)', (10, 30), font, 0.7, (0, 255, 0), 1, cv2\n .LINE_AA)\n", (5938, 6016), False, 'import cv2\n'), ((6353, 6448), 'cv2.putText', 'cv2.putText', (['frame', '"""orientation(degree)"""', '(10, 60)', 'font', '(0.7)', '(0, 255, 0)', '(1)', 'cv2.LINE_AA'], {}), "(frame, 'orientation(degree)', (10, 60), font, 0.7, (0, 255, 0),\n 1, cv2.LINE_AA)\n", (6364, 6448), False, 'import cv2\n'), ((9669, 9695), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (9679, 9695), False, 'import cv2\n'), ((580, 624), 'cv2.circle', 'cv2.circle', (['frame', '(x, y)', '(4)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), 4, (0, 255, 0), 2)\n', (590, 624), False, 'import cv2\n'), ((831, 857), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (841, 857), False, 'import cv2\n'), ((866, 880), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (877, 880), False, 'import cv2\n'), ((5178, 5189), 'numpy.mean', 'np.mean', (['t1'], {}), '(t1)\n', (5185, 5189), True, 'import numpy as np\n'), ((5214, 5224), 'numpy.std', 'np.std', (['t1'], {}), '(t1)\n', (5220, 5224), True, 'import numpy as np\n'), ((5267, 5278), 'numpy.mean', 'np.mean', (['t2'], {}), '(t2)\n', (5274, 5278), True, 'import numpy as np\n'), ((5303, 5313), 'numpy.std', 'np.std', (['t2'], {}), '(t2)\n', (5309, 5313), True, 'import numpy as np\n'), ((5356, 5367), 'numpy.mean', 'np.mean', (['t3'], {}), '(t3)\n', (5363, 5367), True, 'import numpy as np\n'), ((5392, 5402), 'numpy.std', 'np.std', (['t3'], {}), '(t3)\n', (5398, 5402), True, 'import numpy as np\n'), ((5462, 5473), 
'numpy.mean', 'np.mean', (['r1'], {}), '(r1)\n', (5469, 5473), True, 'import numpy as np\n'), ((5498, 5508), 'numpy.std', 'np.std', (['r1'], {}), '(r1)\n', (5504, 5508), True, 'import numpy as np\n'), ((5551, 5562), 'numpy.mean', 'np.mean', (['r2'], {}), '(r2)\n', (5558, 5562), True, 'import numpy as np\n'), ((5587, 5597), 'numpy.std', 'np.std', (['r2'], {}), '(r2)\n', (5593, 5597), True, 'import numpy as np\n'), ((5640, 5651), 'numpy.mean', 'np.mean', (['r3'], {}), '(r3)\n', (5647, 5651), True, 'import numpy as np\n'), ((5676, 5686), 'numpy.std', 'np.std', (['r3'], {}), '(r3)\n', (5682, 5686), True, 'import numpy as np\n'), ((7372, 7386), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7383, 7386), False, 'import cv2\n'), ((3071, 3120), 'numpy.float32', 'np.float32', (['[kp1[m.queryIdx].pt for m in matches]'], {}), '([kp1[m.queryIdx].pt for m in matches])\n', (3081, 3120), True, 'import numpy as np\n'), ((3153, 3202), 'numpy.float32', 'np.float32', (['[kp2[m.trainIdx].pt for m in matches]'], {}), '([kp2[m.trainIdx].pt for m in matches])\n', (3163, 3202), True, 'import numpy as np\n'), ((3475, 3535), 'numpy.float32', 'np.float32', (['[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]'], {}), '([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])\n', (3485, 3535), True, 'import numpy as np\n'), ((3571, 3599), 'numpy.float32', 'np.float32', (['[[w / 2, h / 2]]'], {}), '([[w / 2, h / 2]])\n', (3581, 3599), True, 'import numpy as np\n'), ((4437, 4450), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (4445, 4450), True, 'import numpy as np\n')] |
# Copyright 2022 <NAME>, <NAME>, <NAME>.
# Licensed under the BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
# This file may not be copied, modified, or distributed
# except according to those terms.
import sys
from collections import defaultdict, namedtuple
from enum import Enum
from itertools import product
sys.stderr = open(snakemake.log[0], "w")
import gffutils
import numpy as np
import requests
from dnachisel.biotools import get_backtranslation_table, translate
from pysam import FastaFile, VariantFile, VariantHeader, VariantRecord
from requests.models import ContentDecodingError
# Download the curated cluster/mutation catalogue from the CoVariants project.
covariants_data = requests.get(
    "https://raw.githubusercontent.com/hodcroftlab/covariants/master/web/data/clusters.json"
).json()

# Back-translation table: amino acid -> list of codons ("Standard" genetic code).
translate_aa = get_backtranslation_table("Standard")

# In-memory gffutils database of the reference annotation.
gff = gffutils.create_db(snakemake.input.annotation, dbfn=":memory:")
# Gene coordinates keyed by gene name (gffutils start/end are 1-based).
gene_start = {gene["gene_name"][0]: gene.start for gene in gff.features_of_type("gene")}
gene_end = {gene["gene_name"][0]: gene.end for gene in gff.features_of_type("gene")}
def aa_to_dna(aa_seq):
    """Back-translate an amino-acid sequence.

    Returns an iterator over every DNA sequence whose translation is
    aa_seq, i.e. the Cartesian product of the synonymous codons of each
    residue, joined into strings.
    """
    codon_choices = product(*[translate_aa[aa] for aa in aa_seq])
    return map("".join, codon_choices)
def codon_equivalence_class(dna_seq):
    # All DNA sequences coding for the same protein sequence as dna_seq:
    # translate to amino acids, then back-translate to every synonymous
    # codon combination (returns a generator).
    aa = translate(dna_seq)
    return aa_to_dna(aa)
class VariantType(Enum):
    # Classification of a variant by its effect, as detected in
    # SynonymousVariant.variant_type() from the "-" placeholders.
    Ins = 1    # insertion (left side is "-")
    Del = 2    # deletion (right side is "-")
    Subst = 3  # substitution (neither side is "-")
class SynonymousVariant:
    """A nucleotide-level variant from the CoVariants catalogue.

    Attributes:
        left: reference base, or "-" for an insertion.
        pos: 1-based position (genomic for synonymous variants).
        right: alternative base, or "-" for a deletion.

    Instances are hashable (usable as dict keys) and order by position
    only, while equality compares the full (left, pos, right) triple.
    """

    def __init__(self, left, pos, right):
        self.left = left
        self.pos = pos
        self.right = right

    def _key(self):
        # Internal identity triple shared by __eq__ and __hash__.
        return (self.left, self.pos, self.right)

    def __eq__(self, other):
        return self._key() == (other.left, other.pos, other.right)

    def __hash__(self):
        return hash(self._key())

    def __lt__(self, other):
        # Sort by position only; ties between different variants are fine
        # for the batching done in get_variants().
        return self.pos < other.pos

    def is_same_feature(self, other):
        # Genome-level variants all live on the same "feature".
        return True

    def variant_type(self):
        """Classify as insertion, deletion or substitution."""
        if self.left == "-":
            return VariantType.Ins
        if self.right == "-":
            return VariantType.Del
        return VariantType.Subst

    def genome_pos(self):
        """0-based genomic position."""
        return self.pos - 1

    def signature(self):
        """CoVariants-style signature, e.g. 'A100G'."""
        return "{}{}{}".format(self.left, self.pos, self.right)

    def __repr__(self):
        return repr(self.signature())
class NonSynonymousVariant(SynonymousVariant):
    """A protein-level (amino-acid) variant inside a gene.

    left/right are amino acids, pos is the 1-based codon index within
    the gene, and gene is the gene name used to look up genomic
    coordinates in the module-level gene_start/gene_end dicts.
    """

    def __init__(self, left, pos, right, gene):
        super().__init__(left, pos, right)
        self.gene = gene

    def __eq__(self, other):
        return super().__eq__(other) and self.gene == other.gene

    def __hash__(self):
        return hash((self.left, self.pos, self.right, self.gene))

    def is_same_feature(self, other):
        # Two amino-acid variants are mergeable only within the same gene.
        return self.gene == other.gene

    def genome_pos(self):
        # 0-based genomic position of the first base of codon number pos.
        return gene_start[self.gene] - 1 + (self.pos - 1) * 3

    def signature(self):
        return f"{self.gene}:{self.left}{self.pos}{self.right}"

    def is_in_first_codon(self):
        return self.pos == 1

    def is_in_last_codon(self):
        # BUG FIX: the original expression
        #     gene_end[self.gene] - gene_start[self.gene] / 3
        # divides only gene_start by 3 due to operator precedence, so the
        # computed protein length was wrong and the "is last codon" guard
        # never triggered. Parenthesize the subtraction first.
        # NOTE(review): gffutils coordinates are inclusive, so the exact
        # aa length may be (end - start + 1) / 3 -- confirm against the GFF.
        aa_len = (gene_end[self.gene] - gene_start[self.gene]) / 3
        assert self.pos <= aa_len
        return self.pos == aa_len
# Open the reference FASTA; the workflow assumes a single-contig reference
# and records its name and length for the VCF header.
# NOTE(review): infasta.fetch() is used again further below; in the original
# file (indentation lost in this copy) those uses are presumably still inside
# this context manager -- verify before refactoring.
with FastaFile(snakemake.input.reference) as infasta:
    assert infasta.nreferences == 1
    contig = infasta.references[0]
    ref_len = infasta.lengths[0]
# Build the VCF header: contig line plus two custom INFO fields
# (SIGNATURES: CoVariants-style variant signatures; LINEAGES: lineages
# carrying the variant).
header = VariantHeader()
header.add_line(f"##contig=<ID={contig},length={ref_len}")
header.add_line(
    '##INFO=<ID=SIGNATURES,Number=.,Type=String,Description="Variant signature as obtained from covariants.<EMAIL>">'
)
header.add_line(
    '##INFO=<ID=LINEAGES,Number=.,Type=String,Description="Lineages having this variant">'
)
# Collect every known amino-acid (non-synonymous) variant from the
# CoVariants catalogue, mapping each variant to the set of lineage build
# names carrying it. Variants in genes absent from the GFF annotation
# cannot be placed on the genome and are skipped with a warning.
known_non_synonymous_variants = defaultdict(set)
for lineage_entry in covariants_data["clusters"]:
    mutations = lineage_entry.get("mutations", {})
    for variant in mutations.get("nonsynonymous", []):
        variant = NonSynonymousVariant(**variant)
        if variant.gene in gene_start:
            known_non_synonymous_variants[variant].add(lineage_entry["build_name"])
        else:
            print(
                f"Skipping variant at {variant.gene} because gene is not in given GFF annotation.",
                file=sys.stderr,
            )
# Same collection for synonymous (nucleotide-level) variants; no gene
# lookup is needed since their positions are already genomic.
known_synonymous_variants = defaultdict(set)
for lineage_entry in covariants_data["clusters"]:
    mutations = lineage_entry.get("mutations", {})
    for variant in mutations.get("synonymous", []):
        known_synonymous_variants[SynonymousVariant(**variant)].add(lineage_entry["build_name"])
# Write one VCF record per known variant (or per mergeable run of variants).
# NOTE(review): indentation was lost in this copy; the nesting below is
# reconstructed. In particular the infasta.fetch() calls require the
# FastaFile context opened above to still be active here.
with VariantFile(snakemake.output[0], "wb", header=header) as outvcf:

    def get_variants(all_variants, variant_type, merge=True):
        # Yield (variant(s), lineages) pairs of the requested VariantType,
        # sorted by position. With merge=True, position-consecutive variants
        # within the same feature that are shared by the same lineages are
        # emitted together so they can become a single VCF record.
        filtered_variants = sorted(
            filter(
                lambda item: item[0].variant_type() == variant_type,
                all_variants.items(),
            )
        )
        if not merge:
            yield from filtered_variants
        else:

            def process_batch(batch, batch_lineages):
                # Split a run of consecutive variants according to which
                # lineage combinations share them.
                # Step 1: collect all visited lineages in batch
                all_lineages = np.array(
                    list(
                        set(
                            lineage
                            for lineages in batch_lineages
                            for lineage in lineages
                        )
                    )
                )
                # Step 2: build matrix of variants vs lineages (columns mark combinations of variants that can be merged)
                lineage_matrix = np.array(
                    [
                        [(lineage in lineages) for lineage in all_lineages]
                        for lineages in batch_lineages
                    ]
                )
                # Step 3: remove duplicate columns
                if len(lineage_matrix) > 0:
                    lineage_matrix = np.unique(lineage_matrix, axis=1)
                # Step 4: iterate over combinations
                batch = np.array(batch)
                batch_lineages = np.array(batch_lineages)
                for variant_combination in lineage_matrix.T:
                    # select variants and lineages shared by this combination
                    variants = batch[variant_combination]
                    lineages = set.intersection(
                        *batch_lineages[variant_combination]
                    )
                    # yield them in consecutive inner batches
                    last_pos = None
                    inner_batch_start = 0
                    for i, variant in enumerate(variants):
                        if last_pos is not None and variant.pos != last_pos + 1:
                            # position gap -> flush the current inner batch
                            yield variants[inner_batch_start:i], lineages
                            inner_batch_start = i
                        last_pos = variant.pos
                    yield variants[inner_batch_start:], lineages

            # Greedily grow batches of position-consecutive variants in the
            # same feature, flushing whenever continuity breaks.
            batch = []
            batch_lineages = []
            for variant, lineages in filtered_variants:
                if not batch or (
                    variant.pos == batch[-1].pos + 1
                    and variant.is_same_feature(batch[-1])
                ):
                    batch.append(variant)
                    batch_lineages.append(lineages)
                else:
                    # yield and remove the last batch
                    yield from process_batch(batch, batch_lineages)
                    # clear and start with new batch
                    batch = [variant]
                    batch_lineages = [lineages]
            yield from process_batch(batch, batch_lineages)

    def write_record(pos, ref_allele, alt_allele, lineages, variants):
        # Emit one VCF record annotated with the contributing lineages and
        # the CoVariants-style signatures (pos is 0-based here).
        record = outvcf.new_record()
        record.contig = contig
        record.alleles = (ref_allele, alt_allele)
        record.pos = pos + 1  # pysam expects 1-based positions here
        record.info["LINEAGES"] = ",".join(lineages)
        record.info["SIGNATURES"] = ",".join(
            variant.signature() for variant in variants
        )
        outvcf.write(record)

    # Synonymous insertions: anchor on the reference base before the
    # insertion point, append the inserted bases to form ALT.
    for variants, lineages in get_variants(
        known_synonymous_variants, VariantType.Ins
    ):
        pos = variants[0].genome_pos() - 1
        ref_allele = infasta.fetch(reference=contig, start=pos, end=pos + 1)
        alt_allele = ref_allele + "".join(variant.right for variant in variants)
        write_record(pos, ref_allele, alt_allele, lineages, variants)

    # Synonymous deletions: anchor base as ALT, anchor plus deleted bases as REF.
    for variants, lineages in get_variants(
        known_synonymous_variants, VariantType.Del
    ):
        pos = variants[0].genome_pos() - 1
        alt_allele = infasta.fetch(reference=contig, start=pos, end=pos + 1)
        ref_allele = alt_allele + "".join(variant.left for variant in variants)
        write_record(pos, ref_allele, alt_allele, lineages, variants)

    # Synonymous substitutions: simple single-base REF/ALT records (no merging).
    for variant, lineages in get_variants(
        known_synonymous_variants, VariantType.Subst, merge=False
    ):
        pos = variant.genome_pos()
        write_record(pos, variant.left, variant.right, lineages, [variant])

    # Non-synonymous (amino-acid) insertions: enumerate every codon
    # combination coding for the inserted residues.
    for variants, lineages in get_variants(
        known_non_synonymous_variants, VariantType.Ins
    ):
        pos = variants[0].genome_pos()
        assert not variants[
            0
        ].is_in_first_codon(), "unsupported insertion: is in first codon of protein"
        assert not variants[
            -1
        ].is_in_last_codon(), "unsupported insertion: is in last codon of protein"
        # METHOD: add an unchanged codon before and after the actual variant
        ref_allele = infasta.fetch(reference=contig, start=pos - 3, end=pos + 3)
        pre_codons = codon_equivalence_class(
            infasta.fetch(reference=contig, start=pos - 3, end=pos)
        )
        post_codons = codon_equivalence_class(
            infasta.fetch(reference=contig, start=pos, end=pos + 3)
        )
        for pre_codon, post_codon in product(pre_codons, post_codons):
            for ins_seq in aa_to_dna(
                "".join(variant.right for variant in variants)
            ):
                alt_allele = pre_codon + ins_seq + post_codon
                write_record(pos - 3, ref_allele, alt_allele, lineages, variants)

    # Non-synonymous deletions: drop del_len bases, padded by one unchanged
    # codon on each side.
    for variants, lineages in get_variants(
        known_non_synonymous_variants, VariantType.Del
    ):
        variant = variants[0]
        pos = variants[0].genome_pos()
        del_len = len(variants) * 3
        assert not variants[
            0
        ].is_in_first_codon(), "unsupported deletion: is in first codon of protein"
        assert not variants[
            -1
        ].is_in_last_codon(), "unsupported deletion: is in last codon of protein"
        # METHOD: add an unchanged codon before and after the actual variant
        # in order to capture ambiguity in the alignment
        # before the potential deletion
        pre_codons = codon_equivalence_class(
            infasta.fetch(reference=contig, start=pos - 3, end=pos)
        )
        post_codons = codon_equivalence_class(
            infasta.fetch(
                reference=contig, start=pos + del_len, end=pos + del_len + 3
            )
        )
        # ref allele including the unchanged codons
        ref_allele = infasta.fetch(
            reference=contig, start=pos - 3, end=pos + del_len + 3
        )
        for pre_codon, post_codon in product(pre_codons, post_codons):
            alt_allele = pre_codon + post_codon
            write_record(pos - 3, ref_allele, alt_allele, lineages, variants)

    # Non-synonymous substitutions: whole-codon REF against every codon
    # coding for the alternative amino acid (no merging).
    for variant, lineages in get_variants(
        known_non_synonymous_variants, VariantType.Subst, merge=False
    ):
        pos = variant.genome_pos()
        ref_allele = infasta.fetch(reference=contig, start=pos, end=pos + 3)
        for alt_allele in aa_to_dna(variant.right):
            write_record(pos, ref_allele, alt_allele, lineages, [variant])
| [
"dnachisel.biotools.translate",
"pysam.FastaFile",
"dnachisel.biotools.get_backtranslation_table",
"pysam.VariantFile",
"collections.defaultdict",
"numpy.array",
"requests.get",
"pysam.VariantHeader",
"itertools.product",
"gffutils.create_db",
"numpy.unique"
] | [((765, 802), 'dnachisel.biotools.get_backtranslation_table', 'get_backtranslation_table', (['"""Standard"""'], {}), "('Standard')\n", (790, 802), False, 'from dnachisel.biotools import get_backtranslation_table, translate\n'), ((809, 872), 'gffutils.create_db', 'gffutils.create_db', (['snakemake.input.annotation'], {'dbfn': '""":memory:"""'}), "(snakemake.input.annotation, dbfn=':memory:')\n", (827, 872), False, 'import gffutils\n'), ((1242, 1260), 'dnachisel.biotools.translate', 'translate', (['dna_seq'], {}), '(dna_seq)\n', (1251, 1260), False, 'from dnachisel.biotools import get_backtranslation_table, translate\n'), ((3124, 3160), 'pysam.FastaFile', 'FastaFile', (['snakemake.input.reference'], {}), '(snakemake.input.reference)\n', (3133, 3160), False, 'from pysam import FastaFile, VariantFile, VariantHeader, VariantRecord\n'), ((3290, 3305), 'pysam.VariantHeader', 'VariantHeader', ([], {}), '()\n', (3303, 3305), False, 'from pysam import FastaFile, VariantFile, VariantHeader, VariantRecord\n'), ((3677, 3693), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3688, 3693), False, 'from collections import defaultdict, namedtuple\n'), ((4443, 4459), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4454, 4459), False, 'from collections import defaultdict, namedtuple\n'), ((634, 746), 'requests.get', 'requests.get', (['"""https://raw.githubusercontent.com/hodcroftlab/covariants/master/web/data/clusters.json"""'], {}), "(\n 'https://raw.githubusercontent.com/hodcroftlab/covariants/master/web/data/clusters.json'\n )\n", (646, 746), False, 'import requests\n'), ((4824, 4877), 'pysam.VariantFile', 'VariantFile', (['snakemake.output[0]', '"""wb"""'], {'header': 'header'}), "(snakemake.output[0], 'wb', header=header)\n", (4835, 4877), False, 'from pysam import FastaFile, VariantFile, VariantHeader, VariantRecord\n'), ((1141, 1186), 'itertools.product', 'product', (['*[translate_aa[aa] for aa in aa_seq]'], {}), '(*[translate_aa[aa] 
for aa in aa_seq])\n', (1148, 1186), False, 'from itertools import product\n'), ((10633, 10665), 'itertools.product', 'product', (['pre_codons', 'post_codons'], {}), '(pre_codons, post_codons)\n', (10640, 10665), False, 'from itertools import product\n'), ((12201, 12233), 'itertools.product', 'product', (['pre_codons', 'post_codons'], {}), '(pre_codons, post_codons)\n', (12208, 12233), False, 'from itertools import product\n'), ((5892, 5990), 'numpy.array', 'np.array', (['[[(lineage in lineages) for lineage in all_lineages] for lineages in\n batch_lineages]'], {}), '([[(lineage in lineages) for lineage in all_lineages] for lineages in\n batch_lineages])\n', (5900, 5990), True, 'import numpy as np\n'), ((6377, 6392), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (6385, 6392), True, 'import numpy as np\n'), ((6430, 6454), 'numpy.array', 'np.array', (['batch_lineages'], {}), '(batch_lineages)\n', (6438, 6454), True, 'import numpy as np\n'), ((6259, 6292), 'numpy.unique', 'np.unique', (['lineage_matrix'], {'axis': '(1)'}), '(lineage_matrix, axis=1)\n', (6268, 6292), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
class Normalizer:
    """
    Data normalisation (feature scaling) helpers for MLP-style models.

    Feature scaling is essential for distance-based machine-learning
    methods: features on larger scales dominate computed distances, so
    all features must be brought to a common scale to treat every
    column fairly.

    Author: xrh
    Date: 2021-07-10
    """

    @staticmethod
    def tow_norm_normalize(X):
        """
        L2-normalise every sample (row) of X.

        ref:
        https://blog.csdn.net/hqh131360239/article/details/79061535

        :param X: array of shape (N, m)
            N - number of samples
            m - number of features
        :return: new array with each row scaled to unit Euclidean norm
        """
        # Per-sample L2 norm, shape (N, 1); keepdims enables broadcasting.
        X_norm_2 = np.linalg.norm(X, ord=2, axis=1, keepdims=True)
        return X / X_norm_2

    @staticmethod
    def Z_Score_normalize(X):
        """
        Z-score standardisation: scale each feature to mean 0, std 1.

        :param X: array of shape (N, m)
            N - number of samples
            m - number of features
        :return: standardised copy of X
        """
        mu = np.mean(X, axis=0)  # per-feature mean, shape (m,)
        s = np.std(X, axis=0)   # per-feature (population) std, shape (m,)
        # NOTE: a constant feature (s == 0) yields division by zero;
        # callers should drop or guard such columns.
        return (X - mu) / s

    @staticmethod
    def min_max_normalize(Xarray):
        """
        Min-max scale every feature (column) of Xarray into [0, 1], in place.

        Vectorised replacement for the original O(N*m) Python double loop:
        same in-place mutate-and-return semantics, computed by numpy
        broadcasting in a single pass.

        :param Xarray: array of shape (N, m); modified in place
        :return: the same (modified) Xarray
        """
        col_min = np.min(Xarray, axis=0)  # per-feature minimum, shape (m,)
        col_max = np.max(Xarray, axis=0)  # per-feature maximum, shape (m,)
        # In-place assignment preserves dtype/identity like the original loop.
        Xarray[:] = (Xarray - col_min) / (col_max - col_min)
        return Xarray
"numpy.std",
"numpy.max",
"numpy.mean",
"numpy.linalg.norm",
"numpy.min"
] | [((559, 606), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(X, ord=2, axis=1, keepdims=True)\n', (573, 606), True, 'import numpy as np\n'), ((1041, 1059), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1048, 1059), True, 'import numpy as np\n'), ((1096, 1113), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1102, 1113), True, 'import numpy as np\n'), ((1400, 1420), 'numpy.max', 'np.max', (['Xarray[:, f]'], {}), '(Xarray[:, f])\n', (1406, 1420), True, 'import numpy as np\n'), ((1440, 1460), 'numpy.min', 'np.min', (['Xarray[:, f]'], {}), '(Xarray[:, f])\n', (1446, 1460), True, 'import numpy as np\n')] |
'''
Calculates LSF, instrument background and transmission
'''
import logging
import numpy as np
from scipy.interpolate import interp1d, interp2d
import scipy.constants as sp
from astropy.convolution import Gaussian1DKernel
from astropy.io import fits
from src.config import *
from src.modules.misc_utils import path_setup
from src.modules.em_model import *
# Resolved directories for throughput/emissivity tables and for the
# high-contrast focal-plane masks, relative to the configured data dir.
tppath = path_setup('../../' + config_data["data_dir"] + 'throughput/')
hc_path = path_setup('../../' + config_data["data_dir"] + 'HC/')
class InstrumentPart:
    """One optical element group of the instrument (mirrors and/or lenses
    at a common temperature), with emissivity tables and dust terms used
    to compute its throughput and thermal background.
    """

    # Default emissivity tables (file names resolved relative to tppath).
    substrate = "Suprasil3001_50mm_Emissivity.txt"
    mirror = "QuantumFS500_Emissivity.txt"
    edust = 0.5  # Grey Dust covering on some optics
    mindustfrac = 0.005  # 0.5% dust on optical surfaces - won't be perfectly clean

    def __init__(self, name, temp, area, n_mirrors=0, n_lenses=0, dust_lens=0., dust_mirror=mindustfrac, global_scaling=1., emis_scaling=1., emis_mirror=mirror, emis_lens=substrate, emis_dust=edust):
        # temp in K; area in m2 (scaled against the telescope area in
        # calcEmissivity). Each emis_* may be either an emissivity table
        # file name (str) or a constant emissivity value (number).
        if dust_lens != 0:
            # dust on lenses only makes sense if there are lenses
            assert(n_lenses != 0)
        self.name = name
        self.temp = temp
        self.area = area
        self.n_mirrors = n_mirrors
        self.n_lenses = n_lenses
        self.dust_lens = dust_lens
        self.dust_mirror = dust_mirror
        self.global_scaling = global_scaling
        self.emis_scaling = emis_scaling
        self.emis_mirror = emis_mirror
        self.emis_lens = emis_lens
        self.emis_dust = emis_dust
        self.number = 0  # position in the optical train; set via set_number()

    def set_number(self, number):
        # Called by Instrument.addPart() with the 1-based part index.
        self.number = number

    def calcEmissivity(self, lamb, filename, scaling, dust, n):
        """Combined emissivity of n identical elements, interpolated onto
        the lamb grid. filename is either a (wavelength, emissivity) CSV
        table under tppath or a constant emissivity value."""
        if n == 0:
            # if no elements return 0 emissivity
            return 0.
        # Read emissivity from file or use "filename" as a number
        if type(filename) == str:
            l, emi = np.loadtxt(os.path.join(tppath, filename), unpack=True, comments="#", delimiter=",")
        else:
            l = lamb
            emi = np.zeros_like(lamb) + filename
        # Scale emissivity
        emi *= scaling
        # Add dust emissivity
        emi += self.emis_dust*dust
        # Calculate emissivity for n elements
        emi = 1. - (1. - emi)**n  # ~= n*emi for small emi
        # Scale depending on the effective area
        emi = emi*self.area/config_data['telescope']['area']
        # Interpolate emissivity to output lambda grid
        emi_interp = interp1d(l, emi, kind='linear', bounds_error=False, fill_value=0.)
        return emi_interp(lamb)

    def calcThroughputAndEmission(self, lamb, DIT, output_file=""):
        """Return (throughput, emission_ph) for this part on the lamb grid:
        throughput is dimensionless; emission_ph is in
        photons/um/m2/arcsec2 accumulated over DIT seconds. Also writes
        debug plots and tables next to output_file.
        NOTE(review): plt and blackbody come from the wildcard imports
        (src.config / src.modules.em_model) -- not visible in this file.
        """
        # mirrors
        emi_mirror = self.global_scaling*self.calcEmissivity(lamb, self.emis_mirror, self.emis_scaling, self.dust_mirror, self.n_mirrors)
        # lenses
        emi_lens = self.global_scaling*self.calcEmissivity(lamb, self.emis_lens, self.emis_scaling, self.dust_lens, self.n_lenses)
        # combine the two emissivities multiplicatively in transmission
        emissivity = 1. - ((1. - emi_mirror)*(1. - emi_lens))
        throughput = 1. - emissivity
        emission = emissivity*blackbody(lamb, self.temp)  # J/s/m2/lambda(um)/arcsec2
        emission_ph = emission/(sp.h*sp.c/(lamb*1.E-6))*DIT  # photons/um/m2/arcsec2
        logging.debug("Instrument Part Model - {:02d} {}".format(self.number, self.name))
        logging.debug("T = {:d} K Mirrors = {:d} Lenses = {:d} Area = {:d} m2".format(*map(int, [self.temp, self.n_mirrors, self.n_lenses, self.area])))
        logging.debug("global_scaling = {:5.3f} emis_scaling = {:3.1f}".format(self.global_scaling, self.emis_scaling))
        logging.debug("emis_dust = {}".format(self.emis_dust))
        if self.n_mirrors > 0:
            logging.debug("emis_mirror = {} dust_mirror = {:5.3f}".format(self.emis_mirror, self.dust_mirror))
        if self.n_lenses > 0:
            logging.debug("emis_lens = {} dust_lens = {:5.3f}".format(self.emis_lens, self.dust_lens))
        logging.debug("lambda = {:7.4f} emissivity = {:6.3f} throughput = {:6.3f} emission_ph = {:.2e}".format(np.median(lamb), np.median(emissivity), np.median(throughput), np.median(emission_ph)))
        # Debug plots/tables (throughput and emission vs wavelength)
        plot_file = output_file + "_HARMONI_" + "{:02d}".format(self.number) + "_" + self.name.replace(" ", "_").lower()
        plt.clf()
        plt.plot(lamb, throughput)
        plt.xlabel(r"wavelength [$\mu$m]")
        plt.ylabel("Throughput " + self.name)
        plt.savefig(plot_file + "_tr.pdf")
        np.savetxt(plot_file + "_tr.txt", np.c_[lamb, throughput])
        plt.clf()
        plt.plot(lamb, emission_ph, label="Blackbody T = {:.1f} K".format(self.temp))
        plt.legend()
        plt.xlabel(r"wavelength [$\mu$m]")
        plt.ylabel("Emissivity " + self.name)
        plt.savefig(plot_file + "_em.pdf")
        np.savetxt(plot_file + "_em.txt", np.c_[lamb, emission_ph])
        logging.debug("-------")
        return throughput, emission_ph
class Instrument:
    """Ordered collection of InstrumentPart elements making up the
    instrument's optical train."""

    def __init__(self, name):
        self.name = name
        self.parts = []

    def addPart(self, part):
        """Append an optical element and assign it its 1-based position."""
        self.parts.append(part)
        part.set_number(len(self.parts))

    def calcThroughputAndEmission(self, lamb, DIT, output_file=""):
        """Total throughput and emission of all parts, applied in order.

        Each element attenuates the emission accumulated upstream by its
        own throughput before adding its own emission.
        """
        total_tr = np.ones_like(lamb)
        total_em = np.zeros_like(lamb)
        for element in self.parts:
            tr, em = element.calcThroughputAndEmission(lamb, DIT, output_file=output_file)
            total_tr = total_tr * tr
            # upstream emission is transmitted through this element,
            # then this element's own emission is added
            total_em = total_em * tr + em
        return total_tr, total_em
def sim_instrument(input_parameters, cube, back_emission, transmission, ext_lambs, cube_lamb_mask, input_spec_res, debug_plots=False, output_file=""):
    ''' Simulates instrument effects
    Inputs:
        input_parameters: input dictionary
            exposure_time: Exposure time [s]
            grating: Spectral grating
            ao_mode: LTAO/SCAO/NOAO/AIRY/User defined PSF fits file
            telescope_temp: Telescope temperature [K]
        cube: Input datacube (RA, DEC, lambda)
        back_emission: Input background emission
        transmission: Input transmission
        ext_lambs: extended lambda array [um]
        cube_lamb_mask: mask array to get the lambs of the cube
        input_spec_res: Spectral resolution of the input cube [micron]
        debug_plots: Produce debug plots
        output_file: File name for debug plots
    Outputs:
        cube: cube including instrument effects
        back_emission: back_emission including telescope
        LSF_size: width of the LSF [A]
    '''
    # NOTE(review): indentation was lost in this copy of the file; the
    # nesting below (notably which addPart calls sit inside the AO-mode
    # conditional) is reconstructed -- verify against the original source.
    # Get instrument transmission
    logging.info("Calculating HARMONI transmission and background")
    harmoni = Instrument("HARMONI")
    # Instrument model variables
    # -------------------------
    # Instrument temperatures
    TTel = input_parameters["telescope_temp"]
    #TCool = TTel - config_data['HARMONI_FPRS_diff_temp']
    TCool = 273.15 - 10
    TCryo = config_data['HARMONI_cryo_temp']
    TCryoMech = TCryo - 5.
    TTrap = TCool
    AreaIns = (config_data['telescope']['diameter']*0.5)**2*np.pi  # Full 37m2 aperture, including central obstruction -- this what we see from a thermal point of view after cold stop
    AreaTel = config_data['telescope']['area']  # 37m with 11m central obscuration -- this is what we see before the cold stop
    # Dust properties
    dustfrac = 0.01
    dustfrac = max(InstrumentPart.mindustfrac, dustfrac)  # Can make outer surfaces more dusty to represent aging
    # Cold trap properties
    ecoldtrap = 1.
    rwindow = 0.01  # 1% AR coating on each surface
    # -------------------------
    logging.debug("HARMONI model. TCool = {:d} K TCryo = {:d} K TCryoMech = {:d} K TTrap = {:d} K".format(*map(int, [TCool, TCryo, TCryoMech, TTrap])))
    logging.debug("AreaIns = {:6.1f} m2 AreaTel = {:6.1f} m2".format(AreaIns, AreaTel))
    logging.debug("edust = {:6.3f} dustfrac = {:6.3f} mindustfrac = {:6.3f}".format(InstrumentPart.edust, dustfrac, InstrumentPart.mindustfrac))
    logging.debug("ecoldtrap = {:6.3f} rwindow = {:6.3f}".format(ecoldtrap, rwindow))
    logging.debug("-------")
    # AO dichroic if present
    aoMode = input_parameters["ao_mode"]
    if aoMode == "LTAO":
        harmoni.addPart(InstrumentPart("LTAO dichroic", TTel, AreaIns, n_lenses=1, emis_lens="LTAO_0.6_dichroic.txt", dust_lens=2.*dustfrac))
    elif aoMode in ["SCAO", "HCAO"]:
        harmoni.addPart(InstrumentPart("SCAO dichroic", TTel, AreaIns, n_lenses=1, emis_lens="SCAO_0.8_dichroic.txt", dust_lens=2.*dustfrac))
    if aoMode in ["LTAO", "SCAO", "HCAO"]:
        harmoni.addPart(InstrumentPart("AO cold trap", TTrap, AreaIns, n_mirrors=1, emis_mirror=0., dust_mirror=0.03, emis_dust=ecoldtrap))
    # Entrance window assembly (outer/inner surfaces and cold traps)
    harmoni.addPart(InstrumentPart("Outer window", TTel-6, AreaIns, n_lenses=1, emis_scaling=0.5, dust_lens=dustfrac + InstrumentPart.mindustfrac))
    harmoni.addPart(InstrumentPart("Inner window", TCool+6, AreaIns, n_lenses=1, emis_scaling=0.5, dust_lens=2.*InstrumentPart.mindustfrac))
    harmoni.addPart(InstrumentPart("Window cold trap", TCool, AreaTel, n_mirrors=4, global_scaling=2.*2.0*rwindow))
    harmoni.addPart(InstrumentPart("Window reflected", TTrap, AreaIns, n_mirrors=1, emis_mirror=0., dust_mirror=2.*0.8*2.0*rwindow, emis_dust=ecoldtrap))
    # FPRS
    harmoni.addPart(InstrumentPart("FPRS", TCool, AreaTel, n_mirrors=4))
    harmoni.addPart(InstrumentPart("Cryo window", TCool, AreaTel, n_lenses=1, emis_scaling=0.4, dust_lens=InstrumentPart.mindustfrac))
    harmoni.addPart(InstrumentPart("Cryo window inner dust", TCryo+50., AreaIns, n_mirrors=1, emis_mirror=0., dust_mirror=InstrumentPart.mindustfrac))
    harmoni.addPart(InstrumentPart("Cryo window cold trap", TCryo+50., AreaIns, n_mirrors=1, emis_mirror=0., dust_mirror=2.0*rwindow, emis_dust=ecoldtrap))
    # Cryostat
    harmoni.addPart(InstrumentPart("Pre-optics+IFU+Spectrograph", TCryoMech, AreaIns, n_lenses=8, n_mirrors=19))
    # Grating
    grating = input_parameters["grating"]
    harmoni.addPart(InstrumentPart("Grating " + grating, TCryoMech, AreaIns, n_mirrors=1, emis_mirror=grating + "_grating.txt", dust_mirror=0))
    # NOTE(review): lamb_grid appears unused in this function -- confirm.
    lamb_grid = np.linspace(2, 2.5, 50)
    HARMONI_transmission, HARMONI_background = harmoni.calcThroughputAndEmission(ext_lambs, input_parameters["exposure_time"], output_file=output_file)
    # Fold the instrument into the running background/transmission curves
    back_emission = back_emission*HARMONI_transmission
    transmission = transmission*HARMONI_transmission
    back_emission = back_emission + HARMONI_background
    # Add instrument emission/transmission to the input cube
    instrument_tr_cube = HARMONI_transmission[cube_lamb_mask]
    instrument_tr_cube.shape = (np.sum(cube_lamb_mask), 1, 1)
    cube *= instrument_tr_cube
    instrument_background_cube = HARMONI_background[cube_lamb_mask]
    instrument_background_cube.shape = (np.sum(cube_lamb_mask), 1, 1)
    cube += instrument_background_cube
    # - LSF
    logging.info("Convolve with LSF")
    # Assume Gaussian LSF
    bandws = config_data['gratings'][grating]
    new_res = (bandws.lmin + bandws.lmax)/(2.*bandws.R)  # micron
    pix_size = (ext_lambs[1] - ext_lambs[0])
    if new_res > input_spec_res:
        # convolution kernel width: quadrature difference of resolutions
        new_res_pix = (new_res**2 - input_spec_res**2)**0.5/pix_size
    else:
        logging.warning("The output spectral resolution is higher than the input cube resolution. Assuming input resolution = 0 AA")
        new_res_pix = new_res/pix_size
    logging.info("Output resolution: {:.3f} AA".format(new_res*10000.))
    logging.info("Input resolution: {:.3f} AA".format(input_spec_res*10000.))
    logging.info("Effective LSF FWHM = {:.3f} AA".format(new_res_pix*pix_size*10000.))
    LSF_size = 0
    if new_res_pix > 1.:  # avoid convolution with a kernel narrower than 1 pixel
        sigma_LSF_pix = new_res_pix/2.35482  # FWHM -> sigma
        npix_LSF = int(sigma_LSF_pix*config_data['LSF_kernel_size'])
        # Ensure that the kernel has an odd number of channels
        if npix_LSF % 2 == 0:
            npix_LSF = npix_LSF + 1
        kernel_LSF = Gaussian1DKernel(stddev=sigma_LSF_pix, x_size=npix_LSF)
        z, y, x = cube.shape
        # Convolve each spaxel on the extended grid (padded with the
        # background spectrum) to avoid edge effects at the cube limits.
        for py in range(y):
            for px in range(x):
                spectrum = np.copy(back_emission)
                spectrum[cube_lamb_mask] = cube[:, py, px]
                cube[:, py, px] = np.convolve(spectrum, kernel_LSF, mode="same")[cube_lamb_mask]
        back_emission = np.convolve(back_emission, kernel_LSF, mode="same")
        transmission = np.convolve(transmission, kernel_LSF, mode="same")
        LSF_size = npix_LSF*(ext_lambs[1] - ext_lambs[0])*10000.  # AA
        logging.info("Range for the LSF convolution: {:.3f} AA".format(LSF_size))
    else:
        logging.warning("LSF convolution not performed because the effective LSF FWHM is < 1 pix")
    # Apply high-constrast focal plane mask
    if aoMode == "HCAO":
        logging.info("Apply HC focal plane mask " + input_parameters["hc_fp_mask"])
        fpm = fits.getdata(os.path.join(hc_path, input_parameters["hc_fp_mask"] + ".fits.gz"), 0, memmap=True)  # 0.39 mas sampling
        fpm_sampling = 0.39  # mas
        y, x = fpm.shape
        mask_xsize = x*fpm_sampling
        mask_ysize = y*fpm_sampling
        spax = input_parameters["spaxel_scale"]
        # NOTE: pix_size is rebound here (mas/pixel), shadowing the spectral
        # pixel size used above.
        pix_size = config_data["spaxel_scale"][spax].psfscale
        cube_xsize = cube.shape[2]*pix_size
        cube_ysize = cube.shape[1]*pix_size
        # Resample the mask from its native grid onto the cube's spatial grid
        xgrid_in = np.linspace(-abs(mask_xsize)*0.5, abs(mask_xsize)*0.5, x)
        ygrid_in = np.linspace(-abs(mask_ysize)*0.5, abs(mask_ysize)*0.5, y)
        xgrid_out = np.arange(-abs(cube_xsize)*0.5, abs(cube_xsize)*0.5, abs(pix_size))
        ygrid_out = np.arange(-abs(cube_ysize)*0.5, abs(cube_ysize)*0.5, abs(pix_size))
        # NOTE(review): scipy.interpolate.interp2d is deprecated in recent
        # scipy versions -- consider RegularGridInterpolator when upgrading.
        fpm_interp = interp2d(xgrid_in, ygrid_in, fpm, kind='linear')
        fpm_final = fpm_interp(xgrid_out, ygrid_out)
        # Multiply the mask into every wavelength channel
        for i in range(cube.shape[0]):
            cube[i,:,:] *= fpm_final
    else:
        fpm_final = None
    return (cube, back_emission, transmission, fpm_final), LSF_size
| [
"numpy.zeros_like",
"logging.debug",
"numpy.ones_like",
"numpy.sum",
"numpy.copy",
"logging.warning",
"numpy.median",
"numpy.convolve",
"numpy.savetxt",
"logging.info",
"scipy.interpolate.interp2d",
"numpy.linspace",
"astropy.convolution.Gaussian1DKernel",
"scipy.interpolate.interp1d",
"... | [((371, 433), 'src.modules.misc_utils.path_setup', 'path_setup', (["('../../' + config_data['data_dir'] + 'throughput/')"], {}), "('../../' + config_data['data_dir'] + 'throughput/')\n", (381, 433), False, 'from src.modules.misc_utils import path_setup\n'), ((444, 498), 'src.modules.misc_utils.path_setup', 'path_setup', (["('../../' + config_data['data_dir'] + 'HC/')"], {}), "('../../' + config_data['data_dir'] + 'HC/')\n", (454, 498), False, 'from src.modules.misc_utils import path_setup\n'), ((5865, 5928), 'logging.info', 'logging.info', (['"""Calculating HARMONI transmission and background"""'], {}), "('Calculating HARMONI transmission and background')\n", (5877, 5928), False, 'import logging\n'), ((7287, 7311), 'logging.debug', 'logging.debug', (['"""-------"""'], {}), "('-------')\n", (7300, 7311), False, 'import logging\n'), ((9277, 9300), 'numpy.linspace', 'np.linspace', (['(2)', '(2.5)', '(50)'], {}), '(2, 2.5, 50)\n', (9288, 9300), True, 'import numpy as np\n'), ((9995, 10028), 'logging.info', 'logging.info', (['"""Convolve with LSF"""'], {}), "('Convolve with LSF')\n", (10007, 10028), False, 'import logging\n'), ((2129, 2196), 'scipy.interpolate.interp1d', 'interp1d', (['l', 'emi'], {'kind': '"""linear"""', 'bounds_error': '(False)', 'fill_value': '(0.0)'}), "(l, emi, kind='linear', bounds_error=False, fill_value=0.0)\n", (2137, 2196), False, 'from scipy.interpolate import interp1d, interp2d\n'), ((3960, 4018), 'numpy.savetxt', 'np.savetxt', (["(plot_file + '_tr.txt')", 'np.c_[lamb, throughput]'], {}), "(plot_file + '_tr.txt', np.c_[lamb, throughput])\n", (3970, 4018), True, 'import numpy as np\n'), ((4252, 4311), 'numpy.savetxt', 'np.savetxt', (["(plot_file + '_em.txt')", 'np.c_[lamb, emission_ph]'], {}), "(plot_file + '_em.txt', np.c_[lamb, emission_ph])\n", (4262, 4311), True, 'import numpy as np\n'), ((4314, 4338), 'logging.debug', 'logging.debug', (['"""-------"""'], {}), "('-------')\n", (4327, 4338), False, 'import logging\n'), ((4633, 
4651), 'numpy.ones_like', 'np.ones_like', (['lamb'], {}), '(lamb)\n', (4645, 4651), True, 'import numpy as np\n'), ((4665, 4684), 'numpy.zeros_like', 'np.zeros_like', (['lamb'], {}), '(lamb)\n', (4678, 4684), True, 'import numpy as np\n'), ((9756, 9778), 'numpy.sum', 'np.sum', (['cube_lamb_mask'], {}), '(cube_lamb_mask)\n', (9762, 9778), True, 'import numpy as np\n'), ((9917, 9939), 'numpy.sum', 'np.sum', (['cube_lamb_mask'], {}), '(cube_lamb_mask)\n', (9923, 9939), True, 'import numpy as np\n'), ((10301, 10435), 'logging.warning', 'logging.warning', (['"""The output spectral resolution is higher than the input cube resolution. Assuming input resolution = 0 AA"""'], {}), "(\n 'The output spectral resolution is higher than the input cube resolution. Assuming input resolution = 0 AA'\n )\n", (10316, 10435), False, 'import logging\n'), ((11014, 11069), 'astropy.convolution.Gaussian1DKernel', 'Gaussian1DKernel', ([], {'stddev': 'sigma_LSF_pix', 'x_size': 'npix_LSF'}), '(stddev=sigma_LSF_pix, x_size=npix_LSF)\n', (11030, 11069), False, 'from astropy.convolution import Gaussian1DKernel\n'), ((11340, 11391), 'numpy.convolve', 'np.convolve', (['back_emission', 'kernel_LSF'], {'mode': '"""same"""'}), "(back_emission, kernel_LSF, mode='same')\n", (11351, 11391), True, 'import numpy as np\n'), ((11409, 11459), 'numpy.convolve', 'np.convolve', (['transmission', 'kernel_LSF'], {'mode': '"""same"""'}), "(transmission, kernel_LSF, mode='same')\n", (11420, 11459), True, 'import numpy as np\n'), ((11612, 11707), 'logging.warning', 'logging.warning', (['"""LSF convolution not performed because the effective LSF FWHM is < 1 pix"""'], {}), "(\n 'LSF convolution not performed because the effective LSF FWHM is < 1 pix')\n", (11627, 11707), False, 'import logging\n'), ((11770, 11845), 'logging.info', 'logging.info', (["('Apply HC focal plane mask ' + input_parameters['hc_fp_mask'])"], {}), "('Apply HC focal plane mask ' + input_parameters['hc_fp_mask'])\n", (11782, 11845), False, 'import 
logging\n'), ((12582, 12630), 'scipy.interpolate.interp2d', 'interp2d', (['xgrid_in', 'ygrid_in', 'fpm'], {'kind': '"""linear"""'}), "(xgrid_in, ygrid_in, fpm, kind='linear')\n", (12590, 12630), False, 'from scipy.interpolate import interp1d, interp2d\n'), ((1751, 1770), 'numpy.zeros_like', 'np.zeros_like', (['lamb'], {}), '(lamb)\n', (1764, 1770), True, 'import numpy as np\n'), ((3595, 3610), 'numpy.median', 'np.median', (['lamb'], {}), '(lamb)\n', (3604, 3610), True, 'import numpy as np\n'), ((3612, 3633), 'numpy.median', 'np.median', (['emissivity'], {}), '(emissivity)\n', (3621, 3633), True, 'import numpy as np\n'), ((3635, 3656), 'numpy.median', 'np.median', (['throughput'], {}), '(throughput)\n', (3644, 3656), True, 'import numpy as np\n'), ((3658, 3680), 'numpy.median', 'np.median', (['emission_ph'], {}), '(emission_ph)\n', (3667, 3680), True, 'import numpy as np\n'), ((11156, 11178), 'numpy.copy', 'np.copy', (['back_emission'], {}), '(back_emission)\n', (11163, 11178), True, 'import numpy as np\n'), ((11253, 11299), 'numpy.convolve', 'np.convolve', (['spectrum', 'kernel_LSF'], {'mode': '"""same"""'}), "(spectrum, kernel_LSF, mode='same')\n", (11264, 11299), True, 'import numpy as np\n')] |
''' figures.py
=========================
AIM: Provide several specific functions to save beautiful figures
INPUT: function depend
OUTPUT: function depend
CMD: To include: import resources.figures as figures
ISSUES: <none known>
REQUIRES: standard python libraries, specific libraries in resources/
REMARKS: in general fancy means latex interpreter (font is serif, Palatino) and generates *.eps and *.pdf
'''
######################################################################
import numpy as np
def savefig(fname, fig, fancy=False):
    """Save *fig* to disk as PNG; with fancy=True also emit EPS and a cropped PDF.

    Parameters
    ----------
    fname : str
        Output path without extension.
    fig : matplotlib figure
        Figure to save.
    fancy : bool
        When True, additionally write a transparent EPS, convert it to
        PDF with epstopdf, and crop the PDF with pdfcrop.
    """
    import os
    import subprocess
    import parameters as param
    # Raster output is always produced.
    fig.savefig(fname + '.png', dpi=param.dpi)
    if not fancy:
        return
    # Vector outputs: EPS first, then PDF conversion and cropping.
    fig.savefig(fname + '.eps', dpi=param.dpi, transparent=True)
    os.system("epstopdf " + fname + ".eps")
    subprocess.check_output('pdfcrop %s.pdf' % fname, shell=True)
    os.system('mv ' + fname + '-crop.pdf ' + fname + '.pdf')
def set_fancy():
    """Configure matplotlib for LaTeX rendering with a 16 pt Palatino serif font."""
    from matplotlib import rc
    rc('text', usetex=True)
    rc('font', **{'family': 'serif', 'serif': ['Palatino'], 'size': 16})
def cd(xx, year=2018, day=1, month=1):
    """Convert a (possibly fractional) day offset into a calendar date.

    *xx* is rounded to the nearest integer and added to the ordinal of
    the reference date ``year-month-day`` (default 2018-01-01), so
    ``cd(0)`` is the reference date itself.
    """
    import datetime
    origin = datetime.date(year, month, day).toordinal()
    return datetime.date.fromordinal(origin + int(round(xx)))
# Vectorised wrapper so `cd` can be applied element-wise to array input.
convert_date = np.vectorize(cd)
def format_log10(value):
    """Tick formatter: render *value* as a power of ten, e.g. 100 -> $10^{2}$."""
    exponent = np.log10(value)
    return r'$10^{%d}$' % exponent
def format_mag(value):
    """Tick formatter: integer magnitude wrapped in math mode."""
    return '$%d$' % value
def format_degree(value):
    """Tick formatter: integer angle followed by a degree symbol."""
    return '$%d^\\circ$' % value
def format_second(xx):
    """Format a time tuple / struct_time as 'DD Mon HH:MM'."""
    from time import strftime
    return strftime('%d %b %H:%M', xx)
def format_day(xx):
    """Format a time tuple / struct_time as 'DD Mon'."""
    from time import strftime
    return strftime('%d %b', xx)
| [
"matplotlib.rc",
"numpy.vectorize",
"subprocess.check_output",
"datetime.date.today",
"os.system",
"time.strftime",
"numpy.log10"
] | [((1313, 1329), 'numpy.vectorize', 'np.vectorize', (['cd'], {}), '(cd)\n', (1325, 1329), True, 'import numpy as np\n'), ((935, 1003), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Palatino'], 'size': 16})\n", (937, 1003), False, 'from matplotlib import rc\n'), ((999, 1022), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1001, 1022), False, 'from matplotlib import rc\n'), ((1130, 1151), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1149, 1151), False, 'import datetime\n'), ((1545, 1577), 'time.strftime', 'time.strftime', (['"""%d %b %H:%M"""', 'xx'], {}), "('%d %b %H:%M', xx)\n", (1558, 1577), False, 'import time\n'), ((1620, 1646), 'time.strftime', 'time.strftime', (['"""%d %b"""', 'xx'], {}), "('%d %b', xx)\n", (1633, 1646), False, 'import time\n'), ((716, 755), 'os.system', 'os.system', (["('epstopdf ' + fname + '.eps')"], {}), "('epstopdf ' + fname + '.eps')\n", (725, 755), False, 'import os\n'), ((791, 835), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (814, 835), False, 'import subprocess\n'), ((838, 894), 'os.system', 'os.system', (["('mv ' + fname + '-crop.pdf ' + fname + '.pdf')"], {}), "('mv ' + fname + '-crop.pdf ' + fname + '.pdf')\n", (847, 894), False, 'import os\n'), ((1379, 1394), 'numpy.log10', 'np.log10', (['value'], {}), '(value)\n', (1387, 1394), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import tools as tls
# Load the simulated surface grids (comma-separated dumps).
X = np.loadtxt('X14_SIM.txt', delimiter=',')
Y = np.loadtxt('Y14_SIM.txt', delimiter=',')
Z = np.loadtxt('Z14_SIM.txt', delimiter=',')

# Single-entry accumulators for the damage statistics.
dx_vector = np.zeros((1,))
dz_vector = np.zeros((1,))
radius_vector = np.zeros((1,))
std_radius_vector = np.zeros((1,))

maxXYZ, amax = tls.maxpointsXYZ(X, Y, Z)

# Damage extents: 5% of the x-span and 1% of the z-range of the first row.
damagex = 0.05 * (X[0, -1] - X[0, 0])
damagez = 0.01 * (np.max(Z[0, :]) - np.min(Z[0, :]))

# Bands at 40% / 60% of the last-column y value per row.
yArea1 = 0.4 * Y[:, -1]
yArea2 = 0.6 * Y[:, -1]

# Grid spacings along each axis.
dx = X[0, 1] - X[0, 0]
dy = Y[1, 0] - Y[0, 0]
dz = Z[0, 1] - Z[0, 0]

print('X \n', X[0:4, 0:4])
print('Y \n', Y[0:3, 0:3])
print('Z \n', Z[0:3, 0:3])
print(dx, dy, dz)

for j in range(len(dx_vector)):
    print('Test')

'''
fig = plt.figure()
ax = fig.gca(projection='3d')
surface = ax.plot_surface(X, Y, Z, rstride=3, cstride=3, cmap='Greys', linewidth=0.25, vmin = -1.5, vmax = 1)
ax.plot(maxXYZ[0,:],maxXYZ[1,:],maxXYZ[2,:], 'ko')
plt.axis('off')
ax.view_init(-30, 80)
ax.margins(x=0, y=0)
plt.show()
'''
''' | [
"numpy.zeros",
"numpy.max",
"numpy.min",
"numpy.loadtxt",
"tools.maxpointsXYZ"
] | [((117, 157), 'numpy.loadtxt', 'np.loadtxt', (['"""X14_SIM.txt"""'], {'delimiter': '""","""'}), "('X14_SIM.txt', delimiter=',')\n", (127, 157), True, 'import numpy as np\n'), ((162, 202), 'numpy.loadtxt', 'np.loadtxt', (['"""Y14_SIM.txt"""'], {'delimiter': '""","""'}), "('Y14_SIM.txt', delimiter=',')\n", (172, 202), True, 'import numpy as np\n'), ((207, 247), 'numpy.loadtxt', 'np.loadtxt', (['"""Z14_SIM.txt"""'], {'delimiter': '""","""'}), "('Z14_SIM.txt', delimiter=',')\n", (217, 247), True, 'import numpy as np\n'), ((262, 276), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (270, 276), True, 'import numpy as np\n'), ((291, 305), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (299, 305), True, 'import numpy as np\n'), ((325, 339), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (333, 339), True, 'import numpy as np\n'), ((362, 376), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (370, 376), True, 'import numpy as np\n'), ((395, 420), 'tools.maxpointsXYZ', 'tls.maxpointsXYZ', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (411, 420), True, 'import tools as tls\n'), ((478, 493), 'numpy.max', 'np.max', (['Z[0, :]'], {}), '(Z[0, :])\n', (484, 493), True, 'import numpy as np\n'), ((495, 510), 'numpy.min', 'np.min', (['Z[0, :]'], {}), '(Z[0, :])\n', (501, 510), True, 'import numpy as np\n')] |
import numpy as np
class Detection(object):
    """A single object detection plus per-track matching bookkeeping counters."""
    def __init__(self):
        self.img = None
        self.bbox = None  # bounding box; serialised as 4 values in as_numpy_array
        self.XZ = None
        self.fvec = np.array([1, 2, 3, 4], np.float64) # placeholder feature vector, init in case not needed
        self.frame_id = None  # frame this detection belongs to
        self.num_misses = 0  # times this detection went unmatched
        self.has_match = False
        self.num_matches = 0
        self.matches = []
        # added by sh
        self.score = 0  # detector score
        self.det_class = None  # detected class label
        self.area = 0
        self.is_valid = False
        self.dist_est_y2 = 0.0
        self.intersecting_segs = []
        self.distance_estimates_by_segs = []
    def as_numpy_array(self):
        """Pack as a (1, N) row: [frame_id, bbox(4), det_class, score, fvec]."""
        return np.array([np.hstack((self.frame_id, self.bbox, self.det_class, self.score, self.fvec))])
    @staticmethod
    def det_from_numpy_array(np_array):
        """Rebuild a Detection from a flat array: [0]=frame_id, [1:5]=bbox, [5:]=fvec.

        NOTE(review): this layout skips det_class/score, so it does not
        round-trip the output of as_numpy_array (whose columns 5-6 hold
        det_class and score) -- confirm which array format callers pass in.
        """
        d = Detection()
        d.frame_id = np_array[0]
        d.bbox = np_array[1:5]
        d.fvec = np_array[5:]
        return d
| [
"numpy.array",
"numpy.hstack"
] | [((162, 196), 'numpy.array', 'np.array', (['[1, 2, 3, 4]', 'np.float64'], {}), '([1, 2, 3, 4], np.float64)\n', (170, 196), True, 'import numpy as np\n'), ((663, 739), 'numpy.hstack', 'np.hstack', (['(self.frame_id, self.bbox, self.det_class, self.score, self.fvec)'], {}), '((self.frame_id, self.bbox, self.det_class, self.score, self.fvec))\n', (672, 739), True, 'import numpy as np\n')] |
import pytest
import fenicsmechanics as fm
problem_classes = ("MechanicsProblem",
"SolidMechanicsProblem",
"FluidMechanicsProblem")
_EXPRESSIONS = {
'body_force': ["np.log(1.0 + t)", "np.exp(t)", "1.0 - t"],
'displacement': ["1.0 + 2.0*t", "3.0*t", "1.0"],
'velocity': ["np.tanh(t)", "np.exp(-t)*np.cos(2.0*np.pi*t)", "0.5"],
'values': ["t", "t*t", "10.0*np.cos(t)"],
'pressure': ["np.cos(9.0*t)"]
}
@pytest.mark.parametrize("class_name, field_name",
                         (("MechanicsProblem", "formulation/body_force"),
                         ("MechanicsProblem", "formulation/bcs/dirichlet/displacement"),
                         ("MechanicsProblem", "formulation/bcs/dirichlet/velocity"),
                         ("MechanicsProblem", "formulation/bcs/dirichlet/pressure"),
                         ("MechanicsProblem", "formulation/bcs/neumann/values"),
                         ("SolidMechanicsProblem", "formulation/body_force"),
                         ("SolidMechanicsProblem", "formulation/bcs/dirichlet/displacement"),
                         ("SolidMechanicsProblem", "formulation/bcs/dirichlet/pressure"),
                         ("SolidMechanicsProblem", "formulation/bcs/neumann/values"),
                         ("FluidMechanicsProblem", "formulation/body_force"),
                         ("FluidMechanicsProblem", "formulation/bcs/dirichlet/velocity"),
                         ("FluidMechanicsProblem", "formulation/bcs/dirichlet/pressure"),
                         ("FluidMechanicsProblem", "formulation/bcs/neumann/values")))
def test_single_time_update_tmp(default_config, class_name, field_name):
    """Check that one time-dependent config field tracks its expression in time.

    Installs the expression strings from ``_EXPRESSIONS`` into a single
    config field, constructs the problem, steps it through the
    configured time interval with ``update_time``, and asserts that
    evaluating the stored expression at each step reproduces the
    corresponding numpy expression evaluated at the same times.
    """
    import numpy as np
    config = default_config(class_name, unsteady=True)
    # Clear the default entry for the field under test before installing it.
    if "body_force" in field_name:
        config['formulation']['body_force'] = None
    if "pressure" in field_name:
        config['formulation']['bcs']['dirichlet']['pressure'] = None
        # config['formulation']['bcs']['dirichlet']['p_regions'] = [2]
    t, tf = config['formulation']['time']['interval']
    dt = config['formulation']['time']['dt']
    subconfig, last_key = _get_subdict(field_name, config, ret_last_key=True)
    fm_expr = _get_expressions(_EXPRESSIONS[last_key])
    _update_subconfig(last_key, subconfig, fm_expr, t)
    problem_class = getattr(fm, class_name)
    problem = problem_class(config)
    # Re-read from problem.config: the entries evaluated below must
    # expose .eval (see _eval_expr), i.e. the problem-processed objects.
    subconfig, last_key = _get_subdict(field_name, problem.config, ret_last_key=True)
    tspan = np.arange(t, tf + dt/10.0, dt)
    expected_values = _get_expected_values(tspan, *_EXPRESSIONS[last_key])
    actual_values = np.zeros(expected_values.shape)
    for i, t in enumerate(tspan):
        problem.update_time(t)
        # body_force is stored bare; the other fields are stored in a list.
        if last_key == "body_force":
            _eval_expr(subconfig[last_key], actual_values[i, :], np.zeros(3))
        else:
            # Check for 2018.1.0 compatibility
            _eval_expr(subconfig[last_key][0], actual_values[i, :], np.zeros(3))
    assert np.all(expected_values == actual_values)
    return None
@pytest.mark.parametrize("class_name", problem_classes)
def test_all_time_updates_tmp(default_config, class_name):
    """Check that all time-dependent fields of a problem update together.

    Installs time-dependent expressions for every field the problem
    class supports (body force, displacement and/or velocity Dirichlet
    BCs, Neumann values), steps through the time interval, and compares
    all evaluated expressions against their numpy counterparts at once.
    """
    import numpy as np
    config = default_config(class_name, unsteady=True)
    config['formulation']['body_force'] = None
    t, tf = config['formulation']['time']['interval']
    dt = config['formulation']['time']['dt']
    tspan = np.arange(t, tf + dt/10.0, dt)
    all_expr = list()
    # Assemble the field list per problem class: solids have displacement
    # BCs, fluids have velocity BCs, MechanicsProblem has both.
    all_keys = ("formulation/body_force",)
    if class_name in ["MechanicsProblem", "SolidMechanicsProblem"]:
        all_keys += ("formulation/bcs/dirichlet/displacement",)
    if class_name in ["MechanicsProblem", "FluidMechanicsProblem"]:
        all_keys += ("formulation/bcs/dirichlet/velocity",)
    all_keys += ("formulation/bcs/neumann/values",)
    for key in all_keys:
        subconfig, last_key = _get_subdict(key, config, ret_last_key=True)
        expr = _EXPRESSIONS[last_key]
        all_expr.extend(expr)
        fm_expr = _get_expressions(expr)
        _update_subconfig(last_key, subconfig, fm_expr, t)
    problem_class = getattr(fm, class_name)
    problem = problem_class(config)
    all_expected = _get_expected_values(tspan, *all_expr)
    all_actual = np.zeros(all_expected.shape)
    for i, t in enumerate(tspan):
        problem.update_time(t)
        for key in all_keys:
            subconfig, last_key = _get_subdict(key, problem.config,
                                              ret_last_key=True)
            if last_key == "body_force":
                _eval_expr(subconfig[last_key], all_actual[i, 0:3], np.zeros(3))
            elif last_key == "displacement":
                _eval_expr(subconfig[last_key][0], all_actual[i, 3:6], np.zeros(3))
            elif last_key == "velocity":
                # 9 total columns means no displacement block was added
                # above, so velocity occupies columns 3:6; otherwise 6:9.
                if all_expected.shape[1] == 9:
                    _eval_expr(subconfig[last_key][0], all_actual[i, 3:6], np.zeros(3))
                else:
                    _eval_expr(subconfig[last_key][0], all_actual[i, 6:9], np.zeros(3))
            else:
                # Neumann values always fill the last three columns.
                _eval_expr(subconfig[last_key][0], all_actual[i, -3:], np.zeros(3))
    assert np.all(all_actual == all_expected)
    return None
def _eval_expr(expr, vals, x):
"""
This function tries calling expr.eval. Calls expr.cpp_object().eval
if the first fails. This is for FEniCS 2018.1.0 compatibility.
"""
try:
expr.eval(vals, x)
except AttributeError:
expr.cpp_object().eval(vals, x)
return None
def _update_subconfig(last_key, subconfig, fm_expr, t):
import dolfin as dlf
if last_key == "body_force":
fm_expr = dlf.Expression(fm_expr, degree=1, t=t)
elif last_key == "displacement":
subconfig['regions'] = [1]
elif last_key == "velocity":
subconfig['regions'] = [1]
elif last_key == "pressure":
subconfig['p_regions'] = [2]
elif last_key == "values":
subconfig['types'] = ['cauchy']
subconfig['regions'] = [2]
if (last_key == "body_force") or (last_key == "pressure"):
subconfig[last_key] = fm_expr
else:
subconfig[last_key] = [fm_expr]
return None
def _get_expected_values(t, *expr_list):
import numpy as np
expected_values = list()
for expr in expr_list:
eval_expr = eval(expr)
if isinstance(eval_expr, float):
eval_expr = eval_expr*np.ones(t.shape)
expected_values.append(eval_expr)
return np.array(expected_values).T
def _get_expressions(expr_list):
import re
expr_fm_list = list()
for expr in expr_list:
sub_str = re.sub("np.", "std::", expr)
sub_str = re.sub("std::pi", "pi", sub_str)
expr_fm_list.append(sub_str)
return expr_fm_list
def _get_subdict(key, my_dict, ret_last_key=False):
subdict = my_dict
key_list = key.split("/")
keys_used = list()
for sub in key_list:
old_subdict = subdict
subdict = subdict[sub]
if not isinstance(subdict, dict):
subdict = old_subdict
break
keys_used.append(sub)
if keys_used != key_list:
print("Returning '%s' since '%s' is not a dictionary." \
% ("/".join(keys_used), key))
if len(set(key_list).difference(keys_used)) > 1:
msg = "There are multiple keys left, but the last object is " \
+ "not a dictionary."
raise KeyError(msg)
if ret_last_key:
ret = subdict, key_list[-1]
else:
ret = subdict
return ret
if __name__ == "__main__":
    import sys
    # Allow running this module directly: make conftest.py importable.
    sys.path.append("../")
    from conftest import _default_config
    # Trying specific values: exercise a few representative
    # class/field combinations without going through pytest.
    _ = test_single_time_update_tmp(_default_config, "MechanicsProblem",
                                    "formulation/body_force")
    _ = test_single_time_update_tmp(_default_config, "MechanicsProblem",
                                    "formulation/bcs/dirichlet/displacement")
    _ = test_single_time_update_tmp(_default_config, "SolidMechanicsProblem",
                                    "formulation/bcs/dirichlet/pressure")
    _ = test_single_time_update_tmp(_default_config, "MechanicsProblem",
                                    "formulation/bcs/neumann/values")
    _ = test_single_time_update_tmp(_default_config, "FluidMechanicsProblem",
                                    "formulation/body_force")
    _ = test_all_time_updates_tmp(_default_config, "SolidMechanicsProblem")
| [
"sys.path.append",
"numpy.zeros",
"numpy.ones",
"dolfin.Expression",
"numpy.arange",
"numpy.array",
"pytest.mark.parametrize",
"re.sub",
"numpy.all"
] | [((464, 1347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""class_name, field_name"""', "(('MechanicsProblem', 'formulation/body_force'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/displacement'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/velocity'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('MechanicsProblem',\n 'formulation/bcs/neumann/values'), ('SolidMechanicsProblem',\n 'formulation/body_force'), ('SolidMechanicsProblem',\n 'formulation/bcs/dirichlet/displacement'), ('SolidMechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('SolidMechanicsProblem',\n 'formulation/bcs/neumann/values'), ('FluidMechanicsProblem',\n 'formulation/body_force'), ('FluidMechanicsProblem',\n 'formulation/bcs/dirichlet/velocity'), ('FluidMechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('FluidMechanicsProblem',\n 'formulation/bcs/neumann/values'))"], {}), "('class_name, field_name', (('MechanicsProblem',\n 'formulation/body_force'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/displacement'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/velocity'), ('MechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('MechanicsProblem',\n 'formulation/bcs/neumann/values'), ('SolidMechanicsProblem',\n 'formulation/body_force'), ('SolidMechanicsProblem',\n 'formulation/bcs/dirichlet/displacement'), ('SolidMechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('SolidMechanicsProblem',\n 'formulation/bcs/neumann/values'), ('FluidMechanicsProblem',\n 'formulation/body_force'), ('FluidMechanicsProblem',\n 'formulation/bcs/dirichlet/velocity'), ('FluidMechanicsProblem',\n 'formulation/bcs/dirichlet/pressure'), ('FluidMechanicsProblem',\n 'formulation/bcs/neumann/values')))\n", (487, 1347), False, 'import pytest\n'), ((3066, 3120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""class_name"""', 'problem_classes'], {}), "('class_name', problem_classes)\n", (3089, 3120), False, 'import pytest\n'), 
((2512, 2544), 'numpy.arange', 'np.arange', (['t', '(tf + dt / 10.0)', 'dt'], {}), '(t, tf + dt / 10.0, dt)\n', (2521, 2544), True, 'import numpy as np\n'), ((2639, 2670), 'numpy.zeros', 'np.zeros', (['expected_values.shape'], {}), '(expected_values.shape)\n', (2647, 2670), True, 'import numpy as np\n'), ((3005, 3045), 'numpy.all', 'np.all', (['(expected_values == actual_values)'], {}), '(expected_values == actual_values)\n', (3011, 3045), True, 'import numpy as np\n'), ((3416, 3448), 'numpy.arange', 'np.arange', (['t', '(tf + dt / 10.0)', 'dt'], {}), '(t, tf + dt / 10.0, dt)\n', (3425, 3448), True, 'import numpy as np\n'), ((4251, 4279), 'numpy.zeros', 'np.zeros', (['all_expected.shape'], {}), '(all_expected.shape)\n', (4259, 4279), True, 'import numpy as np\n'), ((5159, 5193), 'numpy.all', 'np.all', (['(all_actual == all_expected)'], {}), '(all_actual == all_expected)\n', (5165, 5193), True, 'import numpy as np\n'), ((7586, 7608), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (7601, 7608), False, 'import sys\n'), ((5652, 5690), 'dolfin.Expression', 'dlf.Expression', (['fm_expr'], {'degree': '(1)', 't': 't'}), '(fm_expr, degree=1, t=t)\n', (5666, 5690), True, 'import dolfin as dlf\n'), ((6474, 6499), 'numpy.array', 'np.array', (['expected_values'], {}), '(expected_values)\n', (6482, 6499), True, 'import numpy as np\n'), ((6622, 6650), 're.sub', 're.sub', (['"""np."""', '"""std::"""', 'expr'], {}), "('np.', 'std::', expr)\n", (6628, 6650), False, 'import re\n'), ((6669, 6701), 're.sub', 're.sub', (['"""std::pi"""', '"""pi"""', 'sub_str'], {}), "('std::pi', 'pi', sub_str)\n", (6675, 6701), False, 'import re\n'), ((2838, 2849), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2846, 2849), True, 'import numpy as np\n'), ((2980, 2991), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2988, 2991), True, 'import numpy as np\n'), ((6404, 6420), 'numpy.ones', 'np.ones', (['t.shape'], {}), '(t.shape)\n', (6411, 6420), True, 'import numpy as 
np\n'), ((4617, 4628), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4625, 4628), True, 'import numpy as np\n'), ((4746, 4757), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4754, 4757), True, 'import numpy as np\n'), ((5134, 5145), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5142, 5145), True, 'import numpy as np\n'), ((4922, 4933), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4930, 4933), True, 'import numpy as np\n'), ((5032, 5043), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5040, 5043), True, 'import numpy as np\n')] |
"""
Classes and functions for element-level operations
"""
import numpy as np
import h5py
import astropy.units as u
import plasmapy
import fiasco
__all__ = ['Element']
class Element(fiasco.IonCollection):
    """
    Object containing all ions for a particular element.

    The Element object provides a way to logically group together ions of the same
    element. This provides easy ways to compute element-level derived quantities such
    as the population fractions.

    Parameters
    ----------
    element_name : `str`, `int`
        Symbol, atomic number, or full name of the element
    temperature : `~astropy.units.Quantity`
    hdf5_path : `str`, optional
        Path to HDF5 database; defaults to that listed in `~fiasco.defaults`

    Other Parameters
    ----------------
    ion_kwargs : `dict`
        Possible keyword arguments for individual ions
    """
    @u.quantity_input
    def __init__(self, element_name, temperature: u.K, hdf5_path=None, **kwargs):
        self.temperature = temperature
        # Normalise the identifier; plasmapy accepts symbol, name or Z.
        if type(element_name) is str:
            element_name = element_name.capitalize()
        self.atomic_symbol = plasmapy.atomic.atomic_symbol(element_name)
        self.atomic_number = plasmapy.atomic.atomic_number(element_name)
        self.element_name = plasmapy.atomic.element_name(element_name)
        if hdf5_path is None:
            self.hdf5_dbase_root = fiasco.defaults['hdf5_dbase_root']
        else:
            self.hdf5_dbase_root = hdf5_path
        ion_kwargs = kwargs.get('ion_kwargs', {})
        ion_kwargs['hdf5_path'] = self.hdf5_dbase_root
        # Only build Ion objects for charge states present in the database.
        chianti_ions = fiasco.DataIndexer(self.hdf5_dbase_root, self.atomic_symbol.lower()).fields
        ion_list = []
        for i in range(self.atomic_number + 1):
            ion = f'{self.atomic_symbol.lower()}_{i+1}'
            if ion in chianti_ions:
                ion_list.append(fiasco.Ion(f'{self.atomic_symbol} {i+1}',
                                           temperature, **ion_kwargs))
        super().__init__(*ion_list)
    @property
    def abundance(self):
        # All ions of the element share one abundance; delegate to the first.
        return self[0].abundance
    def _rate_matrix(self):
        """Assemble the ionization/recombination rate matrix at every temperature.

        Each interior row i loses population through its own ionization
        and recombination (diagonal) and gains it from ionization of
        state i-1 and recombination of state i+1 (off-diagonals); rows 0
        and -1 are the boundary charge states.
        """
        rate_matrix = np.zeros(self.temperature.shape+(self.atomic_number+1, self.atomic_number+1))
        # Attach units taken from the first ion's ionization rate.
        rate_unit = self[0].ionization_rate().unit
        rate_matrix = rate_matrix * rate_unit
        for i in range(1, self.atomic_number):
            rate_matrix[:, i, i] = -(self[i].ionization_rate() + self[i].recombination_rate())
            rate_matrix[:, i, i-1] = self[i-1].ionization_rate()
            rate_matrix[:, i, i+1] = self[i+1].recombination_rate()
        rate_matrix[:, 0, 0] = -(self[0].ionization_rate() + self[0].recombination_rate())
        rate_matrix[:, 0, 1] = self[1].recombination_rate()
        rate_matrix[:, -1, -1] = -(self[-1].ionization_rate() + self[-1].recombination_rate())
        rate_matrix[:, -1, -2] = self[-2].ionization_rate()
        return rate_matrix
    def equilibrium_ionization(self, **kwargs):
        """
        Calculate the ionization equilibrium for all ions of the element.

        Calculate the population fractions for every ion of this element as a function of
        temperature, assuming ionization equilibrium.

        Other Parameters
        ----------------
        rate_matrix : `~astropy.units.Quantity`, optional
            Precomputed rate matrix; computed via `_rate_matrix` when omitted.
        """
        rate_matrix = kwargs.get('rate_matrix', None)
        if rate_matrix is None:
            rate_matrix = self._rate_matrix()
        # Solve system of equations using singular value decomposition
        _, _, V = np.linalg.svd(rate_matrix.value)
        # Select columns of V with smallest eigenvalues (returned in descending order)
        ioneq = np.fabs(V[:, -1, :])
        # Normalise so the populations at each temperature sum to one.
        ioneq /= np.sum(ioneq, axis=1)[:, np.newaxis]
        return u.Quantity(ioneq)
    def __getitem__(self, value):
        # Accept strings like 'Fe 1+' (charge state with '+' suffix) or
        # 'Fe 2' (1-based stage number) in addition to integer indices.
        if type(value) is str:
            el, ion = value.split()
            if '+' in ion:
                value = int(ion.strip('+'))
            else:
                value = int(ion) - 1
        return super().__getitem__(value)
    def __repr__(self):
        ion_list = '\n'.join([i.ion_name for i in self._ion_list])
        return f"""Element
-------
{self.atomic_symbol} ({self.atomic_number}) -- {self.element_name}
Available Ions
--------------
{ion_list}"""
| [
"astropy.units.Quantity",
"plasmapy.atomic.element_name",
"numpy.sum",
"fiasco.Ion",
"plasmapy.atomic.atomic_symbol",
"plasmapy.atomic.atomic_number",
"numpy.zeros",
"numpy.linalg.svd",
"numpy.fabs"
] | [((1174, 1217), 'plasmapy.atomic.atomic_symbol', 'plasmapy.atomic.atomic_symbol', (['element_name'], {}), '(element_name)\n', (1203, 1217), False, 'import plasmapy\n'), ((1247, 1290), 'plasmapy.atomic.atomic_number', 'plasmapy.atomic.atomic_number', (['element_name'], {}), '(element_name)\n', (1276, 1290), False, 'import plasmapy\n'), ((1319, 1361), 'plasmapy.atomic.element_name', 'plasmapy.atomic.element_name', (['element_name'], {}), '(element_name)\n', (1347, 1361), False, 'import plasmapy\n'), ((2182, 2270), 'numpy.zeros', 'np.zeros', (['(self.temperature.shape + (self.atomic_number + 1, self.atomic_number + 1))'], {}), '(self.temperature.shape + (self.atomic_number + 1, self.\n atomic_number + 1))\n', (2190, 2270), True, 'import numpy as np\n'), ((3479, 3511), 'numpy.linalg.svd', 'np.linalg.svd', (['rate_matrix.value'], {}), '(rate_matrix.value)\n', (3492, 3511), True, 'import numpy as np\n'), ((3615, 3635), 'numpy.fabs', 'np.fabs', (['V[:, -1, :]'], {}), '(V[:, -1, :])\n', (3622, 3635), True, 'import numpy as np\n'), ((3706, 3723), 'astropy.units.Quantity', 'u.Quantity', (['ioneq'], {}), '(ioneq)\n', (3716, 3723), True, 'import astropy.units as u\n'), ((3653, 3674), 'numpy.sum', 'np.sum', (['ioneq'], {'axis': '(1)'}), '(ioneq, axis=1)\n', (3659, 3674), True, 'import numpy as np\n'), ((1920, 1990), 'fiasco.Ion', 'fiasco.Ion', (['f"""{self.atomic_symbol} {i + 1}"""', 'temperature'], {}), "(f'{self.atomic_symbol} {i + 1}', temperature, **ion_kwargs)\n", (1930, 1990), False, 'import fiasco\n')] |
#! /usr/bin/env python3
"""
monte-carlo-liquid.py
This script runs the Monte Carlo uncertainty analysis for
a liquid fuel in the University of Connecticut RCM. This script is
associated with the work "On the Uncertainty of Temperature Estimation
in a Rapid Compression Machine" by <NAME>, <NAME>, and
<NAME>, Combustion and Flame, DOI:
10.1016/j.combustflame.2015.03.001. This script is
licensed according to the LICENSE file available in the repository
associated in the paper.
The most recent version of this code is available on GitHub
at https://github.com/bryanwweber/rcm-temperature-uncertainty
Please email <EMAIL> with any questions.
"""
# System library imports
import sys
from multiprocessing import Pool
from itertools import repeat as rp
if sys.version_info[:2] < (3, 3):
print('This script requires Python 3.3 or higher.')
sys.exit(1)
try:
from scipy.special import lambertw
from scipy.stats import norm as norm_dist
from scipy.stats import triang
from scipy.stats import uniform
except ImportError:
print('SciPy must be installed')
sys.exit(1)
try:
import cantera as ct
except ImportError:
print('Cantera must be installed')
sys.exit(1)
try:
import numpy as np
except ImportError:
print('NumPy must be installed')
sys.exit(1)
def run_case(dummy, fuel, P_0, T_0, P_C, mfuel, T_a, mix):
    """Run one Monte Carlo realization and return a sampled compressed
    temperature TC_rand (K).

    Every uncertain quantity (initial T/P, compressed P, fuel mass, tank
    volume, ambient T, fill pressures) is sampled once from its
    distribution via ``dist.ppf(np.random.random_sample())``, so the
    result depends on the RNG call order below.

    Parameters
    ----------
    dummy
        Ignored; present so the function can be driven by
        ``Pool.starmap`` over an iteration counter.
    fuel : str
        Cantera species name of the fuel (must exist in therm-data.xml).
    P_0, T_0, P_C : float
        Nominal initial pressure, initial temperature, and compressed
        pressure (Pa / K / Pa).
    mfuel : float
        Nominal injected fuel mass (units match the 0.03 sigma below;
        presumably grams — TODO confirm against the paper).
    T_a : float
        Nominal ambient temperature, K.
    mix : sequence of 3 floats
        Molar proportions of O2, N2, Ar per mole of fuel.
    """
    # Set the Cantera Solution with the thermo data from the xml file.
    # Get the molecular weight of the fuel and set the unit basis for
    # the Solution to molar basis.
    gas = ct.Solution('therm-data.xml')
    fuel_mw = gas.molecular_weights[gas.species_index(fuel)]
    gas.basis = 'molar'
    # Set the ambient temperature and distribution. Convert the ambient
    # temperature to °C to match the spec but use absolute temperature
    # for the distribution.
    sigma_Ta = max(2.2, (T_a - 273.15)*0.0075)/2
    Ta_dist = norm_dist(loc=T_a, scale=sigma_Ta)
    # Ta_dist = uniform(loc=T_a-sigma_Ta, scale=sigma_Ta*2)
    # Ta_dist = triang(loc=T_a-sigma_Ta, scale=sigma_Ta*2, c=0.5)
    # Set the tank volume and distribution.
    nom_tank_volume = 0.01660
    sigma_volume = 0.00001
    vol_dist = norm_dist(loc=nom_tank_volume, scale=sigma_volume)
    # Create the normal distributions for the initial temperature,
    # initial pressure, and compressed pressure. Convert the initial
    # temperature to °C to match the spec. Use the appropriate
    # distribution for the desired analysis (normal, uniform,
    # triangular).
    # NOTE(review): this uses 273 where the ambient case above uses
    # 273.15 — confirm whether the 0.15 K offset is intentional.
    sigma_T0 = max(2.2, (T_0 - 273)*0.0075)/2
    T0_dist = norm_dist(loc=T_0, scale=sigma_T0)
    # T0_dist = uniform(loc=T_0-sigma_T0, scale=sigma_T0*2)
    # T0_dist = triang(loc=T_0-sigma_T0, scale=sigma_T0*2, c=0.5)
    sigma_P0 = 346.6/2
    P0_dist = norm_dist(loc=P_0, scale=sigma_P0)
    sigma_PC = 5000/2
    PC_dist = norm_dist(loc=P_C, scale=sigma_PC)
    # Set the nominal injected mass of the fuel. Compute the nominal
    # moles of fuel and corresponding nominal required number of moles
    # of the gases.
    nom_mass_fuel = mfuel
    nom_mole_fuel = nom_mass_fuel/fuel_mw
    nom_mole_o2 = nom_mole_fuel*mix[0]
    nom_mole_n2 = nom_mole_fuel*mix[1]
    nom_mole_ar = nom_mole_fuel*mix[2]
    # Create the normal distribution for the fuel mass.
    sigma_mass = 0.03/2
    fuel_mass_dist = norm_dist(loc=nom_mass_fuel, scale=sigma_mass)
    # Calculate the nominal pressure required for each gas to match the
    # desired molar proportions. Note that the gas constant from
    # Cantera is given in units of J/kmol-K, hence the factor of 1000.
    nom_o2_pres = nom_mole_o2*ct.gas_constant*T_a/(1000*nom_tank_volume)
    nom_n2_pres = nom_mole_n2*ct.gas_constant*T_a/(1000*nom_tank_volume)
    nom_ar_pres = nom_mole_ar*ct.gas_constant*T_a/(1000*nom_tank_volume)
    # Compute the pressures of each component as they are filled into
    # the mixing tank. The mean of the distribution of the pressure of
    # each subsequent gas is the sum of the sampled value of the
    # pressure of the previous gas plus the nominal value of the
    # current gas. Note that these are thus not partial pressures, but
    # the total pressure in the tank after filling each component.
    sigma_pressure = 346.6/2
    o2_dist = norm_dist(loc=nom_o2_pres, scale=sigma_pressure)
    o2_pres_rand = o2_dist.ppf(np.random.random_sample())
    n2_pressure = o2_pres_rand + nom_n2_pres
    n2_dist = norm_dist(loc=n2_pressure, scale=sigma_pressure)
    n2_pres_rand = n2_dist.ppf(np.random.random_sample())
    ar_pressure = n2_pres_rand + nom_ar_pres
    ar_dist = norm_dist(loc=ar_pressure, scale=sigma_pressure)
    ar_pres_rand = ar_dist.ppf(np.random.random_sample())
    # Sample random values of the ambient temperature, tank volume, and
    # fuel mass from their distributions.
    Ta_rand = Ta_dist.ppf(np.random.random_sample())
    tank_volume_rand = vol_dist.ppf(np.random.random_sample())
    mole_fuel_rand = fuel_mass_dist.ppf(np.random.random_sample())/fuel_mw
    # Compute the number of moles of each gaseous component based on
    # the sampling from the various distributions. Note that the gas
    # constant from Cantera is given in units of J/kmol-K, hence the
    # factor of 1000.
    mole_o2_rand = o2_pres_rand*tank_volume_rand*1000/(ct.gas_constant*Ta_rand)
    mole_n2_rand = (n2_pres_rand - o2_pres_rand)*tank_volume_rand*1000/(ct.gas_constant*Ta_rand)
    mole_ar_rand = (ar_pres_rand - n2_pres_rand)*tank_volume_rand*1000/(ct.gas_constant*Ta_rand)
    # Compute the mole fractions of each component and set the state of
    # the Cantera solution.
    total_moles = sum([mole_fuel_rand, mole_o2_rand, mole_n2_rand, mole_ar_rand])
    mole_fractions = '{fuel_name}:{fuel_mole},o2:{o2},n2:{n2},ar:{ar}'.format(
        fuel_name=fuel, fuel_mole=mole_fuel_rand/total_moles, o2=mole_o2_rand/total_moles,
        n2=mole_n2_rand/total_moles, ar=mole_ar_rand/total_moles)
    gas.TPX = None, None, mole_fractions
    # Initialize the array of temperatures over which the C_p should be fit.
    # The range is [first input, second input) with increment set by the third
    # input. Loop through the temperatures and compute the non-dimensional
    # specific heats.
    temperatures = np.arange(300, 1105, 5)
    gas_cp = np.zeros(len(temperatures))
    for j, temp in enumerate(temperatures):
        gas.TP = temp, None
        gas_cp[j] = gas.cp/ct.gas_constant
    # Compute the linear fit to the specific heat.
    (gas_b, gas_a) = np.polyfit(temperatures, gas_cp, 1)
    # Sample the values for the initial temperature, initial pressure, and
    # compressed pressure.
    T0_rand = T0_dist.ppf(np.random.random_sample())
    P0_rand = P0_dist.ppf(np.random.random_sample())
    PC_rand = PC_dist.ppf(np.random.random_sample())
    # Compute the compressed temperature and return it.
    # The adiabatic-compression relation with cp = a + b*T leads to a
    # Lambert-W form; np.real discards the zero imaginary part returned
    # by scipy's lambertw.
    lam_rand = gas_b/gas_a * np.exp(gas_b*T0_rand/gas_a) * T0_rand * (PC_rand/P0_rand)**(1/gas_a)
    TC_rand = np.real(gas_a * lambertw(lam_rand)/gas_b)
    return TC_rand
if __name__ == "__main__":
    # n_runs is the number of iterations to run per case
    n_runs = 1000000
    # Set the parameters to be studied so that we can use a loop.
    # The six entries in each list correspond to cases 'a'..'f' below.
    P0s = [1.8794E5, 4.3787E5, 3.9691E5, 4.3635E5, 1.9118E5, 4.3987E5,]
    T0s = [308]*6
    PCs = [50.0135E5, 49.8629E5, 50.0485E5, 49.6995E5, 49.8254E5, 50.0202E5,]
    mfuels = [3.43, 3.48, 3.49, 3.53, 3.53, 3.69,]
    Tas = [21.7, 21.7, 22.0, 22.1, 21.7, 20.0,]
    cases = ['a', 'b', 'c', 'd', 'e', 'f',]
    # Set the string of the fuel.
    pass_fuel = 'mch'
    # Set the mixtures to study (molar O2/N2/Ar per mole of fuel)
    mix1 = [10.5, 12.25, 71.75,]
    mix2 = [21.0, 00.00, 73.50,]
    mix3 = [07.0, 16.35, 71.15,]
    for i, case in enumerate(cases):
        # Each case is associated with a particular mixture in the
        # paper. Set which mixture to use here.
        if case == 'a' or case == 'b':
            pass_mix = mix1
        elif case == 'c' or case == 'd':
            pass_mix = mix2
        else:
            pass_mix = mix3
        # Set all the other initial variables and create a zip to send
        # to the run_case function. Ambient temperature is converted
        # from °C to K here.
        pass_P_0 = P0s[i]
        pass_T_0 = T0s[i]
        pass_P_C = PCs[i]
        pass_mfuel = mfuels[i]
        pass_T_a = Tas[i] + 273.15
        send = zip(range(n_runs), rp(pass_fuel), rp(pass_P_0), rp(pass_T_0),
                   rp(pass_P_C), rp(pass_mfuel), rp(pass_T_a), rp(pass_mix)
                   )
        # Set up a pool of processors to run in parallel.
        with Pool(processes=10) as pool:
            # Run the analysis and get the result into a NumPy array.
            result = np.array(pool.starmap(run_case, send))
        # Print the mean and twice the standard deviation to a file.
        # NOTE(review): the 'tri' label here and the 'uni' file name
        # below suggest triangular/uniform runs, but run_case currently
        # has the normal distributions active — confirm the labels match
        # the distribution actually selected in run_case.
        with open('results-liquid.txt', 'a') as output:
            print(case+'tri', result.mean(), result.std()*2, file=output)
        # Create and save the histogram data file. The format is:
        # Mean temperature, standard deviation
        # Bin edges, height
        # Note the bin edges are one element longer than the histogram
        # so we append a zero at the end of the histogram.
        hist, bin_edges = np.histogram(result, bins=100, density=True)
        np.savetxt('histogram/histogram-liquid-uni'+case+'.dat',
                   np.vstack((np.insert(bin_edges, 0, result.mean()), np.insert(np.append(hist, 0), 0, result.std()))).T
                   )
| [
"itertools.repeat",
"scipy.stats.norm",
"numpy.random.random_sample",
"numpy.polyfit",
"numpy.append",
"numpy.histogram",
"numpy.arange",
"numpy.exp",
"cantera.Solution",
"multiprocessing.Pool",
"sys.exit",
"scipy.special.lambertw"
] | [((849, 860), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (857, 860), False, 'import sys\n'), ((1550, 1579), 'cantera.Solution', 'ct.Solution', (['"""therm-data.xml"""'], {}), "('therm-data.xml')\n", (1561, 1579), True, 'import cantera as ct\n'), ((1900, 1934), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'T_a', 'scale': 'sigma_Ta'}), '(loc=T_a, scale=sigma_Ta)\n', (1909, 1934), True, 'from scipy.stats import norm as norm_dist\n'), ((2178, 2228), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'nom_tank_volume', 'scale': 'sigma_volume'}), '(loc=nom_tank_volume, scale=sigma_volume)\n', (2187, 2228), True, 'from scipy.stats import norm as norm_dist\n'), ((2570, 2604), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'T_0', 'scale': 'sigma_T0'}), '(loc=T_0, scale=sigma_T0)\n', (2579, 2604), True, 'from scipy.stats import norm as norm_dist\n'), ((2769, 2803), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'P_0', 'scale': 'sigma_P0'}), '(loc=P_0, scale=sigma_P0)\n', (2778, 2803), True, 'from scipy.stats import norm as norm_dist\n'), ((2841, 2875), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'P_C', 'scale': 'sigma_PC'}), '(loc=P_C, scale=sigma_PC)\n', (2850, 2875), True, 'from scipy.stats import norm as norm_dist\n'), ((3324, 3370), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'nom_mass_fuel', 'scale': 'sigma_mass'}), '(loc=nom_mass_fuel, scale=sigma_mass)\n', (3333, 3370), True, 'from scipy.stats import norm as norm_dist\n'), ((4252, 4300), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'nom_o2_pres', 'scale': 'sigma_pressure'}), '(loc=nom_o2_pres, scale=sigma_pressure)\n', (4261, 4300), True, 'from scipy.stats import norm as norm_dist\n'), ((4418, 4466), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'n2_pressure', 'scale': 'sigma_pressure'}), '(loc=n2_pressure, scale=sigma_pressure)\n', (4427, 4466), True, 'from scipy.stats import norm as norm_dist\n'), ((4584, 4632), 'scipy.stats.norm', 'norm_dist', ([], {'loc': 'ar_pressure', 'scale': 'sigma_pressure'}), 
'(loc=ar_pressure, scale=sigma_pressure)\n', (4593, 4632), True, 'from scipy.stats import norm as norm_dist\n'), ((6234, 6257), 'numpy.arange', 'np.arange', (['(300)', '(1105)', '(5)'], {}), '(300, 1105, 5)\n', (6243, 6257), True, 'import numpy as np\n'), ((6487, 6522), 'numpy.polyfit', 'np.polyfit', (['temperatures', 'gas_cp', '(1)'], {}), '(temperatures, gas_cp, 1)\n', (6497, 6522), True, 'import numpy as np\n'), ((1084, 1095), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1092, 1095), False, 'import sys\n'), ((1190, 1201), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1198, 1201), False, 'import sys\n'), ((1292, 1303), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1300, 1303), False, 'import sys\n'), ((4332, 4357), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4355, 4357), True, 'import numpy as np\n'), ((4498, 4523), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4521, 4523), True, 'import numpy as np\n'), ((4664, 4689), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4687, 4689), True, 'import numpy as np\n'), ((4832, 4857), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4855, 4857), True, 'import numpy as np\n'), ((4895, 4920), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4918, 4920), True, 'import numpy as np\n'), ((6652, 6677), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6675, 6677), True, 'import numpy as np\n'), ((6705, 6730), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6728, 6730), True, 'import numpy as np\n'), ((6758, 6783), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6781, 6783), True, 'import numpy as np\n'), ((9165, 9209), 'numpy.histogram', 'np.histogram', (['result'], {'bins': '(100)', 'density': '(True)'}), '(result, bins=100, density=True)\n', (9177, 9209), True, 'import numpy as np\n'), ((4962, 
4987), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4985, 4987), True, 'import numpy as np\n'), ((8301, 8314), 'itertools.repeat', 'rp', (['pass_fuel'], {}), '(pass_fuel)\n', (8303, 8314), True, 'from itertools import repeat as rp\n'), ((8316, 8328), 'itertools.repeat', 'rp', (['pass_P_0'], {}), '(pass_P_0)\n', (8318, 8328), True, 'from itertools import repeat as rp\n'), ((8330, 8342), 'itertools.repeat', 'rp', (['pass_T_0'], {}), '(pass_T_0)\n', (8332, 8342), True, 'from itertools import repeat as rp\n'), ((8363, 8375), 'itertools.repeat', 'rp', (['pass_P_C'], {}), '(pass_P_C)\n', (8365, 8375), True, 'from itertools import repeat as rp\n'), ((8377, 8391), 'itertools.repeat', 'rp', (['pass_mfuel'], {}), '(pass_mfuel)\n', (8379, 8391), True, 'from itertools import repeat as rp\n'), ((8393, 8405), 'itertools.repeat', 'rp', (['pass_T_a'], {}), '(pass_T_a)\n', (8395, 8405), True, 'from itertools import repeat as rp\n'), ((8407, 8419), 'itertools.repeat', 'rp', (['pass_mix'], {}), '(pass_mix)\n', (8409, 8419), True, 'from itertools import repeat as rp\n'), ((8512, 8530), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(10)'}), '(processes=10)\n', (8516, 8530), False, 'from multiprocessing import Pool\n'), ((6871, 6902), 'numpy.exp', 'np.exp', (['(gas_b * T0_rand / gas_a)'], {}), '(gas_b * T0_rand / gas_a)\n', (6877, 6902), True, 'import numpy as np\n'), ((6970, 6988), 'scipy.special.lambertw', 'lambertw', (['lam_rand'], {}), '(lam_rand)\n', (6978, 6988), False, 'from scipy.special import lambertw\n'), ((9355, 9373), 'numpy.append', 'np.append', (['hist', '(0)'], {}), '(hist, 0)\n', (9364, 9373), True, 'import numpy as np\n')] |
import sys
import os
import time
import numpy as np
import torch
from tqdm import tqdm
from torch.utils import data
from pytorch_pretrained_bert.modeling import BertForTokenClassification
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.tokenization import BertTokenizer
from NER_src.NER_dataset import CoNLLDataProcessor, NerDataset
from NER_src.NER_utils import evaluate, warmup_linear, write_test
from NER_src.Config import cuda_yes, device, max_seq_length
import warnings
warnings.filterwarnings("ignore")
print('Python version ', sys.version)
print('PyTorch version ', torch.__version__)
print('Current dir:', os.getcwd())
print('Cuda is available?', cuda_yes)
print('Device:', device)
# ---- Run configuration (flat-script globals) ----
# Location of the CoNLL-2003 train/dev/test files.
data_dir = './NER_data/CoNLL2003/'
do_train = True
do_eval = True
do_predict = True
# When True, the dev and test dataloaders are swapped further below.
do_trick = True
# Resume from output_dir/ner_bert_checkpoint.pt if it exists.
load_checkpoint = True
batch_size = 32
learning_rate0 = 5e-5
lr0_crf_fc = 8e-5
weight_decay_finetune = 1e-5
weight_decay_crf_fc = 5e-6
total_train_epochs = 120
gradient_accumulation_steps = 1
warmup_proportion = 0.1
output_dir = './output/'
bert_model_scale = 'bert-base-cased'
do_lower_case = False
# Early-stopping patience: stop after this many epochs without F1 improvement.
patience = 10
# Seed NumPy and PyTorch (and all CUDA devices) for reproducibility.
np.random.seed(44)
torch.manual_seed(44)
if cuda_yes:
    torch.cuda.manual_seed_all(44)
# Read the CoNLL-2003 splits and the label inventory.
conllProcessor = CoNLLDataProcessor()
label_list = conllProcessor.get_labels()
label_map = conllProcessor.get_label_map()
train_examples = conllProcessor.get_train_examples(data_dir)
dev_examples = conllProcessor.get_dev_examples(data_dir)
test_examples = conllProcessor.get_test_examples(data_dir)
# Total optimizer steps over the whole run (used for LR warmup/decay).
total_train_steps = int(len(train_examples) / batch_size / gradient_accumulation_steps * total_train_epochs)
print("***** Running training *****")
print("  Num examples = %d" % len(train_examples))
print("  Batch size = %d" % batch_size)
print("  Num steps = %d" % total_train_steps)
tokenizer = BertTokenizer.from_pretrained(bert_model_scale, do_lower_case=do_lower_case)
train_dataset = NerDataset(train_examples, tokenizer, label_map, max_seq_length)
dev_dataset = NerDataset(dev_examples, tokenizer, label_map, max_seq_length)
test_dataset = NerDataset(test_examples, tokenizer, label_map, max_seq_length)
# Worker processes only on Linux; 0 avoids multiprocessing issues elsewhere.
num_worker = 4 if sys.platform == 'linux' or sys.platform == 'linux2' else 0
train_dataloader = data.DataLoader(dataset=train_dataset,
                                batch_size=batch_size,
                                shuffle=True,
                                num_workers=num_worker,
                                collate_fn=NerDataset.pad)
dev_dataloader = data.DataLoader(dataset=dev_dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=num_worker,
                             collate_fn=NerDataset.pad)
test_dataloader = data.DataLoader(dataset=test_dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=num_worker,
                              collate_fn=NerDataset.pad)
# Swap the dev and test dataloaders: with do_trick on, "Valid_set"
# evaluation below actually runs on the test split and vice versa.
if do_trick:
    temp = test_dataloader
    test_dataloader = dev_dataloader
    dev_dataloader = temp
print('*** Use only BertForTokenClassification ***')
epoch_no_improve = 0
# Resume from a saved checkpoint when requested and available;
# otherwise start from the pretrained BERT weights.
if load_checkpoint and os.path.exists(output_dir + '/ner_bert_checkpoint.pt'):
    checkpoint = torch.load(output_dir + '/ner_bert_checkpoint.pt', map_location='cpu')
    start_epoch = checkpoint['epoch'] + 1
    valid_acc_prev = checkpoint['valid_acc']
    valid_f1_prev = checkpoint['valid_f1']
    model = BertForTokenClassification.from_pretrained(
        bert_model_scale, state_dict=checkpoint['model_state'], num_labels=len(label_list))
    print('Loaded the pretrain NER_BERT model, epoch:', checkpoint['epoch'], 'valid acc:',
          checkpoint['valid_acc'], 'valid f1:', checkpoint['valid_f1'])
else:
    start_epoch = 0
    valid_acc_prev = 0
    valid_f1_prev = 0
    model = BertForTokenClassification.from_pretrained(
        bert_model_scale, num_labels=len(label_list))
model.to(device)
# Standard BERT fine-tuning weight-decay split: biases and LayerNorm
# parameters are excluded from weight decay.
named_params = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in named_params if not any(nd in n for nd in no_decay)],
     'weight_decay': weight_decay_finetune},
    {'params': [p for n, p in named_params if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters, lr=learning_rate0, warmup=warmup_proportion,
                    t_total=total_train_steps)
# Global step counter, advanced once per optimizer step; initialized to
# where a resumed run left off.
global_step_th = int(len(train_examples) / batch_size / gradient_accumulation_steps * start_epoch)
# ---- Training loop with gradient accumulation, manual LR warmup,
# checkpointing on best dev F1, and early stopping ----
for epoch in range(start_epoch, total_train_epochs):
    tr_loss = 0
    train_start = time.time()
    model.train()
    optimizer.zero_grad()
    for step, batch in tqdm(enumerate(train_dataloader)):
        batch = tuple(t.to(device) for t in batch)
        input_ids, input_mask, segment_ids, predict_mask, label_ids = batch
        # With labels supplied, the model returns the token-classification loss.
        loss = model(input_ids, segment_ids, input_mask, label_ids)
        if gradient_accumulation_steps > 1:
            loss = loss / gradient_accumulation_steps
        loss.backward()
        tr_loss += loss.item()
        # Step the optimizer only every gradient_accumulation_steps batches,
        # after manually setting the warmup-scheduled learning rate.
        if (step + 1) % gradient_accumulation_steps == 0:
            lr_this_step = learning_rate0 * warmup_linear(global_step_th / total_train_steps, warmup_proportion)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_this_step
            optimizer.step()
            optimizer.zero_grad()
            global_step_th += 1
    print('--------------------------------------------------------------')
    print("Epoch:{} completed, Total training's Loss: {}, Spend: {}m".format(epoch, tr_loss,
                                                                            (time.time() - train_start) / 60.0))
    valid_acc, valid_f1 = evaluate(model, dev_dataloader, batch_size, epoch, 'Valid_set')
    # Save a checkpoint only when dev F1 improves; otherwise count toward
    # early stopping.
    if valid_f1 > valid_f1_prev:
        torch.save({'epoch': epoch, 'model_state': model.state_dict(), 'valid_acc': valid_acc,
                    'valid_f1': valid_f1, 'max_seq_length': max_seq_length, 'lower_case': do_lower_case},
                   os.path.join(output_dir, 'ner_bert_checkpoint.pt'), _use_new_zipfile_serialization=False)
        valid_f1_prev = valid_f1
        epoch_no_improve = 0
    else:
        epoch_no_improve += 1
        print('Epoch No Improve: {}'.format(epoch_no_improve))
        if epoch_no_improve >= patience:
            print('Early Stop')
            break
# Evaluate the final (last-epoch) model, then reload the best checkpoint
# and evaluate that on the test set.
evaluate(model, test_dataloader, batch_size, total_train_epochs - 1, 'Test_set')
checkpoint = torch.load(output_dir + '/ner_bert_checkpoint.pt', map_location='cpu')
epoch = checkpoint['epoch']
valid_acc_prev = checkpoint['valid_acc']
valid_f1_prev = checkpoint['valid_f1']
model = BertForTokenClassification.from_pretrained(
    bert_model_scale, state_dict=checkpoint['model_state'], num_labels=len(label_list)
)
model.to(device)
print('Loaded the pretrain NER_BERT model, epoch:', checkpoint['epoch'], 'valid acc:',
      checkpoint['valid_acc'], 'valid f1:', checkpoint['valid_f1'])
evaluate(model, test_dataloader, batch_size, epoch, 'Test_set')
# ---- Write per-token predictions for the test file ----
model.eval()
with torch.no_grad():
    demon_dataloader = data.DataLoader(dataset=test_dataset,
                                    batch_size=10,
                                    shuffle=False,
                                    num_workers=num_worker,
                                    collate_fn=NerDataset.pad)
    pred_list = []
    # Map the first three (special) labels to 'O' so they never appear
    # in the written predictions.
    label_list[:3] = ['O'] * 3
    for batch in demon_dataloader:
        batch = tuple(t.to(device) for t in batch)
        input_ids, input_mask, segment_ids, predict_mask, label_ids = batch
        # Without labels, the model returns per-token logits.
        out_scores = model(input_ids, segment_ids, input_mask)
        _, predicted = torch.max(out_scores, -1)
        valid_predicted = torch.masked_select(predicted, predict_mask)
        # Keep only positions flagged by predict_mask and convert label
        # ids back to label strings.
        for i in range(predicted.shape[0]):
            new_ids = predicted[i].cpu().numpy()[predict_mask[i].cpu().numpy() == 1]
            pred_list.extend(list(map(lambda ix: label_list[ix], new_ids)))
    write_test(data_dir + 'test.txt', pred_list, 'test-bert.txt')
print(conllProcessor.get_label_map())
| [
"torch.masked_select",
"numpy.random.seed",
"pytorch_pretrained_bert.optimization.BertAdam",
"pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained",
"NER_src.NER_utils.evaluate",
"NER_src.NER_utils.write_test",
"NER_src.NER_dataset.CoNLLDataProcessor",
"NER_src.NER_dataset.NerDataset",
... | [((517, 550), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (540, 550), False, 'import warnings\n'), ((1155, 1173), 'numpy.random.seed', 'np.random.seed', (['(44)'], {}), '(44)\n', (1169, 1173), True, 'import numpy as np\n'), ((1174, 1195), 'torch.manual_seed', 'torch.manual_seed', (['(44)'], {}), '(44)\n', (1191, 1195), False, 'import torch\n'), ((1262, 1282), 'NER_src.NER_dataset.CoNLLDataProcessor', 'CoNLLDataProcessor', ([], {}), '()\n', (1280, 1282), False, 'from NER_src.NER_dataset import CoNLLDataProcessor, NerDataset\n'), ((1843, 1919), 'pytorch_pretrained_bert.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['bert_model_scale'], {'do_lower_case': 'do_lower_case'}), '(bert_model_scale, do_lower_case=do_lower_case)\n', (1872, 1919), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((1937, 2001), 'NER_src.NER_dataset.NerDataset', 'NerDataset', (['train_examples', 'tokenizer', 'label_map', 'max_seq_length'], {}), '(train_examples, tokenizer, label_map, max_seq_length)\n', (1947, 2001), False, 'from NER_src.NER_dataset import CoNLLDataProcessor, NerDataset\n'), ((2016, 2078), 'NER_src.NER_dataset.NerDataset', 'NerDataset', (['dev_examples', 'tokenizer', 'label_map', 'max_seq_length'], {}), '(dev_examples, tokenizer, label_map, max_seq_length)\n', (2026, 2078), False, 'from NER_src.NER_dataset import CoNLLDataProcessor, NerDataset\n'), ((2094, 2157), 'NER_src.NER_dataset.NerDataset', 'NerDataset', (['test_examples', 'tokenizer', 'label_map', 'max_seq_length'], {}), '(test_examples, tokenizer, label_map, max_seq_length)\n', (2104, 2157), False, 'from NER_src.NER_dataset import CoNLLDataProcessor, NerDataset\n'), ((2255, 2385), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_worker', 'collate_fn': 'NerDataset.pad'}), '(dataset=train_dataset, 
batch_size=batch_size, shuffle=True,\n num_workers=num_worker, collate_fn=NerDataset.pad)\n', (2270, 2385), False, 'from torch.utils import data\n'), ((2540, 2669), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'dev_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_worker', 'collate_fn': 'NerDataset.pad'}), '(dataset=dev_dataset, batch_size=batch_size, shuffle=False,\n num_workers=num_worker, collate_fn=NerDataset.pad)\n', (2555, 2669), False, 'from torch.utils import data\n'), ((2817, 2947), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_worker', 'collate_fn': 'NerDataset.pad'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=num_worker, collate_fn=NerDataset.pad)\n', (2832, 2947), False, 'from torch.utils import data\n'), ((4449, 4564), 'pytorch_pretrained_bert.optimization.BertAdam', 'BertAdam', (['optimizer_grouped_parameters'], {'lr': 'learning_rate0', 'warmup': 'warmup_proportion', 't_total': 'total_train_steps'}), '(optimizer_grouped_parameters, lr=learning_rate0, warmup=\n warmup_proportion, t_total=total_train_steps)\n', (4457, 4564), False, 'from pytorch_pretrained_bert.optimization import BertAdam\n'), ((6567, 6652), 'NER_src.NER_utils.evaluate', 'evaluate', (['model', 'test_dataloader', 'batch_size', '(total_train_epochs - 1)', '"""Test_set"""'], {}), "(model, test_dataloader, batch_size, total_train_epochs - 1, 'Test_set'\n )\n", (6575, 6652), False, 'from NER_src.NER_utils import evaluate, warmup_linear, write_test\n'), ((6662, 6732), 'torch.load', 'torch.load', (["(output_dir + '/ner_bert_checkpoint.pt')"], {'map_location': '"""cpu"""'}), "(output_dir + '/ner_bert_checkpoint.pt', map_location='cpu')\n", (6672, 6732), False, 'import torch\n'), ((7155, 7218), 'NER_src.NER_utils.evaluate', 'evaluate', (['model', 'test_dataloader', 'batch_size', 'epoch', 
'"""Test_set"""'], {}), "(model, test_dataloader, batch_size, epoch, 'Test_set')\n", (7163, 7218), False, 'from NER_src.NER_utils import evaluate, warmup_linear, write_test\n'), ((658, 669), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (667, 669), False, 'import os\n'), ((1213, 1243), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(44)'], {}), '(44)\n', (1239, 1243), False, 'import torch\n'), ((3283, 3337), 'os.path.exists', 'os.path.exists', (["(output_dir + '/ner_bert_checkpoint.pt')"], {}), "(output_dir + '/ner_bert_checkpoint.pt')\n", (3297, 3337), False, 'import os\n'), ((3356, 3426), 'torch.load', 'torch.load', (["(output_dir + '/ner_bert_checkpoint.pt')"], {'map_location': '"""cpu"""'}), "(output_dir + '/ner_bert_checkpoint.pt', map_location='cpu')\n", (3366, 3426), False, 'import torch\n'), ((4768, 4779), 'time.time', 'time.time', ([], {}), '()\n', (4777, 4779), False, 'import time\n'), ((5914, 5977), 'NER_src.NER_utils.evaluate', 'evaluate', (['model', 'dev_dataloader', 'batch_size', 'epoch', '"""Valid_set"""'], {}), "(model, dev_dataloader, batch_size, epoch, 'Valid_set')\n", (5922, 5977), False, 'from NER_src.NER_utils import evaluate, warmup_linear, write_test\n'), ((7238, 7253), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7251, 7253), False, 'import torch\n'), ((7278, 7400), 'torch.utils.data.DataLoader', 'data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': '(10)', 'shuffle': '(False)', 'num_workers': 'num_worker', 'collate_fn': 'NerDataset.pad'}), '(dataset=test_dataset, batch_size=10, shuffle=False,\n num_workers=num_worker, collate_fn=NerDataset.pad)\n', (7293, 7400), False, 'from torch.utils import data\n'), ((8157, 8218), 'NER_src.NER_utils.write_test', 'write_test', (["(data_dir + 'test.txt')", 'pred_list', '"""test-bert.txt"""'], {}), "(data_dir + 'test.txt', pred_list, 'test-bert.txt')\n", (8167, 8218), False, 'from NER_src.NER_utils import evaluate, warmup_linear, write_test\n'), ((7851, 7876), 'torch.max', 
'torch.max', (['out_scores', '(-1)'], {}), '(out_scores, -1)\n', (7860, 7876), False, 'import torch\n'), ((7903, 7947), 'torch.masked_select', 'torch.masked_select', (['predicted', 'predict_mask'], {}), '(predicted, predict_mask)\n', (7922, 7947), False, 'import torch\n'), ((6231, 6281), 'os.path.join', 'os.path.join', (['output_dir', '"""ner_bert_checkpoint.pt"""'], {}), "(output_dir, 'ner_bert_checkpoint.pt')\n", (6243, 6281), False, 'import os\n'), ((5336, 5404), 'NER_src.NER_utils.warmup_linear', 'warmup_linear', (['(global_step_th / total_train_steps)', 'warmup_proportion'], {}), '(global_step_th / total_train_steps, warmup_proportion)\n', (5349, 5404), False, 'from NER_src.NER_utils import evaluate, warmup_linear, write_test\n'), ((5852, 5863), 'time.time', 'time.time', ([], {}), '()\n', (5861, 5863), False, 'import time\n')] |
import copy
import functools
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
from guided_diffusion.two_parts_model import TwoPartsUNetModel
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import numpy.matlib
from matplotlib.colors import LogNorm
import matplotlib as mpl
from . import logger
if os.uname().nodename == "titan4":
from guided_diffusion import dist_util_titan as dist_util
else:
from guided_diffusion import dist_util
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler, TaskAwareSampler
from .gaussian_diffusion import _extract_into_tensor
from .nn import mean_flat
from .losses import normal_kl
from .resample import LossAwareSampler, UniformSampler, TaskAwareSampler, DAEOnlySampler
import wandb
# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
# INITIAL_LOG_LOSS_SCALE = 20.0
from .unet import UNetModel
class TrainLoop:
    def __init__(
        self,
        *,
        params,
        model,
        prev_model,
        diffusion,
        data,
        batch_size,
        microbatch,
        lr,
        ema_rate,
        log_interval,
        skip_save,
        save_interval,
        plot_interval,
        resume_checkpoint,
        task_id,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        schedule_sampler=None,
        weight_decay=0.0,
        lr_anneal_steps=0,
        scheduler_rate=1,
        scheduler_step=1000,
        num_steps=10000,
        image_size=32,
        in_channels=3,
        class_cond=False,
        max_class=None,
        generate_previous_examples_at_start_of_new_task=False,
        generate_previous_samples_continuously=False,
        validator=None,
        validation_interval=None
    ):
        """Set up one training run: stores configuration, optionally resumes
        from a checkpoint, builds the mixed-precision trainer, optimizer,
        EMA parameter copies, and (when CUDA is available) wraps the model
        in DistributedDataParallel.
        """
        self.params = params
        self.task_id = task_id
        self.model = model
        self.prev_ddp_model = prev_model
        self.diffusion = diffusion
        self.data = data
        self.image_size = image_size
        self.in_channels = in_channels
        self.batch_size = batch_size
        # microbatch <= 0 disables microbatching (one microbatch == full batch).
        self.microbatch = microbatch if microbatch > 0 else batch_size
        self.lr = lr
        self.class_cond = class_cond
        self.max_class = max_class
        # ema_rate may be a single float or a comma-separated string of rates;
        # normalize to a list of floats.
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.skip_save = skip_save
        self.plot_interval = plot_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.num_steps = num_steps
        self.step = 0
        self.resume_step = 0
        self.global_batch = self.batch_size * dist.get_world_size()
        self.sync_cuda = th.cuda.is_available()
        # Must run before MixedPrecisionTrainer so resumed weights are the
        # ones copied into the master params.
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(
            model=self.model,
            use_fp16=self.use_fp16,
            fp16_scale_growth=fp16_scale_growth
        )
        self.opt = AdamW(
            self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
        )
        self.scheduler = th.optim.lr_scheduler.ExponentialLR(self.opt, gamma=scheduler_rate)
        self.scheduler_step = scheduler_step
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            # Fresh run: start each EMA chain from a copy of the master params.
            self.ema_params = [
                copy.deepcopy(self.mp_trainer.master_params)
                for _ in range(len(self.ema_rate))
            ]
        if th.cuda.is_available():
            self.use_ddp = True
            # UNetModel and DAE-only sampling use every parameter each step;
            # other configurations need find_unused_parameters for DDP.
            find_unused_params = (not isinstance(self.model, UNetModel)) and (not isinstance(self.schedule_sampler, DAEOnlySampler))
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=find_unused_params,
            )
        else:
            if dist.get_world_size() > 1:
                logger.warn(
                    "Distributed training requires CUDA. "
                    "Gradients will not be synchronized properly!"
                )
            self.use_ddp = False
            self.ddp_model = self.model
        self.generate_previous_examples_at_start_of_new_task = generate_previous_examples_at_start_of_new_task
        self.generate_previous_samples_continuously = generate_previous_samples_continuously
        self.validator = validator
        if validator is None:
            self.validation_interval = self.num_steps + 1  # Skipping validation
        else:
            self.validation_interval = validation_interval
    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (if any) and broadcast them.

        ``find_resume_checkpoint()`` (auto-discovery) takes precedence over the
        explicitly configured ``self.resume_checkpoint``. Also sets
        ``self.resume_step`` parsed from the checkpoint filename.
        """
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            # Only rank 0 reads the checkpoint file; other ranks receive the
            # weights through the sync_params broadcast below.
            if dist.get_rank() == 0:
                logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )
        # Runs unconditionally so all workers start from identical parameters.
        dist_util.sync_params(self.model.parameters())
    def _load_ema_parameters(self, rate):
        """Return EMA master params for the given decay ``rate``.

        Falls back to a deep copy of the current master params when no matching
        EMA checkpoint exists next to the resume checkpoint. The result is
        synchronized across ranks either way.
        """
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            # Only rank 0 reads the file; sync_params broadcasts afterwards.
            if dist.get_rank() == 0:
                logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params
def _load_optimizer_state(self):
main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
opt_checkpoint = bf.join(
bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
)
if bf.exists(opt_checkpoint):
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
    def run_loop(self):
        """Main training loop: step the model until ``num_steps`` (or LR anneal end).

        Interleaves optimization with periodic logging, checkpointing, SNR
        diagnostics, sample plotting, LR scheduling and validation, then emits
        final checkpoints/plots after the loop ends.
        """
        # DAE-only training uses a dedicated plotting routine.
        if isinstance(self.schedule_sampler, DAEOnlySampler):
            plot = self.plot_dae_only
        else:
            plot = self.plot
        while (
            (not self.lr_anneal_steps
             or self.step + self.resume_step < self.lr_anneal_steps) and (self.step < self.num_steps)
        ):
            # Gradient skipping is only enabled after a warm-up of 100 steps.
            if self.step > 100:
                self.mp_trainer.skip_gradient_thr = self.params.skip_gradient_thr
            batch, cond = next(self.data)
            self.run_step(batch, cond)
            if self.step % self.log_interval == 0:
                # Mirror the logger's key/values to wandb from rank 0 only.
                if logger.get_rank_without_mpi_import() == 0:
                    wandb.log(logger.getkvs(), step=self.step)
                logger.dumpkvs()
            if (not self.skip_save) & (self.step % self.save_interval == 0) & (self.step != 0):
                self.save(self.task_id)
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            # make SNR plots
            if self.params.snr_log_interval > 0 and self.step % self.params.snr_log_interval == 0:
                self.snr_plots(batch, cond, self.task_id, self.step)
            if self.step > 0:
                if self.step % self.plot_interval == 0:
                    plot(self.task_id, self.step)
                if self.step % self.scheduler_step == 0:
                    self.scheduler.step()
                if self.step % self.validation_interval == 0:
                    logger.log(f"Validation for step {self.step}")
                    if self.diffusion.dae_model:
                        dae_result = self.validate_dae()
                        logger.log(f"DAE test MAE: {dae_result:.3}")
                        if logger.get_rank_without_mpi_import() == 0:
                            wandb.log({"dae_test_MAE": dae_result})
                    if not isinstance(self.schedule_sampler, DAEOnlySampler):
                        # FID/precision/recall on generated samples.
                        fid_result, precision, recall = self.validator.calculate_results(
                            train_loop=self,
                            task_id=self.task_id,
                            dataset=self.params.dataset,
                            n_generated_examples=self.params.n_examples_validation,
                            batch_size=self.params.microbatch if self.params.microbatch > 0 else self.params.batch_size)
                        if logger.get_rank_without_mpi_import() == 0:
                            wandb.log({"fid": fid_result})
                            wandb.log({"precision": precision})
                            wandb.log({"recall": recall})
                        logger.log(f"FID: {fid_result}, Prec: {precision}, Rec: {recall}")
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if not self.skip_save:
            if (self.step - 1) % self.save_interval != 0:
                self.save(self.task_id)
        if (self.step - 1) % self.plot_interval != 0:
            plot(self.task_id, self.step)
        if self.params.snr_log_interval > 0:
            self.snr_plots(batch, cond, self.task_id, self.step)
            self.draw_final_snr_plot(self.step, self.task_id)
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
took_step = self.mp_trainer.optimize(self.opt)
if took_step:
self._update_ema()
self._anneal_lr()
self.log_step()
    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of ``batch``/``cond``.

        Each microbatch is moved to the local device, diffusion timesteps are
        drawn from the schedule sampler, losses are computed (optionally with a
        continual-learning penalty against a frozen previous-task model), and
        gradients are accumulated via the mixed-precision trainer.
        """
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i: i + self.microbatch].to(dist_util.dev())
            # micro_cond = cond[i: i + self.microbatch].to(dist_util.dev()) # {
            micro_cond = {
                k: v[i: i + self.microbatch].to(dist_util.dev())
                for k, v in cond.items()
            }
            # True on the final microbatch: gradients must sync under DDP then.
            last_batch = (i + self.microbatch) >= batch.shape[0]
            if isinstance(self.schedule_sampler, TaskAwareSampler):
                # Task-aware samplers additionally see the labels and task id.
                t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev(), micro_cond["y"],
                                                          self.task_id)
            else:
                t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            if self.generate_previous_samples_continuously and (self.task_id > 0):
                # Continual-learning regularizer: penalize drift from the
                # frozen previous-task model on self-generated samples.
                shape = [self.batch_size, self.in_channels, self.image_size, self.image_size]
                prev_loss = self.diffusion.calculate_loss_previous_task(current_model=self.ddp_model,
                                                                        prev_model=self.prev_ddp_model,
                                                                        # Frozen copy of the model
                                                                        schedule_sampler=self.schedule_sampler,
                                                                        task_id=self.task_id,
                                                                        n_examples_per_task=self.batch_size,
                                                                        shape=shape,
                                                                        batch_size=self.microbatch)
            else:
                prev_loss = 0
            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )
            # Skip DDP gradient sync on all but the last microbatch.
            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )
            loss = (losses["loss"] * weights).mean() + prev_loss
            # Recorded for logging only; excluded from per-quartile stats.
            losses["prev_kl"] = prev_loss
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            self.mp_trainer.backward(loss)
def _update_ema(self):
for rate, params in zip(self.ema_rate, self.ema_params):
update_ema(params, self.mp_trainer.master_params, rate=rate)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
    def save(self, task_id):
        """Write model checkpoint(s) for ``task_id`` to the blob log directory.

        UNet models are saved as a single state dict; DAE-style models are
        split into one or two parts. Only rank 0 writes files; all ranks meet
        at the barrier afterwards.
        """
        def save_checkpoint(rate, state_dict, suffix=""):
            # rate == 0 denotes the raw model; nonzero rates are EMA copies.
            if dist.get_rank() == 0:
                logger.log(f"saving model {rate} {suffix}...")
                if not rate:
                    filename = f"model{(self.step + self.resume_step):06d}_{task_id}{suffix}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step + self.resume_step):06d}_{task_id}{suffix}.pt"
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
                    th.save(state_dict, f)
        if self.params.model_name == "UNetModel":
            state_dict = self.mp_trainer.master_params_to_state_dict(self.mp_trainer.master_params)
            save_checkpoint(0, state_dict)
        else:
            # DAE models serialize as up to two separate state dicts.
            state_dict_1, state_dict_2 = self.mp_trainer.master_params_to_state_dict_DAE(self.mp_trainer.master_params)
            save_checkpoint(0, state_dict_1, suffix="_part_1")
            if state_dict_2 is not None:
                save_checkpoint(0, state_dict_2, suffix="_part_2")
        # EMA and optimizer-state saving intentionally disabled (kept for reference):
        # for rate, params in zip(self.ema_rate, self.ema_params):
        #     save_checkpoint(rate, params)
        # if dist.get_rank() == 0:
        #     with bf.BlobFile(
        #         bf.join(get_blob_logdir(), f"opt{(self.step + self.resume_step):06d}.pt"),
        #         "wb",
        #     ) as f:
        #         th.save(self.opt.state_dict(), f)
        dist.barrier()
@th.no_grad()
def generate_examples(self, task_id, n_examples_per_task, batch_size=-1, only_one_task=False):
if not only_one_task:
total_num_exapmles = n_examples_per_task * (task_id + 1)
else:
total_num_exapmles = n_examples_per_task
if batch_size == -1:
batch_size = total_num_exapmles
model = self.mp_trainer.model
model.eval()
all_images = []
model_kwargs = {}
if self.class_cond: ### @TODO add option for class conditioning not task conditioning
if only_one_task:
tasks = th.zeros(n_examples_per_task, device=dist_util.dev()) + task_id
else:
tasks = th.tensor((list(range(task_id + 1)) * (n_examples_per_task)), device=dist_util.dev()).sort()[0]
else:
tasks = None
i = 0
while len(all_images) < total_num_exapmles:
num_examples_to_generate = min(batch_size, total_num_exapmles - len(all_images))
if self.class_cond:
model_kwargs["y"] = tasks[i * batch_size:i * batch_size + num_examples_to_generate]
sample_fn = (
self.diffusion.p_sample_loop # if not self.use_ddim else diffusion.ddim_sample_loop
)
sample = sample_fn(
model,
(num_examples_to_generate, self.in_channels, self.image_size, self.image_size),
clip_denoised=False, model_kwargs=model_kwargs,
)
all_images.extend(sample.cpu())
print(f"generated: {len(all_images)}/{total_num_exapmles}")
i += 1
model.train()
all_images = th.stack(all_images, 0)
return all_images, tasks
    @th.no_grad()
    def plot(self, task_id, step, num_exammples=8):
        """Generate a grid of samples, save it under ``samples/`` and log to wandb.

        NOTE(review): parameter name ``num_exammples`` is misspelled but kept —
        renaming would break keyword callers.
        """
        sample, _ = self.generate_examples(task_id, num_exammples)
        samples_grid = make_grid(sample.detach().cpu(), num_exammples, normalize=True).permute(1, 2, 0)
        sample_wandb = wandb.Image(samples_grid.permute(2, 0, 1), caption=f"sample_task_{task_id}")
        logs = {"sampled_images": sample_wandb}
        plt.imshow(samples_grid)
        plt.axis('off')
        # Ensure the output directory exists before saving the figure.
        if not os.path.exists(os.path.join(logger.get_dir(), f"samples/")):
            os.makedirs(os.path.join(logger.get_dir(), f"samples/"))
        out_plot = os.path.join(logger.get_dir(), f"samples/task_{task_id:02d}_step_{step:06d}")
        plt.savefig(out_plot)
        if logger.get_rank_without_mpi_import() == 0:
            wandb.log(logs, step=step)
    def snr_plots(self, batch, cond, task_id, step):
        """Log per-task SNR and KL diagnostics of the diffusion process.

        For each task seen so far, runs ``get_snr_encode`` on (a class-filtered
        subset of) ``batch``, logs forward/backward-SNR plots and sample grids
        to wandb, and persists the per-task backward-SNR averages to disk for
        ``draw_final_snr_plot``.
        """
        logs = {}
        snr_fwd_fl = []
        snr_bwd_fl = []
        kl_fl = []
        num_examples = 50
        for task in range(task_id+1):
            if self.class_cond:
                # NOTE(review): `batch` is filtered in place each iteration, so
                # for task > 0 the selection applies to the already-filtered
                # batch — confirm this shrinking behavior is intended.
                id_curr = th.where(cond['y'] == task)[0][:num_examples]
                batch = batch[id_curr]
            batch = batch.to(dist_util.dev())
            num_examples = batch.shape[0]
            task_tsr = th.tensor([task]*num_examples, device=dist_util.dev())
            snr_fwd, snr_bwd, kl, x_q, x_p = self.get_snr_encode(batch,
                                                                 task_tsr,
                                                                 save_x=self.params.num_points_plot)
            # Interleave posterior (x_q) and model (x_p) samples per plot point.
            x_p_q = th.cat([
                th.cat([x_q[j::self.params.num_points_plot],
                        x_p[j::self.params.num_points_plot]
                        ]) for j in range(self.params.num_points_plot)])
            x_p_q_vis = make_grid(x_p_q.detach().cpu(),
                                  x_q.shape[0]//self.params.num_points_plot,
                                  normalize=True, scale_each=True)
            logs[f"plot/x_{task}"] = wandb.Image(x_p_q_vis)
            kl_fl.append(kl.cpu())
            # Average SNR over spatial dims -> shape (T, num_examples).
            snr_bwd_fl.append(snr_bwd.reshape(snr_bwd.shape[0], num_examples, -1).detach().cpu().mean(-1))
            snr_fwd_fl.append(snr_fwd.reshape(snr_fwd.shape[0], num_examples, -1).detach().cpu().mean(-1))
        logs["plot/snr_encode"] = self.draw_snr_plot(snr_bwd_fl, snr_fwd_fl, log_scale=True)
        logs["plot/snr_encode_linear"] = self.draw_snr_plot(snr_bwd_fl, snr_fwd_fl, log_scale=False)
        # save the averages (consumed later by draw_final_snr_plot)
        th.save(th.stack([s.mean(1) for s in snr_bwd_fl], 0),
                os.path.join(wandb.run.dir, f'bwd_snr_step_{step}.npy'))
        fig, axes = plt.subplots(ncols=task_id+1, nrows=1, figsize=(5*(task_id+1), 4),
                                 sharey=True, constrained_layout=True)
        if task_id == 0:
            # Single subplot comes back as a bare Axes; make it indexable.
            axes = np.expand_dims(axes, 0)
        for task in range(task_id+1):
            time_hist(axes[task], kl_fl[task])
            axes[task].set_xlabel('T')
            axes[task].set_title(f'KL (task {task})')
            axes[task].grid(True)
        logs["plot/kl"] = wandb.Image(fig)
        if logger.get_rank_without_mpi_import() == 0:
            wandb.log(logs, step=step)
    @th.no_grad()
    def draw_final_snr_plot(self, step, task_id):
        """Plot the evolution of per-task backward SNR across training.

        Loads the backward-SNR averages saved by ``snr_plots`` at every
        ``snr_log_interval`` step and overlays them per task, colored by
        training step, in both linear and log scale. Logged to wandb.
        """
        av_snrs = []
        save_steps = list(range(0, step+1, self.params.snr_log_interval))
        # Include the final step even if it is not on the interval grid.
        if save_steps[-1] < step:
            save_steps.append(step)
        for s in save_steps:
            # N_tasks x T each
            av_snrs.append(th.load(os.path.join(wandb.run.dir, f'bwd_snr_step_{s}.npy')))
        # linear
        cmap = plt.get_cmap('RdYlGn', len(av_snrs))
        fig, axes = plt.subplots(ncols=task_id+1, nrows=1, figsize=(5*(task_id+1), 4),
                                 sharey=True, constrained_layout=True)
        if task_id == 0:
            # Single subplot comes back as a bare Axes; make it indexable.
            axes = np.expand_dims(axes, 0)
        for task in range(task_id+1):
            snr_to_plot = [s[task] for s in av_snrs]
            for i in range(len(snr_to_plot)):
                # One curve per saved step, colored along the colormap.
                axes[task].plot(snr_to_plot[i], c=cmap(i))
            axes[task].grid(True)
        # Normalizer
        norm = mpl.colors.Normalize(vmin=0, vmax=len(av_snrs)-1)
        # creating ScalarMappable
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        cbar = plt.colorbar(sm, ticks=np.linspace(0, len(av_snrs)-1, len(av_snrs)))
        cbar.ax.set_yticklabels(save_steps)
        logs = {"plot/final_snr_linear": wandb.Image(fig)}
        # log scale
        fig, axes = plt.subplots(ncols=task_id+1, nrows=1, figsize=(5*(task_id+1), 4),
                                 sharey=True, constrained_layout=True)
        if task_id == 0:
            axes = np.expand_dims(axes, 0)
        for task in range(task_id+1):
            snr_to_plot = [s[task] for s in av_snrs]
            for i in range(len(snr_to_plot)):
                axes[task].plot(th.log(snr_to_plot[i]), c=cmap(i))
            axes[task].grid(True)
        # Normalizer
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        cbar = plt.colorbar(sm, ticks=np.linspace(0, len(av_snrs)-1, len(av_snrs)))
        cbar.ax.set_yticklabels(save_steps)
        logs["plot/final_snr_log"] = wandb.Image(fig)
        if logger.get_rank_without_mpi_import() == 0:
            wandb.log(logs, step=step)
    @th.no_grad()
    def draw_snr_plot(self, snr_bwd_fl, snr_fwd_fl, log_scale=True):
        """Build a 2 x n_task figure of forward/backward SNR histograms over time.

        Each column is one task; the top row shows the forward-diffusion SNR,
        the bottom row the backward (model) SNR, each as a ``time_hist``
        density plus the mean curve. Returns a ``wandb.Image`` of the figure.
        """
        n_task = len(snr_bwd_fl)
        fig, axes = plt.subplots(ncols=n_task, nrows=2, figsize=(5*n_task, 8),
                                 sharey=True, sharex=True, constrained_layout=True)
        if n_task == 1:
            # Single column comes back 1-D; make axes 2-D indexable.
            axes = np.expand_dims(axes, 0)
        for task in range(n_task):
            fwd = snr_fwd_fl[task]
            bwd = snr_bwd_fl[task]
            title = f'SNR (task {task})'
            if log_scale:
                title = 'log ' + title
                fwd = th.log(fwd)
                bwd = th.log(bwd)
            time_hist(axes[task, 0], fwd)
            axes[task, 0].plot(fwd.mean(1))
            time_hist(axes[task, 1], bwd)
            axes[task, 1].plot(bwd.mean(1))
            axes[task, 0].set_xlabel('T')
            axes[task, 1].set_xlabel('T')
            axes[task, 0].set_ylabel(title)
            axes[task, 0].grid(True)
            axes[task, 1].grid(True)
        axes[0, 0].set_title(f'Forward diffusion ({snr_fwd_fl[task].shape[1]} points)',
                             fontsize=20)
        axes[0, 1].set_title(f'Backward diffusion ({snr_fwd_fl[task].shape[1]} points)',
                             fontsize=20);
        return wandb.Image(fig)
    @th.no_grad()
    def get_snr_encode(self, x, task_id, save_x):
        """Walk the forward diffusion of ``x`` and measure SNR/KL at every step.

        For each timestep i: record the forward-transition SNR (mu^2/var of
        q(x_i | x_{i-1})), sample x_i, record the model's backward SNR from
        p(x_{i-1} | x_i), and (for i > 0) the KL between the true posterior and
        the model. The first ``save_x`` examples of each step's q- and
        p-samples are kept for visualization.

        Returns (snr_fwd, snr_bwd, kl, x_q, x_p) stacked over timesteps.
        NOTE(review): the model is put into eval mode but never restored to
        train mode here — confirm callers handle this.
        """
        model = self.mp_trainer.model
        model.eval()
        model_kwargs = {}
        if self.class_cond:
            model_kwargs = {
                "y": th.zeros(task_id.shape[0], device=dist_util.dev()) + task_id
            }
        indices_fwd = list(range(self.diffusion.num_timesteps))  # [0, 1, ....T]
        shape = x.shape
        x_curr = x.clone()
        x_q = []
        x_p = []
        snr_fwd = []
        snr_bwd = []
        kl = []
        for i in indices_fwd:
            t = th.tensor([i] * shape[0], device=dist_util.dev())
            # get q(x_{i} | x_{i-1})
            mu = _extract_into_tensor(np.sqrt(1.0 - self.diffusion.betas), t, shape) * x_curr
            var = _extract_into_tensor(self.diffusion.betas, t, shape)
            snr_fwd.append(mu**2 / var)
            # sample x_{i}
            noise = th.randn_like(x_curr)
            x_curr = mu + (var ** 0.5) * noise
            x_q.append(x_curr[:save_x])
            # get p(x_{i-1} | x_{i})
            p_out = self.diffusion.p_mean_variance(
                model, x_curr, t, clip_denoised=False, model_kwargs=model_kwargs
            )
            x_p.append(p_out['mean'][:save_x] + (p_out['variance'][:save_x] ** 0.5) * th.randn_like(x_curr[:save_x]))
            snr_bwd.append(p_out['mean']**2 / p_out['variance'])
            if i > 0:
                # get q(x_{i-1} | x_{i}, x_0) - posterior
                true_mean, true_var, true_log_variance_clipped = self.diffusion.q_posterior_mean_variance(
                    x_start=x, x_t=x_curr, t=t
                )
                kl.append(mean_flat(normal_kl(
                    true_mean, true_log_variance_clipped, p_out["mean"], p_out["log_variance"]
                )))
        return th.stack(snr_fwd), th.stack(snr_bwd), th.stack(kl), th.cat(x_q), th.cat(x_p)
@th.no_grad()
def plot_dae_only(self, task_id, step, num_exammples=8):
test_loader = self.validator.dataloaders[self.task_id]
diffs = []
i = 0
t = th.tensor(0, device=dist_util.dev())
self.model.eval()
batch, cond = next(iter(test_loader))
batch = batch.to(dist_util.dev())
x_t = self.diffusion.q_sample(batch, t)
t = th.tensor([0] * x_t.shape[0], device=x_t.device)
with th.no_grad():
out = self.diffusion.p_sample(
self.model,
x_t,
t,
clip_denoised=False,
)
img = out["sample"]
self.model.train()
to_plot = th.cat([batch[:num_exammples], x_t[:num_exammples],img[:num_exammples]])
samples_grid = make_grid(to_plot.detach().cpu(), num_exammples, normalize=True).permute(1, 2, 0)
sample_wandb = wandb.Image(samples_grid.permute(2, 0, 1), caption=f"sample_task_{task_id}")
if logger.get_rank_without_mpi_import() == 0:
wandb.log({"sampled_images": sample_wandb})
plt.imshow(samples_grid)
plt.axis('off')
if not os.path.exists(os.path.join(logger.get_dir(), f"samples/")):
os.makedirs(os.path.join(logger.get_dir(), f"samples/"))
out_plot = os.path.join(logger.get_dir(), f"samples/task_{task_id:02d}_step_{step:06d}")
plt.savefig(out_plot)
    def validate_dae(self):
        """Compute the mean absolute reconstruction error on the test loader.

        Noises each test batch at t=0, denoises it with a single ``p_sample``
        step and averages |batch - reconstruction| over at most ~100 batches.
        Returns the mean of the per-batch MAEs.
        """
        test_loader = self.validator.dataloaders[self.task_id]
        diffs = []
        i = 0
        # Scalar t=0 for the first q_sample; replaced by a per-example vector below.
        t = th.tensor(0, device=dist_util.dev())
        self.model.eval()
        for batch, cond in test_loader:
            batch = batch.to(dist_util.dev())
            x_t = self.diffusion.q_sample(batch, t)
            t = th.tensor([0] * x_t.shape[0], device=x_t.device)
            with th.no_grad():
                out = self.diffusion.p_sample(
                    self.model,
                    x_t,
                    t,
                    clip_denoised=False,
                )
                img = out["sample"]
            diff = th.abs(batch - img).mean().item()
            diffs.append(diff)
            # Cap validation cost at ~100 batches.
            if i > 100:
                break
            i += 1
        self.model.train()
        return np.mean(diffs)
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
def get_blob_logdir():
    """Return the directory checkpoints are written to (the logger's dir).

    You can change this to be a separate path to save checkpoints to a
    blobstore or some external drive.
    """
    return logger.get_dir()
def find_resume_checkpoint():
    """Return an auto-discovered checkpoint path, or None.

    Deliberately a stub: on your infrastructure, override this to discover the
    latest checkpoint on blob storage, a shared filesystem, etc.
    """
    return None
def find_ema_checkpoint(main_checkpoint, step, rate):
    """Locate the EMA checkpoint for (step, rate) next to ``main_checkpoint``.

    Returns the path when the file exists, otherwise None (also when no main
    checkpoint was given).
    """
    if main_checkpoint is None:
        return None
    candidate = bf.join(bf.dirname(main_checkpoint), f"ema_{rate}_{(step):06d}.pt")
    return candidate if bf.exists(candidate) else None
def log_loss_dict(diffusion, ts, losses):
    """Log mean losses plus per-quartile breakdowns keyed by diffusion timestep.

    ``ts`` holds the sampled timestep of each example; every loss tensor in
    ``losses`` is logged as an overall mean and (except the aggregate
    "prev_kl" entry) per timestep quartile, with an extra "_0_step" key for
    examples at t == 0.
    """
    for key, values in losses.items():
        logger.logkv_mean(key, values.mean().item())
        if key != "prev_kl":
            # Log the quantiles (four quartiles, in particular).
            for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
                quartile = int(4 * sub_t / diffusion.num_timesteps)
                logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
                if sub_t == 0:
                    logger.logkv_mean(f"{key}_0_step", sub_loss)
def time_hist(ax, data):
    """Draw a 2-D density ("time histogram") of many series onto ``ax``.

    Each column of ``data`` (shape: num_points x num_series — TODO confirm
    against callers) is linearly upsampled onto a common fine x-grid, then all
    (x, y) points are binned into a 2-D histogram rendered with pcolormesh.
    A colorbar labeled "# points" is attached to ``ax``.
    """
    num_pt, num_ts = data.shape
    num_fine = num_pt*10
    x_fine = np.linspace(0, num_pt, num_fine)
    y_fine = np.empty((num_ts, num_fine), dtype=float)
    # Upsample every series onto the shared fine grid.
    for i in range(num_ts):
        y_fine[i, :] = np.interp(x_fine, range(num_pt), data[:, i])
    y_fine = y_fine.flatten()
    # Fixed: np.matlib.repmat required `import numpy.matlib` (the submodule is
    # not exposed by `import numpy as np` and was removed in NumPy 2.0);
    # np.tile produces the identical num_ts stacked copies of x_fine.
    x_fine = np.tile(x_fine, (num_ts, 1)).flatten()
    cmap = copy.copy(plt.cm.BuPu)
    cmap.set_bad(cmap(0))
    h, xedges, yedges = np.histogram2d(x_fine, y_fine, bins=[40, 1000])
    pcm = ax.pcolormesh(xedges, yedges, h.T, cmap=cmap, vmax=num_ts, rasterized=True)
    plt.colorbar(pcm, ax=ax, label="# points", pad=0)
"wandb.log",
"numpy.empty",
"torch.optim.AdamW",
"torch.cat",
"numpy.mean",
"torch.distributed.get_world_size",
"torch.no_grad",
"os.path.join",
"guided_diffusion.dist_util.sync_params",
"torch.distributed.get_rank",
"numpy.histogram2d",
"matplotlib.pyplot.imshow",
"os.uname",
"guided_diff... | [((16168, 16180), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (16178, 16180), True, 'import torch as th\n'), ((17921, 17933), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (17931, 17933), True, 'import torch as th\n'), ((21159, 21171), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (21169, 21171), True, 'import torch as th\n'), ((23321, 23333), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (23331, 23333), True, 'import torch as th\n'), ((24620, 24632), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (24630, 24632), True, 'import torch as th\n'), ((26522, 26534), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (26532, 26534), True, 'import torch as th\n'), ((29747, 29762), 'blobfile.exists', 'bf.exists', (['path'], {}), '(path)\n', (29756, 29762), True, 'import blobfile as bf\n'), ((30446, 30478), 'numpy.linspace', 'np.linspace', (['(0)', 'num_pt', 'num_fine'], {}), '(0, num_pt, num_fine)\n', (30457, 30478), True, 'import numpy as np\n'), ((30492, 30533), 'numpy.empty', 'np.empty', (['(num_ts, num_fine)'], {'dtype': 'float'}), '((num_ts, num_fine), dtype=float)\n', (30500, 30533), True, 'import numpy as np\n'), ((30730, 30752), 'copy.copy', 'copy.copy', (['plt.cm.BuPu'], {}), '(plt.cm.BuPu)\n', (30739, 30752), False, 'import copy\n'), ((30803, 30850), 'numpy.histogram2d', 'np.histogram2d', (['x_fine', 'y_fine'], {'bins': '[40, 1000]'}), '(x_fine, y_fine, bins=[40, 1000])\n', (30817, 30850), True, 'import numpy as np\n'), ((30941, 30990), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['pcm'], {'ax': 'ax', 'label': '"""# points"""', 'pad': '(0)'}), "(pcm, ax=ax, label='# points', pad=0)\n", (30953, 30990), True, 'import matplotlib.pyplot as plt\n'), ((501, 511), 'os.uname', 'os.uname', ([], {}), '()\n', (509, 511), False, 'import os\n'), ((3467, 3489), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (3487, 3489), True, 'import torch as th\n'), ((3725, 3810), 'torch.optim.AdamW', 'AdamW', 
(['self.mp_trainer.master_params'], {'lr': 'self.lr', 'weight_decay': 'self.weight_decay'}), '(self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay\n )\n', (3730, 3810), False, 'from torch.optim import AdamW\n'), ((3853, 3920), 'torch.optim.lr_scheduler.ExponentialLR', 'th.optim.lr_scheduler.ExponentialLR', (['self.opt'], {'gamma': 'scheduler_rate'}), '(self.opt, gamma=scheduler_rate)\n', (3888, 3920), True, 'import torch as th\n'), ((4464, 4486), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (4484, 4486), True, 'import torch as th\n'), ((6361, 6405), 'copy.deepcopy', 'copy.deepcopy', (['self.mp_trainer.master_params'], {}), '(self.mp_trainer.master_params)\n', (6374, 6405), False, 'import copy\n'), ((6947, 6980), 'guided_diffusion.dist_util.sync_params', 'dist_util.sync_params', (['ema_params'], {}), '(ema_params)\n', (6968, 6980), False, 'from guided_diffusion import dist_util\n'), ((7249, 7274), 'blobfile.exists', 'bf.exists', (['opt_checkpoint'], {}), '(opt_checkpoint)\n', (7258, 7274), True, 'import blobfile as bf\n'), ((16147, 16161), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (16159, 16161), True, 'import torch.distributed as dist\n'), ((17858, 17881), 'torch.stack', 'th.stack', (['all_images', '(0)'], {}), '(all_images, 0)\n', (17866, 17881), True, 'import torch as th\n'), ((18314, 18338), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samples_grid'], {}), '(samples_grid)\n', (18324, 18338), True, 'import matplotlib.pyplot as plt\n'), ((18347, 18362), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (18355, 18362), True, 'import matplotlib.pyplot as plt\n'), ((18613, 18634), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_plot'], {}), '(out_plot)\n', (18624, 18634), True, 'import matplotlib.pyplot as plt\n'), ((20599, 20713), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(task_id + 1)', 'nrows': '(1)', 'figsize': '(5 * (task_id + 1), 4)', 'sharey': 
'(True)', 'constrained_layout': '(True)'}), '(ncols=task_id + 1, nrows=1, figsize=(5 * (task_id + 1), 4),\n sharey=True, constrained_layout=True)\n', (20611, 20713), True, 'import matplotlib.pyplot as plt\n'), ((21043, 21059), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (21054, 21059), False, 'import wandb\n'), ((21627, 21741), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(task_id + 1)', 'nrows': '(1)', 'figsize': '(5 * (task_id + 1), 4)', 'sharey': '(True)', 'constrained_layout': '(True)'}), '(ncols=task_id + 1, nrows=1, figsize=(5 * (task_id + 1), 4),\n sharey=True, constrained_layout=True)\n', (21639, 21741), True, 'import matplotlib.pyplot as plt\n'), ((22197, 22240), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (22218, 22240), True, 'import matplotlib.pyplot as plt\n'), ((22493, 22607), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(task_id + 1)', 'nrows': '(1)', 'figsize': '(5 * (task_id + 1), 4)', 'sharey': '(True)', 'constrained_layout': '(True)'}), '(ncols=task_id + 1, nrows=1, figsize=(5 * (task_id + 1), 4),\n sharey=True, constrained_layout=True)\n', (22505, 22607), True, 'import matplotlib.pyplot as plt\n'), ((22971, 23014), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (22992, 23014), True, 'import matplotlib.pyplot as plt\n'), ((23205, 23221), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (23216, 23221), False, 'import wandb\n'), ((23456, 23571), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'n_task', 'nrows': '(2)', 'figsize': '(5 * n_task, 8)', 'sharey': '(True)', 'sharex': '(True)', 'constrained_layout': '(True)'}), '(ncols=n_task, nrows=2, figsize=(5 * n_task, 8), sharey=True,\n sharex=True, constrained_layout=True)\n', (23468, 23571), True, 'import matplotlib.pyplot as plt\n'), ((24597, 24613), 
'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (24608, 24613), False, 'import wandb\n'), ((26915, 26963), 'torch.tensor', 'th.tensor', (['([0] * x_t.shape[0])'], {'device': 'x_t.device'}), '([0] * x_t.shape[0], device=x_t.device)\n', (26924, 26963), True, 'import torch as th\n'), ((27230, 27303), 'torch.cat', 'th.cat', (['[batch[:num_exammples], x_t[:num_exammples], img[:num_exammples]]'], {}), '([batch[:num_exammples], x_t[:num_exammples], img[:num_exammples]])\n', (27236, 27303), True, 'import torch as th\n'), ((27627, 27651), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samples_grid'], {}), '(samples_grid)\n', (27637, 27651), True, 'import matplotlib.pyplot as plt\n'), ((27660, 27675), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (27668, 27675), True, 'import matplotlib.pyplot as plt\n'), ((27926, 27947), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_plot'], {}), '(out_plot)\n', (27937, 27947), True, 'import matplotlib.pyplot as plt\n'), ((28795, 28809), 'numpy.mean', 'np.mean', (['diffs'], {}), '(diffs)\n', (28802, 28809), True, 'import numpy as np\n'), ((29701, 29728), 'blobfile.dirname', 'bf.dirname', (['main_checkpoint'], {}), '(main_checkpoint)\n', (29711, 29728), True, 'import blobfile as bf\n'), ((3419, 3440), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (3438, 3440), True, 'import torch.distributed as dist\n'), ((7168, 7195), 'blobfile.dirname', 'bf.dirname', (['main_checkpoint'], {}), '(main_checkpoint)\n', (7178, 7195), True, 'import blobfile as bf\n'), ((13210, 13314), 'functools.partial', 'functools.partial', (['self.diffusion.training_losses', 'self.ddp_model', 'micro', 't'], {'model_kwargs': 'micro_cond'}), '(self.diffusion.training_losses, self.ddp_model, micro, t,\n model_kwargs=micro_cond)\n', (13227, 13314), False, 'import functools\n'), ((18701, 18727), 'wandb.log', 'wandb.log', (['logs'], {'step': 'step'}), '(logs, step=step)\n', (18710, 18727), False, 'import wandb\n'), 
((19949, 19971), 'wandb.Image', 'wandb.Image', (['x_p_q_vis'], {}), '(x_p_q_vis)\n', (19960, 19971), False, 'import wandb\n'), ((20521, 20576), 'os.path.join', 'os.path.join', (['wandb.run.dir', 'f"""bwd_snr_step_{step}.npy"""'], {}), "(wandb.run.dir, f'bwd_snr_step_{step}.npy')\n", (20533, 20576), False, 'import os\n'), ((20781, 20804), 'numpy.expand_dims', 'np.expand_dims', (['axes', '(0)'], {}), '(axes, 0)\n', (20795, 20804), True, 'import numpy as np\n'), ((21126, 21152), 'wandb.log', 'wandb.log', (['logs'], {'step': 'step'}), '(logs, step=step)\n', (21135, 21152), False, 'import wandb\n'), ((21809, 21832), 'numpy.expand_dims', 'np.expand_dims', (['axes', '(0)'], {}), '(axes, 0)\n', (21823, 21832), True, 'import numpy as np\n'), ((22435, 22451), 'wandb.Image', 'wandb.Image', (['fig'], {}), '(fig)\n', (22446, 22451), False, 'import wandb\n'), ((22675, 22698), 'numpy.expand_dims', 'np.expand_dims', (['axes', '(0)'], {}), '(axes, 0)\n', (22689, 22698), True, 'import numpy as np\n'), ((23288, 23314), 'wandb.log', 'wandb.log', (['logs'], {'step': 'step'}), '(logs, step=step)\n', (23297, 23314), False, 'import wandb\n'), ((23642, 23665), 'numpy.expand_dims', 'np.expand_dims', (['axes', '(0)'], {}), '(axes, 0)\n', (23656, 23665), True, 'import numpy as np\n'), ((25530, 25551), 'torch.randn_like', 'th.randn_like', (['x_curr'], {}), '(x_curr)\n', (25543, 25551), True, 'import torch as th\n'), ((26439, 26456), 'torch.stack', 'th.stack', (['snr_fwd'], {}), '(snr_fwd)\n', (26447, 26456), True, 'import torch as th\n'), ((26458, 26475), 'torch.stack', 'th.stack', (['snr_bwd'], {}), '(snr_bwd)\n', (26466, 26475), True, 'import torch as th\n'), ((26477, 26489), 'torch.stack', 'th.stack', (['kl'], {}), '(kl)\n', (26485, 26489), True, 'import torch as th\n'), ((26491, 26502), 'torch.cat', 'th.cat', (['x_q'], {}), '(x_q)\n', (26497, 26502), True, 'import torch as th\n'), ((26504, 26515), 'torch.cat', 'th.cat', (['x_p'], {}), '(x_p)\n', (26510, 26515), True, 'import torch as 
th\n'), ((26838, 26853), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (26851, 26853), False, 'from guided_diffusion import dist_util\n'), ((26977, 26989), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (26987, 26989), True, 'import torch as th\n'), ((27574, 27617), 'wandb.log', 'wandb.log', (["{'sampled_images': sample_wandb}"], {}), "({'sampled_images': sample_wandb})\n", (27583, 27617), False, 'import wandb\n'), ((28302, 28350), 'torch.tensor', 'th.tensor', (['([0] * x_t.shape[0])'], {'device': 'x_t.device'}), '([0] * x_t.shape[0], device=x_t.device)\n', (28311, 28350), True, 'import torch as th\n'), ((30673, 30708), 'numpy.matlib.repmat', 'np.matlib.repmat', (['x_fine', 'num_ts', '(1)'], {}), '(x_fine, num_ts, 1)\n', (30689, 30708), True, 'import numpy as np\n'), ((4342, 4386), 'copy.deepcopy', 'copy.deepcopy', (['self.mp_trainer.master_params'], {}), '(self.mp_trainer.master_params)\n', (4355, 4386), False, 'import copy\n'), ((4986, 5007), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (5005, 5007), True, 'import torch.distributed as dist\n'), ((5931, 5946), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (5944, 5946), True, 'import torch.distributed as dist\n'), ((6612, 6627), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6625, 6627), True, 'import torch.distributed as dist\n'), ((11482, 11497), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (11495, 11497), False, 'from guided_diffusion import dist_util\n'), ((14798, 14813), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (14811, 14813), True, 'import torch.distributed as dist\n'), ((19103, 19118), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (19116, 19118), False, 'from guided_diffusion import dist_util\n'), ((23899, 23910), 'torch.log', 'th.log', (['fwd'], {}), '(fwd)\n', (23905, 23910), True, 'import torch as th\n'), ((23933, 23944), 'torch.log', 
'th.log', (['bwd'], {}), '(bwd)\n', (23939, 23944), True, 'import torch as th\n'), ((26724, 26739), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (26737, 26739), False, 'from guided_diffusion import dist_util\n'), ((28105, 28120), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (28118, 28120), False, 'from guided_diffusion import dist_util\n'), ((28217, 28232), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (28230, 28232), False, 'from guided_diffusion import dist_util\n'), ((28368, 28380), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (28378, 28380), True, 'import torch as th\n'), ((4791, 4806), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (4804, 4806), False, 'from guided_diffusion import dist_util\n'), ((7458, 7473), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (7471, 7473), False, 'from guided_diffusion import dist_util\n'), ((8511, 8556), 'os.environ.get', 'os.environ.get', (['"""DIFFUSION_TRAINING_TEST"""', '""""""'], {}), "('DIFFUSION_TRAINING_TEST', '')\n", (8525, 8556), False, 'import os\n'), ((11655, 11670), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (11668, 11670), False, 'from guided_diffusion import dist_util\n'), ((11934, 11949), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (11947, 11949), False, 'from guided_diffusion import dist_util\n'), ((12132, 12147), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (12145, 12147), False, 'from guided_diffusion import dist_util\n'), ((15237, 15259), 'torch.save', 'th.save', (['state_dict', 'f'], {}), '(state_dict, f)\n', (15244, 15259), True, 'import torch as th\n'), ((19223, 19238), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (19236, 19238), False, 'from guided_diffusion import dist_util\n'), ((19534, 19621), 'torch.cat', 'th.cat', (['[x_q[j::self.params.num_points_plot], 
x_p[j::self.params.num_points_plot]]'], {}), '([x_q[j::self.params.num_points_plot], x_p[j::self.params.\n num_points_plot]])\n', (19540, 19621), True, 'import torch as th\n'), ((21482, 21534), 'os.path.join', 'os.path.join', (['wandb.run.dir', 'f"""bwd_snr_step_{s}.npy"""'], {}), "(wandb.run.dir, f'bwd_snr_step_{s}.npy')\n", (21494, 21534), False, 'import os\n'), ((22868, 22890), 'torch.log', 'th.log', (['snr_to_plot[i]'], {}), '(snr_to_plot[i])\n', (22874, 22890), True, 'import torch as th\n'), ((25224, 25239), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (25237, 25239), False, 'from guided_diffusion import dist_util\n'), ((25316, 25351), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.diffusion.betas)'], {}), '(1.0 - self.diffusion.betas)\n', (25323, 25351), True, 'import numpy as np\n'), ((4743, 4758), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (4756, 4758), False, 'from guided_diffusion import dist_util\n'), ((6819, 6834), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (6832, 6834), False, 'from guided_diffusion import dist_util\n'), ((18989, 19016), 'torch.where', 'th.where', (["(cond['y'] == task)"], {}), "(cond['y'] == task)\n", (18997, 19016), True, 'import torch as th\n'), ((25909, 25939), 'torch.randn_like', 'th.randn_like', (['x_curr[:save_x]'], {}), '(x_curr[:save_x])\n', (25922, 25939), True, 'import torch as th\n'), ((6185, 6200), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (6198, 6200), False, 'from guided_diffusion import dist_util\n'), ((9437, 9476), 'wandb.log', 'wandb.log', (["{'dae_test_MAE': dae_result}"], {}), "({'dae_test_MAE': dae_result})\n", (9446, 9476), False, 'import wandb\n'), ((10315, 10345), 'wandb.log', 'wandb.log', (["{'fid': fid_result}"], {}), "({'fid': fid_result})\n", (10324, 10345), False, 'import wandb\n'), ((10374, 10409), 'wandb.log', 'wandb.log', (["{'precision': precision}"], {}), "({'precision': precision})\n", (10383, 10409), 
False, 'import wandb\n'), ((10438, 10467), 'wandb.log', 'wandb.log', (["{'recall': recall}"], {}), "({'recall': recall})\n", (10447, 10467), False, 'import wandb\n'), ((16814, 16829), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (16827, 16829), False, 'from guided_diffusion import dist_util\n'), ((24880, 24895), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (24893, 24895), False, 'from guided_diffusion import dist_util\n'), ((28623, 28642), 'torch.abs', 'th.abs', (['(batch - img)'], {}), '(batch - img)\n', (28629, 28642), True, 'import torch as th\n'), ((16952, 16967), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (16965, 16967), False, 'from guided_diffusion import dist_util\n')] |
from typing import Iterable
import numpy as np
from pynwb.misc import Units
import ipywidgets as widgets
from .misc import RasterWidget, PSTHWidget, RasterGridWidget
from .view import default_neurodata_vis_spec
from .utils.pynwb import robust_unique
from .controllers import GroupAndSortController
class AllenRasterWidget(RasterWidget):
    """Raster widget whose group/sort controller also knows about the
    Allen session's electrodes table."""

    def make_group_and_sort(self, group_by=None):
        """Build the Allen-aware group-and-sort controller for this widget's units."""
        controller = AllenRasterGroupAndSortController(self.units, group_by=group_by)
        return controller
class AllenPSTHWidget(PSTHWidget):
    """PSTH widget extended with a dropdown that restricts the shown trials
    to a single stimulus type.

    NOTE(review): relies on ``self.trials`` having a 'stimulus_name' column
    and on the parent class exposing ``self.gas`` (the group-and-sort
    controller) -- confirm against PSTHWidget.
    """
    def __init__(self, units: Units, unit_index=0, unit_controller=None, sigma_in_secs=.05, ntt=1000):
        super().__init__(units, unit_index, unit_controller, sigma_in_secs, ntt)
        # Dropdown over every stimulus type present in the trials table;
        # 'drifting_gratings' is preselected through the `label` trait
        # (synced with `value` on ipywidgets selection widgets).
        self.stimulus_type_dd = widgets.Dropdown(options=np.unique(self.trials['stimulus_name'][:]).tolist(),
                                              label='drifting_gratings',
                                              description='stimulus type')
        self.stimulus_type_dd.observe(self.stimulus_type_dd_callback)
        # Prepend the dropdown to the controls inherited from PSTHWidget.
        self.children = [self.stimulus_type_dd] + list(self.children)
    def get_trials(self):
        # Use the NWB file's epochs table as the trials source instead of
        # the default trials table.
        return self.units.get_ancestor('NWBFile').epochs
    def stimulus_type_dd_callback(self, change):
        # Discard every trial whose stimulus does not match the selection.
        self.gas.discard_rows = np.where(self.trials['stimulus_name'][:] != self.stimulus_type_dd.value)[0]
    def make_group_and_sort(self, window=False):
        # Initial filter must match the dropdown's initial selection
        # ('drifting_gratings').
        discard_rows = np.where(self.trials['stimulus_name'][:] != 'drifting_gratings')[0]
        gas = GroupAndSortController(self.trials, window=window, start_discard_rows=discard_rows)
        return gas
class AllenRasterGroupAndSortController(GroupAndSortController):
    """Group/sort controller that augments the unit groupings with columns
    from the session's electrodes table, matched via 'peak_channel_id'."""
    def get_groups(self):
        """Return the parent groupings plus one grouping per electrodes column."""
        self.electrodes = self.dynamic_table.get_ancestor('NWBFile').electrodes
        groups = super().get_groups()
        groups.update({name: np.unique(self.electrodes[name][:]) for name in self.electrodes.colnames})
        return groups
    def get_orderable_cols(self):
        """Return orderable unit columns plus scalar electrode columns that
        take more than one distinct value."""
        units_orderable_cols = super().get_orderable_cols()
        # Only scalar, non-string electrode columns can be used for ordering.
        candidate_cols = [x for x in self.electrodes.colnames
                          if not (isinstance(self.electrodes[x][0], Iterable) or
                                  isinstance(self.electrodes[x][0], str))]
        return units_orderable_cols + [x for x in candidate_cols
                                       if len(robust_unique(self.electrodes[x][:])) > 1]
    def get_group_vals(self, by, rows_select=()):
        """Values of column ``by`` for the selected rows, looked up first in
        the units table and otherwise (via 'peak_channel_id') in the
        electrodes table. Implicitly returns None when ``by`` is found in
        neither table."""
        if by is None:
            return None
        elif by in self.dynamic_table:
            return self.dynamic_table[by][:][rows_select]
        else:
            if self.electrodes is not None and by in self.electrodes:
                ids = self.electrodes.id[:]
                # Map each unit's peak_channel_id to its row in electrodes.
                # NOTE(review): np.argmax returns 0 for ids not present --
                # assumes every peak_channel_id exists in electrodes; confirm.
                inds = [np.argmax(ids == val) for val in self.dynamic_table['peak_channel_id'][:]]
                return self.electrodes[by][:][inds][rows_select]
class AllenRasterGridWidget(RasterGridWidget):
    """Raster grid widget with a dropdown that restricts the shown trials to
    one stimulus type."""
    def get_trials(self):
        # Use the NWB file's epochs table as the trials source.
        return self.units.get_ancestor('NWBFile').epochs
    def select_trials(self):
        # Dropdown over all stimulus names; 'drifting_gratings' is
        # preselected through the synced `label` trait.
        self.controls['trials_select'] = widgets.Dropdown(options=np.unique(self.trials['stimulus_name'][:]).tolist(),
                                                          label='drifting_gratings',
                                                          description='trial select')
        self.children = list(self.children) + [self.controls['trials_select']]
    def process_controls(self, control_states):
        # Convert the selected stimulus name into a boolean row mask so the
        # parent widget only plots the matching trials.
        control_states['trials_select'] = self.trials['stimulus_name'][:] == control_states.pop('trials_select')
        return control_states
def load_allen_widgets():
    """Register the Allen-specific visualizations for pynwb ``Units`` tables
    in the default nwbwidgets visualization spec."""
    allen_views = {
        'Session Raster': AllenRasterWidget,
        'Grouped PSTH': AllenPSTHWidget,
        'Raster Grid': AllenRasterGridWidget,
    }
    for view_name, widget_cls in allen_views.items():
        default_neurodata_vis_spec[Units][view_name] = widget_cls
| [
"numpy.where",
"numpy.unique",
"numpy.argmax"
] | [((1268, 1340), 'numpy.where', 'np.where', (["(self.trials['stimulus_name'][:] != self.stimulus_type_dd.value)"], {}), "(self.trials['stimulus_name'][:] != self.stimulus_type_dd.value)\n", (1276, 1340), True, 'import numpy as np\n'), ((1417, 1481), 'numpy.where', 'np.where', (["(self.trials['stimulus_name'][:] != 'drifting_gratings')"], {}), "(self.trials['stimulus_name'][:] != 'drifting_gratings')\n", (1425, 1481), True, 'import numpy as np\n'), ((1846, 1881), 'numpy.unique', 'np.unique', (['self.electrodes[name][:]'], {}), '(self.electrodes[name][:])\n', (1855, 1881), True, 'import numpy as np\n'), ((753, 795), 'numpy.unique', 'np.unique', (["self.trials['stimulus_name'][:]"], {}), "(self.trials['stimulus_name'][:])\n", (762, 795), True, 'import numpy as np\n'), ((2757, 2778), 'numpy.argmax', 'np.argmax', (['(ids == val)'], {}), '(ids == val)\n', (2766, 2778), True, 'import numpy as np\n'), ((3125, 3167), 'numpy.unique', 'np.unique', (["self.trials['stimulus_name'][:]"], {}), "(self.trials['stimulus_name'][:])\n", (3134, 3167), True, 'import numpy as np\n')] |
import copy
import math
import logging
import numpy as np
import matplotlib.pyplot as plt
from abc import ABCMeta, abstractmethod
#from neupy.algorithms import GRNN as grnn
from sklearn.neural_network import MLPRegressor as mlpr
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans as KMeans
from neupy.algorithms import GRNN as grnn
# TensorFlow is only required by DNN_Regressor_tf; keep the rest of this
# module importable when it is missing and just log a warning instead of
# failing the whole import.
try:
    import tensorflow as tf
except Exception:
    logging.warning('Could not import tensorflow')
class Estimator(metaclass=ABCMeta):
    """
    BASIC ESTIMATOR CLASS

    Defines the basic functionality of an estimator: independent of
    regression or classification, every estimator instance holds a fitting
    method, a prediction method and an evaluate method.

    Data input vectors should be standardized to column vectors, as most
    pre-implemented classes use this as a standard. The data used for
    fitting should in general be centered (mean 0) and scaled to the unit
    interval ([0,1] or [-1,1]); for regression models this also holds for
    the target data. To retrieve correctly scaled output values after
    fitting, rescale predictions using the values stored in
    ``pre_proc_dict``.

    Attributes:
        pre_proc_dict: dictionary of data-specific preprocessing information
            (e.g. mean values, data range used for centering/scaling).
        score: value obtained from evaluating a dataset drawn from the same
            distribution as the training data (e.g. via a train/test split).
        _name: internal name of this estimator.
        _type: general type of the estimator, e.g. 'Regressor',
            'Clustering', 'Wrapper'.
    """

    def __init__(self, name='estimator', pre_proc_dict=None, type=None):
        # `type` shadows the builtin, but the parameter name is kept for
        # backward compatibility with existing keyword callers.
        self.pre_proc_dict = pre_proc_dict
        self.score = None
        self._name = name
        self._type = type

    @abstractmethod
    def fit(self, data, target=None):
        """
        Fit the estimator to data drawn from the distribution of interest.

        BUGFIX: the original class declared ``fit`` twice (once with and
        once without ``target``); the second definition silently replaced
        the supervised one. A single signature with an optional ``target``
        covers both the supervised and unsupervised cases.

        data: data drawn from the distribution to be fitted by this estimator
        target: data from the target distribution in supervised models
            (``None`` for unsupervised estimators)
        """

    @abstractmethod
    def predict(self, data):
        """
        data: data drawn from the fitted distribution to be predicted
        """

    @abstractmethod
    def evaluate(self, data, target):
        """
        Evaluate the performance of the estimator by comparing the
        predictions for ``data`` with the ``target`` values using some
        reasonable distance measure.

        data: data drawn from the fitted distribution to be predicted
        target: the target values associated with the data input
        """
class MLP_Regressor_scikit(Estimator):
    '''
    Ordinary neural network (multi-layer perceptron) regressor from
    scikit-learn, trained with the 'lbfgs' solver.

    Hyperparameters for this estimator:
        regularization_coefficient: L2 regularization multiplier (maps to
                             scikit-learn's ``alpha``; the original docstring
                             called it L1, but MLPRegressor applies an L2
                             penalty). 0. --> regularization disabled.
        hidden_layers:       network architecture. An input of [10,10]
                             corresponds to a network with two hidden layers
                             with 10 nodes each, plus input/output layers
                             sized from the training data.
        activation_function: activation function used throughout the network.
                             Possible values: 'relu' (default), 'logistic',
                             'tanh', ...
    '''

    def __init__(self, hyper_parameter_dict, output_dim=1, n_feature=1,
                 pre_proc_dict=None):
        super().__init__(name='MLP_Regressor_scikit', pre_proc_dict=pre_proc_dict,
                         type='Regressor')
        self.hyper_parameter_dict = hyper_parameter_dict
        self.output_dim = output_dim
        self.n_feature = n_feature
        self.extract_hyper_params_from_dict()
        # BUGFIX: the original call passed **kw, but no variable named ``kw``
        # exists anywhere in this class -> NameError on every construction.
        self.mlpr_ = mlpr(solver='lbfgs',
                          hidden_layer_sizes=self._hidden_layers,
                          activation=self.activation,
                          alpha=self.alpha,
                          max_iter=5000)
        # np.infty was removed in NumPy 2.0; np.inf is the canonical spelling.
        self.score = -np.inf

    def extract_hyper_params_from_dict(self):
        """Read the hyperparameters (with defaults) from the dict supplied
        at construction time."""
        self._hidden_layers = self.hyper_parameter_dict.get('hidden_layers', [10])
        # scikit-learn expects hidden_layer_sizes as a tuple
        self._hidden_layers = tuple(self._hidden_layers)
        self.alpha = self.hyper_parameter_dict.get('regularization_coefficient', 0.5)
        self.activation = self.hyper_parameter_dict.get('activation_function', 'relu')

    def fit(self, x_train, y_train):
        """Fit the MLP on (x_train, y_train) and store the training score."""
        self.mlpr_.fit(x_train, y_train)
        self.score = self.evaluate(x_train, y_train)
        print('MLP_Regressor scikit trained with '+
              str(self.score)+' accuracy on training set.')

    def predict(self, x_pred):
        """
        Has to be callable by scipy optimizers such as fmin(). I.e input
        has to be wrapped to a list for the estimators predict method.
        """
        out = self.mlpr_.predict(x_pred)
        return out

    def evaluate(self, x, y):
        """Return (and store) the R^2 score of the predictions for x vs. y."""
        self.score = self.mlpr_.score(x, y)
        return self.score

    def print_score(self):
        print("Training score of ANN: "+str(self.score))
class DNN_Regressor_tf(Estimator):
    """
    Ordinary neural network implementation in Tensorflow (TF1 graph mode).
    -loss function used: squared loss l(y,y_pred)=||y - y_pred||**2
    -accuracy measure: coefficient of determination:
     R_acc = 1-sum((y-y_pred)**2)/sum((y-mean(y))**2)
    -optimizer: tf.GradientDescentOptimizer
    Hyperparameters for this estimator:
     learning_rate: learning rate for gradient descent
     regularization_coefficient: L1 regularization multiplier.
                         0. --> regularization disabled.
     hidden_layers: network architecture. An input of [10,10] corresponds
                    to a network with two hidden layers with 10 nodes each.
                    additional to this, the network has input and output
                    layers corresponding to the number of input features
                    and output parameters. This is defined by n_feature
                    and output_dim.
     learning_steps: Number of gradient descents performed. Can potentially
                     be used for early stopping applications.
    """
    def __init__(self,hyper_parameter_dict,output_dim=1,n_feature = 1,
                 pre_proc_dict = None):
        super().__init__(name='DNN_Regressor_tf',pre_proc_dict=pre_proc_dict,
                         type='Regressor')
        self.hyper_parameter_dict=hyper_parameter_dict
        self._n_feature = n_feature
        self._output_dim = output_dim
        # TF1-style session; the graph is built lazily in fit().
        self._session = tf.Session()
        self.learning_acc = []  # per-epoch [epoch, accuracy] pairs
        self.extract_hyper_params_from_dict()
    def extract_hyper_params_from_dict(self):
        # Read hyperparameters (with defaults) from the dict supplied at
        # construction time.
        self._hidden_layers= self.hyper_parameter_dict.get('hidden_layers',[10])
        self.alpha = self.hyper_parameter_dict.get('learning_rate',0.5)
        self.beta = self.hyper_parameter_dict.get('regularization_coefficient',0.)
        self.iters = self.hyper_parameter_dict.get('learning_steps',200)
    def get_stddev(self, inp_dim, out_dim):
        # Heuristic weight-init scale based on fan-in + fan-out.
        std = 1.3 / math.sqrt(float(inp_dim) + float(out_dim))
        return std
    def network(self, x):
        """Build the feed-forward graph for input placeholder ``x``.
        Returns (logits, reg_terms): the linear output node and the list of
        per-layer L1 regularization terms."""
        x = tf.cast(x,tf.float32)
        hidden = []
        regularizer = tf.contrib.layers.l1_regularizer(self.beta)
        reg_terms = []
        #input layer. Input does not need to be transformed as we are regressing
        with tf.name_scope("input"):
            weights = tf.Variable(tf.truncated_normal([self._n_feature, self._hidden_layers[0]],
                                  stddev=self.get_stddev(self._n_feature,
                                                         self._hidden_layers[0])),
                                  name='weights')
            biases = tf.Variable(tf.zeros([self._hidden_layers[0]]), name='biases')
            input_ = tf.matmul(x, weights) + biases
            reg_terms.append(tf.contrib.layers.apply_regularization(regularizer,[weights,biases]))
        #hidden layers
        for ind, size in enumerate(self._hidden_layers):
            # the last entry is handled by the output layer below
            if ind == len(self._hidden_layers) - 1: break
            with tf.name_scope("hidden{}".format(ind+1)):
                weights = tf.Variable(tf.truncated_normal([size, self._hidden_layers[ind+1]],
                    stddev=self.get_stddev(self._n_feature, self._hidden_layers[ind+1])), name='weights')
                biases = tf.Variable(tf.zeros([self._hidden_layers[ind+1]]), name='biases')
                inputs = input_ if ind == 0 else hidden[ind-1]
                hidden.append(tf.nn.relu(tf.matmul(inputs,weights)+biases,name="hidden{}".format(ind+1)))
                reg_terms.append(tf.contrib.layers.apply_regularization(regularizer,[weights,biases]))
        #output layer
        with tf.name_scope("output"):
            weights = tf.Variable(tf.truncated_normal([self._hidden_layers[-1],self._output_dim],
                stddev=self.get_stddev(self._hidden_layers[-1],self._output_dim)),name='weights')
            biases = tf.Variable(tf.zeros([self._output_dim]),name='biases')
            logits = tf.matmul(hidden[-1],weights)+biases #regression model. Select linear act. fct.
            reg_terms.append(tf.contrib.layers.apply_regularization(regularizer,[weights,biases]))
        return logits, reg_terms
    def fit(self,x_train=None,y_train=None):
        """Build the graph and run ``self.iters`` gradient-descent steps on
        (x_train, y_train), recording the accuracy after every step."""
        # NOTE(review): this warns whenever x_train is provided, i.e. on
        # every normal training call -- the condition looks inverted (it
        # presumably should test whether the model was *already* trained).
        # Confirm the intent before changing.
        if x_train is not None:
            logging.warning('< DNN_Regressor_tf > has already been trained!'
                            're-training estimator on new input data!')
           # x_train,y_train, \
        x = tf.placeholder(tf.float32, [None, self._n_feature])
        y = tf.placeholder(tf.float32, [None, self._output_dim])
        logits, reg_terms = self.network(x)
        self.learning_acc = []
        loss = self.loss(logits, y) + tf.reduce_sum(reg_terms)
        print(loss)
        print(self.alpha)
        train_op = tf.train.GradientDescentOptimizer(self.alpha).minimize(loss)
        # keep graph handles for predict()
        self._x = x
        self._y = y
        self._logits = logits
        accuracy = self.evaluate_np(logits,y) #used for learning curve creation
        init = tf.initialize_all_variables()
        self._session.run(init)
        # plt.figure()
        # plt.grid(True)
        # plt.title('learning curve') ## Learning Curve plotting
        # plt.xlabel('learning epoch')
        # plt.ylabel('loss')
        for i in range(self.iters):
            self._session.run(train_op,feed_dict={x: x_train, y: y_train})
            _acc = self._session.run(accuracy, feed_dict={x: x_train, y: y_train})
            self.learning_acc.append([i, _acc])
        # NOTE(review): score is stored as the last [epoch, accuracy] pair,
        # not a scalar -- confirm downstream consumers expect this shape.
        self.score = self.learning_acc[-1]
        # plt.plot(range(self.iters),learning_progress,'go')
        # plt.show()
    def loss(self, logits_test, y_test):
        # Squared error normalized by the target variance (graph-mode tensors).
        _tmp = tf.square(y_test-logits_test)
        _loss = tf.reduce_sum(_tmp) / tf.reduce_sum(tf.square(y_test-tf.reduce_mean(y_test)))
        return _loss
    def evaluate_np(self,logits_test,y_test):
        # Coefficient of determination R^2 as a TF tensor (despite the
        # "_np" suffix, this operates on graph tensors, not numpy arrays).
        _accuracy = 1 - \
            tf.reduce_sum(tf.square(y_test-logits_test)) \
            /tf.reduce_sum(tf.square(y_test-tf.reduce_mean(y_test)))
        return _accuracy
    def evaluate(self,logits_test=np.array([]),y_test=np.array([])):
        # NOTE(review): the default arguments are arrays created once at
        # module load; calling evaluate() without arguments predicts on an
        # empty array -- presumably callers always pass both arguments.
        y_pred = self.predict(logits_test)
        self.score = 1 - \
            np.linalg.norm((y_test-y_pred)**2) \
            /np.linalg.norm((y_test-np.mean(y_test,axis=0)**2))
        return self.score
    def predict(self, samples):
        """Run the fitted graph on ``samples`` (2-d: n_samples x n_feature)."""
        predictions = self._logits
        #return self._session.run(predictions, {self._x: [samples]})
        return self._session.run(predictions, {self._x: samples})
class Polynomial_Regression(Estimator):
    """
    Estimator for a polynomial regression with the degree as a
    hyperparameter. The bias term is included explicitly in the feature
    matrix, so the underlying LinearRegression is fitted without intercept.

    Hyperparameters for this estimator:
        polynomial_dimension: degree of the regression polynomial (default 1)
    """

    def __init__(self, hyper_parameter_dict, pre_proc_dict=None):
        super().__init__(name='Polynomial_Regression_scikit',
                         pre_proc_dict=pre_proc_dict, type='Regressor')
        self._polyReg = None
        # BUGFIX: the dict was never stored, so extract_hyper_params_from_dict()
        # raised AttributeError on every construction.
        self.hyper_parameter_dict = hyper_parameter_dict
        self.extract_hyper_params_from_dict()

    def extract_hyper_params_from_dict(self):
        # degree of the fitted polynomial (default: linear)
        self.ndim = self.hyper_parameter_dict.get('polynomial_dimension', 1)

    def poly_features_transform(self, X):
        """Map samples X (n_samples[, n_features]) to the polynomial feature
        matrix [1, X, X**2, ..., X**ndim], stacked column-wise.
        Mixed (cross) polynomial terms are not supported."""
        if X.ndim == 1:
            # BUGFIX: a 1-d input previously broke the broadcasting of the
            # column assignments below (shape (n,) cannot be assigned into
            # an (n,1) slice); make it an explicit column vector instead.
            X = X.reshape(-1, 1)
        data_shape = np.shape(X)
        A = np.zeros((data_shape[0], 1 + self.ndim * data_shape[1]))
        A[:, 0] = np.ones(data_shape[0])  # bias column
        for it in range(self.ndim):
            A[:, 1 + it * data_shape[1]:1 + (it + 1) * data_shape[1]] = X ** (it + 1)
        return A

    def fit(self, x_train, y_train):
        """Fit a least-squares polynomial of degree ``self.ndim``."""
        # fit_intercept=False: the bias is already part of the feature matrix.
        self._polyReg = LinearRegression(fit_intercept=False)
        self._polyReg.fit(self.poly_features_transform(x_train), y_train)

    def predict(self, samples):
        """Predict target values for ``samples`` (list or ndarray)."""
        if not isinstance(samples, np.ndarray):
            samples = np.array(samples)
        return self._polyReg.predict(self.poly_features_transform(samples))

    def evaluate(self, x, y):
        """Store and return an R^2-like score of the predictions for x vs. y."""
        pred = self.predict(x)
        self.score = 1. - np.linalg.norm(pred - y) ** 2 \
            / np.linalg.norm(y - np.mean(y, axis=0)) ** 2
        return self.score
class GRNN_neupy(Estimator):
    """
    Generalized Regression Neural Network implementation from neupy
    If the training data target values are multidimensional, for every paramter
    in the target data, a new GRNN is initialized and trained.
    Hyperparameters for this estimator:
        gamma: list of scaling factors for the standard dev. input.
                1.--> use std (or -if None- the regular std dev of
                the input data)
    """
    def __init__(self,hyper_parameter_dict,verbose =False,
                 pre_proc_dict=None):
        super().__init__(name='GRNN_neupy',pre_proc_dict=pre_proc_dict,
                         type='Regressor')
        self.hyper_parameter_dict = hyper_parameter_dict
        self.extract_hyper_params_from_dict()
        self._verbose = verbose
        self._grnn = None  # list of one fitted GRNN per output dimension
    def extract_hyper_params_from_dict(self):
        # Read hyperparameters (with defaults); gamma is normalized to a list.
        self._std= self.hyper_parameter_dict.get('standard_deviations',None)
        self._gamma = self.hyper_parameter_dict.get('std_scaling',[1.])
        if not isinstance(self._gamma,list):
            self._gamma = [self._gamma]
    def fit(self,x_train,y_train):
        """Train one GRNN per output column of y_train; 1-d inputs are
        reshaped to column vectors first."""
        if not isinstance(x_train,np.ndarray):
            x_train = np.array(x_train)
        if x_train.ndim == 1:
            x_train.shape = (np.size(x_train),x_train.ndim)
            #x_train.reshape((np.size(x_train),x_train.ndim))
        if not isinstance(y_train,np.ndarray):
            y_train = np.array(y_train)
        if y_train.ndim == 1:
            #y_train.reshape((np.size(y_train),y_train.ndim))
            y_train.shape = (np.size(y_train),y_train.ndim)
        # NOTE(review): this compares len(gamma) against y_train.ndim (always
        # 2 after the reshape above) rather than against the number of output
        # columns np.shape(y_train)[1] -- looks like a bug; confirm intent.
        if len(self._gamma) != y_train.ndim:
            logging.warning('Hyperparameter gamma contains only '
                            +str(len(self._gamma))+
                            ' values while there are '+str(y_train.ndim)+ ' output'
                            ' dimensions. Missing values are set to the value for'
                            'the first parameter!')
            while len(self._gamma) <= y_train.ndim:
                self._gamma.append(self._gamma[0])
        if self._std is None:
            # Default std: mean of the per-feature standard deviations.
            std_x = 0.
            for it in range(np.shape(x_train)[1]):
                std_x += np.std(x_train[:,it])
            self._std = std_x/x_train.ndim
        self._grnn = []
        for it in range(np.shape(y_train)[1]):
            new_grnn = grnn(std=self._gamma[it]*self._std)
            print('GRNN initialized with std: ',self._std)
            new_grnn.train(x_train,y_train[:,it])
            self._grnn.append(new_grnn)
    def predict(self,samples):
        """Predict every output dimension for ``samples`` (2-d expected)."""
        if not isinstance(samples,np.ndarray):
            samples = np.array(samples)
        predictions = np.zeros((np.shape(samples)[0],len(self._grnn)))
        for it in range(len(self._grnn)):
            pred = self._grnn[it].predict(samples)
            predictions[:,it] = np.reshape(pred,(len(pred)))
        # NOTE(review): this checks the number of *features*
        # (np.shape(samples)[1]) although the comment says "single sample" --
        # confirm whether shape[0] was intended.
        if np.shape(samples)[1] == 1.: #unwrap output if single sample
            predictions=predictions[0]
        return predictions
    def evaluate(self,x,y):
        # R^2-like score of the predictions for x vs. y.
        pred = self.predict(x)
        self.score = 1. - np.linalg.norm(pred-y)**2 \
            /np.linalg.norm(y-np.mean(y,axis=0))**2
        return self.score
class CrossValidationEstimator(Estimator):
    '''
    Estimator wrapper performing an n-fold cross validation.

    Hyperparameters for this estimator:
        cv_n_fold : number of splittings of the training data.
            E.g. for cv_n_fold = 5 the training data is split into 5
            equally sized partitions of which 4 are used for training
            and one for validation; the roles of the sets then switch
            until the estimator was tested on all partitions.
    '''

    def __init__(self, hyper_parameter_dict, estimator: Estimator):
        super().__init__(name='CV_Estimator_Wrapper',
                         type='Wrapper')
        self.estimator = estimator
        self.hyper_parameter_dict = hyper_parameter_dict
        self.extract_hyper_params_from_dict()
        self.pre_proc_dict = self.estimator.pre_proc_dict
        self.gen_error_emp = None
        self.batch_errors = None
        # BUGFIX: std_error_emp is returned by get_std_error() but was only
        # created inside fit(); initialize it so the getter cannot raise
        # AttributeError on an unfitted wrapper.
        self.std_error_emp = None

    def extract_hyper_params_from_dict(self):
        self.n_fold = self.hyper_parameter_dict.get('cv_n_fold', 1)

    def fit(self, x_train, y_train):
        """Run n-fold cross validation over (x_train, y_train), store the
        per-fold scores, then refit the wrapped estimator on the full set."""
        if not isinstance(x_train, np.ndarray):
            x_train = np.array(x_train)
        if x_train.ndim == 1:
            x_train.shape = (np.size(x_train), x_train.ndim)
        if not isinstance(y_train, np.ndarray):
            y_train = np.array(y_train)
        if y_train.ndim == 1:
            y_train.shape = (np.size(y_train), y_train.ndim)
        sample_number = np.shape(x_train)[0]
        if sample_number != np.shape(y_train)[0]:
            logging.error('training and target values have different first dimension'
                          '. Sample number missmatch.')
        # samples that do not fit into n_fold equal batches are left out of
        # the fold loop (but still used in the final refit below)
        reminder = sample_number % self.n_fold
        batch_size = int((sample_number - reminder) / self.n_fold)
        self.batch_errors = []
        for it in range(0, sample_number - reminder, batch_size):
            # BUGFIX (off-by-one): the original sliced [it:it+batch_size-1],
            # so the last sample of every fold ended up in neither the
            # training nor the validation set.
            test_batch = x_train[it:it + batch_size, :]
            train_batch = np.concatenate((x_train[:it, :], x_train[it + batch_size:, :]),
                                         axis=0)
            test_target = y_train[it:it + batch_size, :]
            train_target = np.concatenate((y_train[:it, :], y_train[it + batch_size:, :]),
                                          axis=0)
            self.estimator.fit(train_batch, train_target)
            batch_error = self.estimator.evaluate(test_batch, test_target)
            print('batch accuracy: ', batch_error)
            self.batch_errors.append(batch_error)
        self.estimator.fit(x_train, y_train)  # refit estimator on full training data
        self.score = np.mean(self.batch_errors)
        # if self.score <= 0.8:
        #     logging.warning('Cross Validation finished with average score: ',self.score,
        #                 '. Most likely unstable predictions. Try larger training data set.')
        self.std_error_emp = np.std(self.batch_errors)

    def predict(self, data):
        """Delegate prediction to the (refitted) wrapped estimator."""
        if not isinstance(data, np.ndarray):
            data = np.array(data)
        out = self.estimator.predict(data)
        return out

    def evaluate(self, x, y):
        """Delegate evaluation to the wrapped estimator."""
        return self.estimator.evaluate(x, y)

    def get_std_error(self):
        """Empirical standard deviation of the per-fold scores
        (None before fit())."""
        return self.std_error_emp
class K_means_scikit(Estimator):
    """K-means clustering wrapper around scikit-learn's KMeans.

    Hyperparameters for this estimator:
        cluster_number: number of clusters k (default 1)
    """

    def __init__(self, hyper_parameter_dictionary, pre_proc_dict=None):
        super().__init__(name='K_means_scikit', type='Clustering',
                         pre_proc_dict=pre_proc_dict)
        self.hyper_parameter_dictionary = hyper_parameter_dictionary
        # BUGFIX: extract_hyper_params_from_dict() reads
        # ``self.hyper_parameter_dict`` (the attribute name used by every
        # other estimator in this module), but only
        # ``self.hyper_parameter_dictionary`` was set -> AttributeError on
        # every construction. Store it under both names for compatibility.
        self.hyper_parameter_dict = hyper_parameter_dictionary
        self.extract_hyper_params_from_dict()
        self.cluster_centers = None
        self.labels = None

    def extract_hyper_params_from_dict(self):
        # .get instead of .pop: do not mutate the caller's dictionary
        # (consistent with the other estimators in this module).
        self.n_clusters = self.hyper_parameter_dict.get('cluster_number', 1)

    def fit(self, X_train):
        """Run k-means on X_train and store the labels and cluster centers."""
        if not isinstance(X_train, np.ndarray):
            X_train = np.array(X_train)
        if X_train.ndim == 1:
            # BUGFIX: the original reshaped a throwaway copy (the result of
            # .reshape was discarded) and then fitted on the untouched input;
            # reshape the array that is actually used.
            X_train = X_train.reshape((np.size(X_train), X_train.ndim))
        self._kmeans = KMeans(n_clusters=self.n_clusters)
        self._kmeans.fit(X_train)
        self.labels = self._kmeans.labels_
        self.cluster_centers = self._kmeans.cluster_centers_

    def predict(self, data):
        """Assign each sample in ``data`` to its nearest cluster center."""
        return self._kmeans.predict(data)

    def evaluate(self, data):
        """Return scikit-learn's k-means score (negative inertia) for ``data``."""
        return self._kmeans.score(data)

    def get_labels(self):
        """Cluster labels of the training data (None before fit())."""
        return self.labels
"tensorflow.reduce_sum",
"numpy.ones",
"numpy.shape",
"tensorflow.matmul",
"numpy.mean",
"numpy.linalg.norm",
"logging.error",
"neupy.algorithms.GRNN",
"logging.warning",
"numpy.std",
"sklearn.cluster.KMeans",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.initialize_all_variable... | [((416, 462), 'logging.warning', 'logging.warning', (['"""Could not import tensorflow"""'], {}), "('Could not import tensorflow')\n", (431, 462), False, 'import logging\n'), ((4642, 4774), 'sklearn.neural_network.MLPRegressor', 'mlpr', ([], {'solver': '"""lbfgs"""', 'hidden_layer_sizes': 'self._hidden_layers', 'activation': 'self.activation', 'alpha': 'self.alpha', 'max_iter': '(5000)'}), "(solver='lbfgs', hidden_layer_sizes=self._hidden_layers, activation=\n self.activation, alpha=self.alpha, max_iter=5000, **kw)\n", (4646, 4774), True, 'from sklearn.neural_network import MLPRegressor as mlpr\n'), ((7530, 7542), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7540, 7542), True, 'import tensorflow as tf\n'), ((8142, 8164), 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (8149, 8164), True, 'import tensorflow as tf\n'), ((8206, 8249), 'tensorflow.contrib.layers.l1_regularizer', 'tf.contrib.layers.l1_regularizer', (['self.beta'], {}), '(self.beta)\n', (8238, 8249), True, 'import tensorflow as tf\n'), ((10667, 10718), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._n_feature]'], {}), '(tf.float32, [None, self._n_feature])\n', (10681, 10718), True, 'import tensorflow as tf\n'), ((10731, 10783), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self._output_dim]'], {}), '(tf.float32, [None, self._output_dim])\n', (10745, 10783), True, 'import tensorflow as tf\n'), ((11218, 11247), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (11245, 11247), True, 'import tensorflow as tf\n'), ((11892, 11923), 'tensorflow.square', 'tf.square', (['(y_test - logits_test)'], {}), '(y_test - logits_test)\n', (11901, 11923), True, 'import tensorflow as tf\n'), ((12314, 12326), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12322, 12326), True, 'import numpy as np\n'), ((12334, 12346), 'numpy.array', 'np.array', (['[]'], {}), 
'([])\n', (12342, 12346), True, 'import numpy as np\n'), ((13628, 13684), 'numpy.zeros', 'np.zeros', (['(data_shape[0], 1 + self.ndim * data_shape[1])'], {}), '((data_shape[0], 1 + self.ndim * data_shape[1]))\n', (13636, 13684), True, 'import numpy as np\n'), ((13697, 13719), 'numpy.ones', 'np.ones', (['data_shape[0]'], {}), '(data_shape[0])\n', (13704, 13719), True, 'import numpy as np\n'), ((13906, 13943), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (13922, 13943), False, 'from sklearn.linear_model import LinearRegression\n'), ((20606, 20632), 'numpy.mean', 'np.mean', (['self.batch_errors'], {}), '(self.batch_errors)\n', (20613, 20632), True, 'import numpy as np\n'), ((20884, 20909), 'numpy.std', 'np.std', (['self.batch_errors'], {}), '(self.batch_errors)\n', (20890, 20909), True, 'import numpy as np\n'), ((21982, 22016), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.n_clusters'}), '(n_clusters=self.n_clusters)\n', (21988, 22016), True, 'from sklearn.cluster import KMeans as KMeans\n'), ((8367, 8389), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (8380, 8389), True, 'import tensorflow as tf\n'), ((9813, 9836), 'tensorflow.name_scope', 'tf.name_scope', (['"""output"""'], {}), "('output')\n", (9826, 9836), True, 'import tensorflow as tf\n'), ((10489, 10604), 'logging.warning', 'logging.warning', (['"""< DNN_Regressor_tf > has already been trained!re-training estimator on new input data!"""'], {}), "(\n '< DNN_Regressor_tf > has already been trained!re-training estimator on new input data!'\n )\n", (10504, 10604), False, 'import logging\n'), ((10898, 10922), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reg_terms'], {}), '(reg_terms)\n', (10911, 10922), True, 'import tensorflow as tf\n'), ((11938, 11957), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['_tmp'], {}), '(_tmp)\n', (11951, 11957), True, 'import tensorflow as tf\n'), ((13543, 
13554), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (13551, 13554), True, 'import numpy as np\n'), ((14367, 14384), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (14375, 14384), True, 'import numpy as np\n'), ((15898, 15915), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (15906, 15915), True, 'import numpy as np\n'), ((16137, 16154), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (16145, 16154), True, 'import numpy as np\n'), ((17081, 17118), 'neupy.algorithms.GRNN', 'grnn', ([], {'std': '(self._gamma[it] * self._std)'}), '(std=self._gamma[it] * self._std)\n', (17085, 17118), True, 'from neupy.algorithms import GRNN as grnn\n'), ((17367, 17384), 'numpy.array', 'np.array', (['samples'], {}), '(samples)\n', (17375, 17384), True, 'import numpy as np\n'), ((19166, 19183), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (19174, 19183), True, 'import numpy as np\n'), ((19343, 19360), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (19351, 19360), True, 'import numpy as np\n'), ((19475, 19492), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (19483, 19492), True, 'import numpy as np\n'), ((19558, 19668), 'logging.error', 'logging.error', (['"""training and target values have different first dimension. Sample number missmatch."""'], {}), "(\n 'training and target values have different first dimension. 
Sample number missmatch.'\n )\n", (19571, 19668), False, 'import logging\n'), ((19974, 20045), 'numpy.concatenate', 'np.concatenate', (['(x_train[:it, :], x_train[it + batch_size:, :])'], {'axis': '(0)'}), '((x_train[:it, :], x_train[it + batch_size:, :]), axis=0)\n', (19988, 20045), True, 'import numpy as np\n'), ((20165, 20236), 'numpy.concatenate', 'np.concatenate', (['(y_train[:it, :], y_train[it + batch_size:, :])'], {'axis': '(0)'}), '((y_train[:it, :], y_train[it + batch_size:, :]), axis=0)\n', (20179, 20236), True, 'import numpy as np\n'), ((21002, 21016), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (21010, 21016), True, 'import numpy as np\n'), ((21842, 21859), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (21850, 21859), True, 'import numpy as np\n'), ((8768, 8802), 'tensorflow.zeros', 'tf.zeros', (['[self._hidden_layers[0]]'], {}), '([self._hidden_layers[0]])\n', (8776, 8802), True, 'import tensorflow as tf\n'), ((8840, 8861), 'tensorflow.matmul', 'tf.matmul', (['x', 'weights'], {}), '(x, weights)\n', (8849, 8861), True, 'import tensorflow as tf\n'), ((8900, 8970), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizer', '[weights, biases]'], {}), '(regularizer, [weights, biases])\n', (8938, 8970), True, 'import tensorflow as tf\n'), ((10107, 10135), 'tensorflow.zeros', 'tf.zeros', (['[self._output_dim]'], {}), '([self._output_dim])\n', (10115, 10135), True, 'import tensorflow as tf\n'), ((10172, 10202), 'tensorflow.matmul', 'tf.matmul', (['hidden[-1]', 'weights'], {}), '(hidden[-1], weights)\n', (10181, 10202), True, 'import tensorflow as tf\n'), ((10295, 10365), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizer', '[weights, biases]'], {}), '(regularizer, [weights, biases])\n', (10333, 10365), True, 'import tensorflow as tf\n'), ((10988, 11033), 'tensorflow.train.GradientDescentOptimizer', 
'tf.train.GradientDescentOptimizer', (['self.alpha'], {}), '(self.alpha)\n', (11021, 11033), True, 'import tensorflow as tf\n'), ((12431, 12469), 'numpy.linalg.norm', 'np.linalg.norm', (['((y_test - y_pred) ** 2)'], {}), '((y_test - y_pred) ** 2)\n', (12445, 12469), True, 'import numpy as np\n'), ((15975, 15991), 'numpy.size', 'np.size', (['x_train'], {}), '(x_train)\n', (15982, 15991), True, 'import numpy as np\n'), ((16276, 16292), 'numpy.size', 'np.size', (['y_train'], {}), '(y_train)\n', (16283, 16292), True, 'import numpy as np\n'), ((16922, 16944), 'numpy.std', 'np.std', (['x_train[:, it]'], {}), '(x_train[:, it])\n', (16928, 16944), True, 'import numpy as np\n'), ((17035, 17052), 'numpy.shape', 'np.shape', (['y_train'], {}), '(y_train)\n', (17043, 17052), True, 'import numpy as np\n'), ((19243, 19259), 'numpy.size', 'np.size', (['x_train'], {}), '(x_train)\n', (19250, 19259), True, 'import numpy as np\n'), ((19420, 19436), 'numpy.size', 'np.size', (['y_train'], {}), '(y_train)\n', (19427, 19436), True, 'import numpy as np\n'), ((19524, 19541), 'numpy.shape', 'np.shape', (['y_train'], {}), '(y_train)\n', (19532, 19541), True, 'import numpy as np\n'), ((9442, 9482), 'tensorflow.zeros', 'tf.zeros', (['[self._hidden_layers[ind + 1]]'], {}), '([self._hidden_layers[ind + 1]])\n', (9450, 9482), True, 'import tensorflow as tf\n'), ((9699, 9769), 'tensorflow.contrib.layers.apply_regularization', 'tf.contrib.layers.apply_regularization', (['regularizer', '[weights, biases]'], {}), '(regularizer, [weights, biases])\n', (9737, 9769), True, 'import tensorflow as tf\n'), ((12144, 12175), 'tensorflow.square', 'tf.square', (['(y_test - logits_test)'], {}), '(y_test - logits_test)\n', (12153, 12175), True, 'import tensorflow as tf\n'), ((14546, 14570), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred - y)'], {}), '(pred - y)\n', (14560, 14570), True, 'import numpy as np\n'), ((16874, 16891), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (16882, 16891), True, 
'import numpy as np\n'), ((17417, 17434), 'numpy.shape', 'np.shape', (['samples'], {}), '(samples)\n', (17425, 17434), True, 'import numpy as np\n'), ((17626, 17643), 'numpy.shape', 'np.shape', (['samples'], {}), '(samples)\n', (17634, 17643), True, 'import numpy as np\n'), ((17843, 17867), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred - y)'], {}), '(pred - y)\n', (17857, 17867), True, 'import numpy as np\n'), ((11991, 12013), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_test'], {}), '(y_test)\n', (12005, 12013), True, 'import tensorflow as tf\n'), ((21927, 21943), 'numpy.size', 'np.size', (['X_train'], {}), '(X_train)\n', (21934, 21943), True, 'import numpy as np\n'), ((9601, 9627), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'weights'], {}), '(inputs, weights)\n', (9610, 9627), True, 'import tensorflow as tf\n'), ((12229, 12251), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_test'], {}), '(y_test)\n', (12243, 12251), True, 'import tensorflow as tf\n'), ((12504, 12527), 'numpy.mean', 'np.mean', (['y_test'], {'axis': '(0)'}), '(y_test, axis=0)\n', (12511, 12527), True, 'import numpy as np\n'), ((14612, 14630), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (14619, 14630), True, 'import numpy as np\n'), ((17909, 17927), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (17916, 17927), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#################################################################
# File : camera_compare.py
# Version : 0.0.1
# Author : sebi06
# Date : 21.10.2021
#
#
# This code probably does not reflect the latest new technologies
# of microscope cameras anymore but is hopefully still useful to compare cameras
# and understand the lines of reasoning when choosing the "right" camera
#
# Disclaimer: The code is purely experimental. Feel free to
# use it at your own risk.
#
#################################################################
from __future__ import annotations
from PyQt5 import QtWidgets, QtGui, uic
from PyQt5 import QtCore
import sys
import os
import numpy as np
from typing import List, Dict, Tuple, Optional, Type, Any, Union
class MainWindow(QtWidgets.QMainWindow):
    """Main application window of the camera-comparison tool.

    Loads ``mainwindow.ui``, holds two :class:`Camera` / :class:`Microscope`
    pairs, plots their SNR-vs-photon-flux curves and keeps the plot in sync
    with every UI control via Qt signal connections.
    """

    def __init__(self, *args, **kwargs):
        """Load the UI, set camera/optics defaults, draw the initial SNR
        plot and connect all UI widgets to their update handlers."""
        super(MainWindow, self).__init__(*args, **kwargs)
        # Load the UI Page
        uic.loadUi('mainwindow.ui', self)
        # one way to modify the label colors: via the widget palette
        palr = self.label_camera1L.palette()
        palg = self.label_camera1R.palette()
        palr.setColor(QtGui.QPalette.WindowText, QtGui.QColor("red"))
        palg.setColor(QtGui.QPalette.WindowText, QtGui.QColor("green"))
        self.label_camera1L.setPalette(palr)
        self.label_camera1R.setPalette(palr)
        self.label_camera2L.setPalette(palg)
        self.label_camera2R.setPalette(palg)
        # another way to modify the color: via stylesheets
        self.name1.setStyleSheet("""QLineEdit {color: red }""")
        self.name2.setStyleSheet("""QLineEdit {color: green }""")
        self.phf1.setStyleSheet("""QSpinBox {color: red }""")
        self.phf2.setStyleSheet("""QLineEdit {color: green }""")
        self.addmag1.setStyleSheet("""QDoubleSpinBox {color: red }""")
        self.addmag2.setStyleSheet("""QDoubleSpinBox {color: green }""")
        self.objmag1.setStyleSheet("""QDoubleSpinBox {color: red }""")
        self.objmag2.setStyleSheet("""QDoubleSpinBox {color: green }""")
        self.objna1.setStyleSheet("""QDoubleSpinBox {color: red }""")
        self.objna2.setStyleSheet("""QDoubleSpinBox {color: green }""")
        # define default values for objectives and update ui elements
        objmag = 20
        objna = 0.7
        addmag = 1.0
        # define other default values and update ui elements
        emwl = 520
        phf = 50
        sampling = 2.0
        # store them
        self.objmag1.setValue(objmag)
        self.objmag2.setValue(objmag)
        self.objna1.setValue(objna)
        self.objna2.setValue(objna)
        self.addmag1.setValue(addmag)
        self.addmag2.setValue(addmag)
        # define default values for optics etc.
        self.mic1 = Microscope(name="Mic1",
                               objmag=objmag,
                               objna=objna,
                               addmag=addmag)
        self.mic2 = Microscope(name="Mic2",
                               objmag=objmag,
                               objna=objna,
                               addmag=addmag)
        self.emwl_value = emwl
        self.emwl.setValue(emwl)
        self.phf1_value = phf
        self.phf1.setValue(phf)
        self.sampling_value = sampling
        self.nyq.setValue(sampling)
        # define camera types
        camera_types = ["CCD", "EM-CCD", "CMOS"]
        # define defaults for camera 1
        name1 = "cam1"
        type1 = "CCD"
        gain1 = 1
        bin1 = 1
        qe1 = 0.74
        pixsize1 = 4.54
        readout1 = 6.5
        dark1 = 0.06
        cic1 = 0.0
        # define defaults for camera 2
        name2 = "cam2"
        type2 = "EM-CCD"
        gain2 = 150
        bin2 = 1
        qe2 = 0.94
        pixsize2 = 12.0
        readout2 = 130
        dark2 = 0.0003
        cic2 = 0.006
        # initialize two cameras with default values
        self.cam1 = Camera(name=name1,
                           qe=qe1,
                           pixsize=pixsize1,
                           binning=bin1,
                           cameratype=type1,
                           emgain=gain1,
                           readout=readout1,
                           dark=dark1,
                           cic=cic1)
        # update the UI elements with the chosen defaults
        self.name1.setText(name1)
        self.qe1.setValue(qe1)
        self.type1.setCurrentIndex(camera_types.index(type1))
        self.bin1.setCurrentIndex(bin1 - 1)
        self.pixsize1.setValue(pixsize1)
        self.readnoise1.setValue(readout1)
        self.emgain1.setValue(gain1)
        self.dark1.setValue(dark1)
        self.cic1.setValue(cic1)
        self.cam2 = Camera(name=name2,
                           qe=qe2,
                           pixsize=pixsize2,
                           binning=bin2,
                           cameratype=type2,
                           emgain=gain2,
                           readout=readout2,
                           dark=dark2,
                           cic=cic2)
        # check the camera type and enable or disable the gain
        self.checktype()
        self.name2.setText(name2)
        self.qe2.setValue(qe2)
        self.type2.setCurrentIndex(camera_types.index(type2))
        self.bin2.setCurrentIndex(bin2 - 1)
        self.pixsize2.setValue(pixsize2)
        self.readnoise2.setValue(readout2)
        self.emgain2.setValue(gain2)
        self.dark2.setValue(dark2)
        self.cic2.setValue(cic2)
        # adapt the noise factor and readout noise
        self.cam1 = adapt_noise_readout(self.cam1)
        self.cam2 = adapt_noise_readout(self.cam2)
        self.noisef1.setText(str(self.cam1.nf))
        self.noisef2.setText(str(self.cam2.nf))
        # calculate the values for both cameras
        self.cp1, self.cp2 = calc_values(self.cam1, self.cam2, self.mic1, self.mic2,
                                         emwl=self.emwl_value,
                                         phf=self.phf1_value,
                                         sampling=self.sampling_value)
        print("Cameras initialized and values calculated.")
        # update ui
        self.phf2.setText(str(self.cp2["flux"]))
        self.sizef1.setText("1.00")
        self.sizef2.setText(str(self.cp2["corrf_pixarea"]))
        # update values for the pixel sizes
        self.piximage1.setText(str(self.cp1["piximage"]))
        self.piximage2.setText(str(self.cp2["piximage"]))
        self.pixrequired1.setText(str(self.cp1["req_pixsize"]))
        self.pixrequired2.setText(str(self.cp2["req_pixsize"]))
        # configure plot
        self.MplWidget.canvas.axes.set_title("Camera SNR Plot", size=18, weight="bold")
        self.MplWidget.canvas.axes.set_xlabel("Photons / Pixel / Frame", size=14, weight="bold")
        self.MplWidget.canvas.axes.set_ylabel("SNR Ratio", size=14, weight="bold")
        self.MplWidget.canvas.axes.grid(True, linestyle="--")
        self.MplWidget.canvas.axes.set_xlim(0, 200)
        self.MplWidget.canvas.axes.set_ylim(0, 10)
        # plot SNR curves (returns a tuple of line objects, thus the comma)
        self.snr1_curve, = self.MplWidget.canvas.axes.plot(self.cp1["phf"], self.cp1["snr"], "r-", lw=4, label="SNR 1")
        self.snr2_curve, = self.MplWidget.canvas.axes.plot(self.cp1["phf"], self.cp2["snr"], "g-", lw=4, label="SNR 2")
        # plot indicator lines (returns a tuple of line objects, thus the comma)
        self.indicator1_line, = self.MplWidget.canvas.axes.plot(self.cp1["phindx"], self.cp1["phindy"], "r--", lw=3, label="PH 1")
        self.indicator2_line, = self.MplWidget.canvas.axes.plot(self.cp2["phindx"], self.cp2["phindy"], "g--", lw=3, label="PH 2")
        self.MplWidget.canvas.axes.legend()
        self.update_plot()
        # connect scaling values for the plot
        self.xscale_min.valueChanged.connect(self.change_scale)
        self.xscale_max.valueChanged.connect(self.change_scale)
        self.yscale_min.valueChanged.connect(self.change_scale)
        self.yscale_max.valueChanged.connect(self.change_scale)
        # connect binning selectors
        self.bin1.currentIndexChanged.connect(self.change_binning)
        self.bin2.currentIndexChanged.connect(self.change_binning)
        # connect qe values
        self.qe1.valueChanged.connect(self.change_qe)
        self.qe2.valueChanged.connect(self.change_qe)
        # connect pixel size values
        self.pixsize1.valueChanged.connect(self.change_pix)
        self.pixsize2.valueChanged.connect(self.change_pix)
        # connect readout noise values
        self.readnoise1.valueChanged.connect(self.change_readoutnoise)
        self.readnoise2.valueChanged.connect(self.change_readoutnoise)
        # connect camera type values
        self.type1.currentIndexChanged.connect(self.change_type)
        self.type2.currentIndexChanged.connect(self.change_type)
        # connect emgain values
        self.emgain1.valueChanged.connect(self.change_gain)
        self.emgain2.valueChanged.connect(self.change_gain)
        # connect dark current values
        self.dark1.valueChanged.connect(self.change_dark)
        self.dark2.valueChanged.connect(self.change_dark)
        # connect the CIC noise values
        self.cic1.valueChanged.connect(self.change_cic)
        self.cic2.valueChanged.connect(self.change_cic)
        # connect objective magnification values
        self.objmag1.valueChanged.connect(self.change_objmag)
        self.objmag2.valueChanged.connect(self.change_objmag)
        # connect numerical aperture values
        self.objna1.valueChanged.connect(self.change_objna)
        self.objna2.valueChanged.connect(self.change_objna)
        # connect additional magnification values
        self.addmag1.valueChanged.connect(self.change_addmag)
        self.addmag2.valueChanged.connect(self.change_addmag)
        # connect sampling value
        self.nyq.valueChanged.connect(self.change_sampling)
        # connect EM-WL value
        self.emwl.valueChanged.connect(self.change_emwl)
        # connect photon flux value
        self.phf1.valueChanged.connect(self.change_flux)

    # check camera type and selected gain
    def checktype(self) -> None:
        """Enable/disable the EM-gain widgets depending on camera type.

        CCD and CMOS cameras have no EM register, so their gain is forced
        to 1 and their CIC to 0; only EM-CCDs get an editable gain box.
        """
        # disable EM gain if CMOS or CCD
        if self.cam1.cameratype == "CMOS" or self.cam1.cameratype == "CCD":
            # disable the spinbox
            self.emgain1.setDisabled(True)
            # set emgain value for cam1 = 1
            self.cam1.emgain = 1
            # set the value for the spinbox = 1
            self.emgain1.setValue(1)
            # set CIC to zero
            self.cic1.setValue(0.0)
            self.cam1.cic = 0.0
        elif self.cam1.cameratype == "EM-CCD":
            # enable the spinbox
            self.emgain1.setEnabled(True)
        if self.cam2.cameratype == "CMOS" or self.cam2.cameratype == "CCD":
            self.emgain2.setDisabled(True)
            # NOTE(review): this looks like a copy-paste bug -- the cam2
            # branch resets self.cam1.emgain instead of self.cam2.emgain.
            # Confirm and change to self.cam2.emgain = 1.
            self.cam1.emgain = 1
            self.emgain2.setValue(1)
            self.cic2.setValue(0.0)
            self.cam2.cic = 0.0
        elif self.cam2.cameratype == "EM-CCD":
            self.emgain2.setEnabled(True)

    # modify plot
    def change_scale(self: QtWidgets.QMainWindow) -> None:
        """Apply the axis limits from the four scale spin boxes to the plot."""
        # change the range for both axis
        self.MplWidget.canvas.axes.set_xlim(self.xscale_min.value(), self.xscale_max.value())
        self.MplWidget.canvas.axes.set_ylim(self.yscale_min.value(), self.yscale_max.value())
        # update the plot
        self.MplWidget.canvas.draw()

    # change camera parameters
    def change_binning(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' binning from the combo boxes and re-plot."""
        # change binning values (combo index 0 corresponds to binning 1)
        self.cam1.binning = self.bin1.currentIndex() + 1
        self.cam2.binning = self.bin2.currentIndex() + 1
        # adapt the noise factor and readout noise
        self.cam1 = adapt_noise_readout(self.cam1)
        self.cam2 = adapt_noise_readout(self.cam2)
        self.noisef1.setText(str(self.cam1.nf))
        self.noisef2.setText(str(self.cam2.nf))
        # update the plot and redraw
        self.update_plot()

    def change_qe(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' quantum efficiency and re-plot."""
        # change the qe values
        self.cam1.qe = self.qe1.value()
        self.cam2.qe = self.qe2.value()
        # update the plot and redraw
        self.update_plot()

    def change_pix(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' physical pixel size and re-plot."""
        # change the pixel size values
        self.cam1.pixsize = self.pixsize1.value()
        self.cam2.pixsize = self.pixsize2.value()
        # update the plot and redraw
        self.update_plot()

    def change_readoutnoise(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' readout noise and re-plot."""
        # change the readout noise
        self.cam1.readout = self.readnoise1.value()
        self.cam2.readout = self.readnoise2.value()
        # adapt the noise factor and readout noise
        self.cam1 = adapt_noise_readout(self.cam1)
        self.cam2 = adapt_noise_readout(self.cam2)
        self.noisef1.setText(str(self.cam1.nf))
        self.noisef2.setText(str(self.cam2.nf))
        # update the plot and redraw
        self.update_plot()

    def change_dark(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' dark current and re-plot."""
        # change the dark current
        self.cam1.dark = self.dark1.value()
        self.cam2.dark = self.dark2.value()
        # update the plot and redraw
        self.update_plot()

    def change_cic(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' clock-induced charge and re-plot."""
        # change the CIC values
        self.cam1.cic = self.cic1.value()
        self.cam2.cic = self.cic2.value()
        # update the plot and redraw
        self.update_plot()

    def change_gain(self: QtWidgets.QMainWindow) -> None:
        """Update both cameras' EM gain and re-plot."""
        # change the camera gain
        self.cam1.emgain = self.emgain1.value()
        self.cam2.emgain = self.emgain2.value()
        # update the plot and redraw
        self.update_plot()

    def change_type(self: QtWidgets.QMainWindow) -> None:
        """Update the camera types, adjust dependent widgets and re-plot."""
        # change the camera type
        self.cam1.cameratype = self.type1.currentText()
        self.cam2.cameratype = self.type2.currentText()
        if self.cam1.cameratype == "CCD" or self.cam1.cameratype == "CMOS":
            self.emgain1.setValue(1)
        if self.cam2.cameratype == "CCD" or self.cam2.cameratype == "CMOS":
            self.emgain2.setValue(1)
        # check the camera type and adjust UI
        self.checktype()
        # adapt the noise factor and readout noise
        self.cam1 = adapt_noise_readout(self.cam1)
        self.cam2 = adapt_noise_readout(self.cam2)
        self.noisef1.setText(str(self.cam1.nf))
        self.noisef2.setText(str(self.cam2.nf))
        # update the plot and redraw
        self.update_plot()

    # change optics
    def change_addmag(self: QtWidgets.QMainWindow) -> None:
        """Update both paths' additional magnification and re-plot."""
        # change the additional magnification
        self.mic1.addmag = self.addmag1.value()
        self.mic2.addmag = self.addmag2.value()
        # update the plot and redraw
        self.update_plot()

    def change_objmag(self: QtWidgets.QMainWindow) -> None:
        """Update both objectives' magnification and re-plot."""
        # change the objective magnification
        self.mic1.objmag = self.objmag1.value()
        self.mic2.objmag = self.objmag2.value()
        # update the plot and redraw
        self.update_plot()

    def change_objna(self: QtWidgets.QMainWindow) -> None:
        """Update both objectives' numerical aperture and re-plot."""
        # change the numerical aperture
        self.mic1.objna = self.objna1.value()
        self.mic2.objna = self.objna2.value()
        # update the plot and redraw
        self.update_plot()

    def change_sampling(self: QtWidgets.QMainWindow) -> None:
        """Update the Nyquist sampling factor and re-plot."""
        # change the sampling value
        self.sampling_value = self.nyq.value()
        # update the plot and redraw
        self.update_plot()

    def change_emwl(self: QtWidgets.QMainWindow) -> None:
        """Update the emission wavelength and re-plot."""
        # change the emission wavelength
        self.emwl_value = self.emwl.value()
        # update the plot and redraw
        self.update_plot()

    # change the photon flux
    def change_flux(self: QtWidgets.QMainWindow) -> None:
        """Update the reference photon flux for camera 1 and re-plot."""
        # change the photon flux
        self.phf1_value = self.phf1.value()
        # update the plot and redraw
        self.update_plot()

    # update the plot and UI
    def update_plot(self) -> None:
        """Recompute all derived values and refresh curves, indicator lines
        and dependent UI fields (pixel sizes, flux, size factors)."""
        # recalculate the values
        self.cp1, self.cp2 = calc_values(self.cam1, self.cam2, self.mic1, self.mic2,
                                         emwl=self.emwl_value,
                                         phf=self.phf1_value,
                                         sampling=self.sampling_value)
        # update the line data for the SNR curves
        self.snr1_curve.set_xdata(self.cp1["phf"])
        self.snr1_curve.set_ydata(self.cp1["snr"])
        self.snr2_curve.set_xdata(self.cp1["phf"])
        self.snr2_curve.set_ydata(self.cp2["snr"])
        # update the line data for the indicator lines
        self.indicator1_line.set_xdata(self.cp1["phindx"])
        self.indicator1_line.set_ydata(self.cp1["phindy"])
        self.indicator2_line.set_xdata(self.cp2["phindx"])
        self.indicator2_line.set_ydata(self.cp2["phindy"])
        # update the size factors and photon flux
        self.sizef2.setText(str(self.cp2["corrf_pixarea"]))
        self.phf2.setText(str(self.cp2["flux"]))
        # update pixel sizes
        self.piximage1.setText(str(self.cp1["piximage"]))
        self.piximage2.setText(str(self.cp2["piximage"]))
        self.pixrequired1.setText(str(self.cp1["req_pixsize"]))
        self.pixrequired2.setText(str(self.cp2["req_pixsize"]))
        # setting background color for the pixel size in the UI
        if self.cp1["piximage"] > self.cp1["req_pixsize"]:
            # if the pixel size is too big set it to orange
            self.piximage1.setStyleSheet("""QLineEdit { background-color: orange;}""")
        else:
            # if the pixel size fulfils Nyquist set it to green
            self.piximage1.setStyleSheet("""QLineEdit { background-color: lightgreen;}""")
        if self.cp2["piximage"] > self.cp2["req_pixsize"]:
            self.piximage2.setStyleSheet("""QLineEdit { background-color: orange;}""")
        else:
            self.piximage2.setStyleSheet("""QLineEdit { background-color: lightgreen;}""")
        # update the whole plot
        self.MplWidget.canvas.draw()
        self.MplWidget.canvas.flush_events()
class Camera:
    def __init__(self, name: str = "Camera1",
                 qe: float = 0.75,
                 pixsize: float = 4.54,
                 binning: int = 1,
                 cameratype: str = "CCD",
                 emgain: int = 1,
                 readout: float = 1.0,
                 noisefactor: float = 1.0,
                 dark: float = 0.005,
                 cic: float = 0.0) -> None:
        """Container for the physical parameters of a scientific camera.

        :param name: name of the camera
        :param qe: quantum efficiency
        :param pixsize: physical pixel size of camera
        :param binning: binning
        :param cameratype: type of camera - CCD, CMOS or EM-CCD
        :param emgain: EM-Gain, will be set to 1 for CCD or CMOS
        :param readout: readout noise
        :param noisefactor: noise factor for EM-CCD
        :param dark: dark current
        :param cic: clock-induced charge
        """
        # unknown camera types fall back to a plain CCD
        if not cameratype in ["CCD", "CMOS", "EM-CCD"]:
            cameratype = "CCD"
            print("Specified CameraType is not valid. Use CCD as fallback")
        # store all the parameters
        # fix: the name parameter was previously accepted but never stored
        self.name = name
        self.qe = qe
        self.pixsize = pixsize
        self.binning = binning
        self.cameratype = cameratype
        self.emgain = emgain
        self.readout = readout
        # effective readout noise; adjusted later per camera type / binning
        self.readout_mod = readout
        self.nf = noisefactor
        self.dark = dark
        self.cic = cic
class Microscope:
    def __init__(self, name: str = "Mic1",
                 objmag: float = 20.0,
                 objna: float = 0.95,
                 addmag: float = 1.0) -> None:
        """Container for the optical parameters of one detection path.

        :param name: name of objective, eg. the respective "arm" of the detection system
        :param objmag: magnificaton factor
        :param objna: numerical aperture of the objective
        :param addmag: additional magnification in front of the camera (C-mount etc.)
        """
        # fix: the name used to be stored only under the misleading
        # attribute "mic1"; keep that attribute for backward compatibility
        # and expose the value under the intended name as well.
        self.mic1 = name
        self.name = name
        self.objmag = objmag
        self.objna = objna
        self.addmag = addmag
def calc_values(cam1: Camera, cam2: Camera, mic1: Microscope, mic2: Microscope,
                emwl: int = 520,
                phf: int = 50,
                sampling: float = 2.0) -> Tuple[Dict, Dict]:
    """Calculate SNR curves and sampling figures for two camera/optics pairs.

    Camera 1 is the reference: its photon flux ``phf`` is scaled by the
    pixel-area ratio to obtain the equivalent flux for camera 2.

    :param cam1: first camera (an instance; the old ``type[Camera]``
        annotation was wrong -- instances are passed, not classes)
    :param cam2: second camera instance
    :param mic1: optics of the first detection path
    :param mic2: optics of the second detection path
    :param emwl: emission wavelength [nm]
    :param phf: photon flux per pixel per frame for camera 1
    :param sampling: sampling factor (2.0 = Nyquist)
    :return: tuple of dicts (cp1, cp2) with keys ``piximage``,
        ``req_pixsize``, ``flux``, ``snr_value``, ``phindx``/``phindy``
        (indicator line coordinates) and, for cp1, the shared ``phf``/``snr``
        arrays; cp2 additionally holds ``corrf_pixarea`` and ``snr``.
    """
    cp1 = {}
    cp2 = {}
    cp1["flux"] = phf
    # pixel size in image plane incl. binning
    cp1["piximage"] = float(np.round(cam1.pixsize * cam1.binning / (mic1.objmag * mic1.addmag), 3))
    cp2["piximage"] = float(np.round(cam2.pixsize * cam2.binning / (mic2.objmag * mic2.addmag), 3))
    # required pixel size in the image plane to fulfil Nyquist
    cp1["req_pixsize"] = float(np.round(0.61 * (emwl / 1000) / (sampling * mic1.objna), 3))
    cp2["req_pixsize"] = float(np.round(0.61 * (emwl / 1000) / (sampling * mic2.objna), 3))
    # correction factor for the pixel area (camera 1 is the reference);
    # the former dead assignment of 1.00 right before this line was removed
    cp2["corrf_pixarea"] = float(np.round((cp2["piximage"] ** 2) / (cp1["piximage"] ** 2), 2))
    # create ph vector containing the number of detected photons and use for both cameras
    cp1["phf"] = np.arange(0, 500, 1, dtype=np.int16)
    # calculation of SNR including CIC - Clock Induced Charge
    cp1["snr"] = (cam1.qe * cp1["phf"] / np.sqrt(cam1.nf**2 * (cam1.qe * cp1["phf"] + cam1.dark **
                                                          2 + cam1.cic**2) + (cam1.readout_mod**2 / cam1.emgain**2))).astype(float)
    cp2["snr"] = (cam2.qe * cp1["phf"] / np.sqrt(cam2.nf**2 * (cam2.qe * cp1["phf"] + cam2.dark **
                                                          2 + cam2.cic**2) + (cam2.readout_mod**2 / cam2.emgain**2))).astype(float)
    # scale the reference flux by the pixel-area ratio for camera 2
    cp2["flux"] = (np.round(cp1["flux"] * cp2["corrf_pixarea"], 0)).astype(int)
    # calculate explicit SNR values at the respective flux
    cp1["snr_value"] = ((cam1.qe * cp1["flux"]) / np.sqrt(cam1.nf**2 * (cam1.qe * cp1["flux"] + cam1.dark **
                                                                2 + cam1.cic**2) + (cam1.readout_mod**2 / cam1.emgain**2))).astype(float)
    cp2["snr_value"] = ((cam2.qe * cp2["flux"]) / np.sqrt(cam2.nf**2 * (cam2.qe * cp2["flux"] + cam2.dark **
                                                                2 + cam2.cic**2) + (cam2.readout_mod**2 / cam2.emgain**2))).astype(float)
    # coordinates of the L-shaped indicator lines in the SNR plot
    cp1["phindx"] = np.array([cp1["flux"], cp1["flux"], 0])
    cp1["phindy"] = np.array([0, cp1["snr_value"], cp1["snr_value"]])
    cp2["phindx"] = np.array([cp2["flux"], cp2["flux"], 0])
    cp2["phindy"] = np.array([0, cp2["snr_value"], cp2["snr_value"]])
    return cp1, cp2
def adapt_noise_readout(cam: Camera) -> Camera:
    """Adjust noise factor and effective readout noise to the camera type.

    - CCD: no EM register -> noise factor 1.0, gain 1, CIC 0.
    - EM-CCD: excess noise factor 1.41 (~sqrt(2)) from the EM register.
    - CMOS: readout noise grows with sqrt(binning) because binning is
      performed digitally after readout.

    :param cam: camera to adjust (mutated in place)
    :return: the same camera object
    """
    if cam.cameratype == "CCD":
        # reset noise factor and gain in case of a normal CCD
        cam.nf = 1.0
        cam.emgain = 1
        cam.cic = 0.0
        cam.readout_mod = cam.readout
    elif cam.cameratype == "EM-CCD":
        cam.nf = 1.41
        cam.readout_mod = cam.readout
    elif cam.cameratype == "CMOS":
        # bug fix: previously the noise factor was never reset for CMOS,
        # so a stale EM-CCD value (1.41) survived a type switch to CMOS
        cam.nf = 1.0
        cam.readout_mod = cam.readout * np.sqrt(cam.binning)
    return cam
def main():
    """Create the Qt application, show the main window and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    # note: the original bound the window to a local named "main",
    # shadowing this function; a distinct name avoids the confusion
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| [
"PyQt5.QtGui.QColor",
"PyQt5.uic.loadUi",
"numpy.array",
"numpy.arange",
"PyQt5.QtWidgets.QApplication",
"numpy.round",
"numpy.sqrt"
] | [((21315, 21351), 'numpy.arange', 'np.arange', (['(0)', '(500)', '(1)'], {'dtype': 'np.int16'}), '(0, 500, 1, dtype=np.int16)\n', (21324, 21351), True, 'import numpy as np\n'), ((22396, 22435), 'numpy.array', 'np.array', (["[cp1['flux'], cp1['flux'], 0]"], {}), "([cp1['flux'], cp1['flux'], 0])\n", (22404, 22435), True, 'import numpy as np\n'), ((22456, 22505), 'numpy.array', 'np.array', (["[0, cp1['snr_value'], cp1['snr_value']]"], {}), "([0, cp1['snr_value'], cp1['snr_value']])\n", (22464, 22505), True, 'import numpy as np\n'), ((22527, 22566), 'numpy.array', 'np.array', (["[cp2['flux'], cp2['flux'], 0]"], {}), "([cp2['flux'], cp2['flux'], 0])\n", (22535, 22566), True, 'import numpy as np\n'), ((22587, 22636), 'numpy.array', 'np.array', (["[0, cp2['snr_value'], cp2['snr_value']]"], {}), "([0, cp2['snr_value'], cp2['snr_value']])\n", (22595, 22636), True, 'import numpy as np\n'), ((23234, 23266), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (23256, 23266), False, 'from PyQt5 import QtWidgets, QtGui, uic\n'), ((963, 996), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""mainwindow.ui"""', 'self'], {}), "('mainwindow.ui', self)\n", (973, 996), False, 'from PyQt5 import QtWidgets, QtGui, uic\n'), ((20629, 20699), 'numpy.round', 'np.round', (['(cam1.pixsize * cam1.binning / (mic1.objmag * mic1.addmag))', '(3)'], {}), '(cam1.pixsize * cam1.binning / (mic1.objmag * mic1.addmag), 3)\n', (20637, 20699), True, 'import numpy as np\n'), ((20729, 20799), 'numpy.round', 'np.round', (['(cam2.pixsize * cam2.binning / (mic2.objmag * mic2.addmag))', '(3)'], {}), '(cam2.pixsize * cam2.binning / (mic2.objmag * mic2.addmag), 3)\n', (20737, 20799), True, 'import numpy as np\n'), ((20887, 20946), 'numpy.round', 'np.round', (['(0.61 * (emwl / 1000) / (sampling * mic1.objna))', '(3)'], {}), '(0.61 * (emwl / 1000) / (sampling * mic1.objna), 3)\n', (20895, 20946), True, 'import numpy as np\n'), ((20979, 21038), 'numpy.round', 'np.round', (['(0.61 
* (emwl / 1000) / (sampling * mic2.objna))', '(3)'], {}), '(0.61 * (emwl / 1000) / (sampling * mic2.objna), 3)\n', (20987, 21038), True, 'import numpy as np\n'), ((21145, 21201), 'numpy.round', 'np.round', (["(cp2['piximage'] ** 2 / cp1['piximage'] ** 2)", '(2)'], {}), "(cp2['piximage'] ** 2 / cp1['piximage'] ** 2, 2)\n", (21153, 21201), True, 'import numpy as np\n'), ((1175, 1194), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['"""red"""'], {}), "('red')\n", (1187, 1194), False, 'from PyQt5 import QtWidgets, QtGui, uic\n'), ((1245, 1266), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['"""green"""'], {}), "('green')\n", (1257, 1266), False, 'from PyQt5 import QtWidgets, QtGui, uic\n'), ((21862, 21909), 'numpy.round', 'np.round', (["(cp1['flux'] * cp2['corrf_pixarea'])", '(0)'], {}), "(cp1['flux'] * cp2['corrf_pixarea'], 0)\n", (21870, 21909), True, 'import numpy as np\n'), ((23173, 23193), 'numpy.sqrt', 'np.sqrt', (['cam.binning'], {}), '(cam.binning)\n', (23180, 23193), True, 'import numpy as np\n'), ((21456, 21582), 'numpy.sqrt', 'np.sqrt', (["(cam1.nf ** 2 * (cam1.qe * cp1['phf'] + cam1.dark ** 2 + cam1.cic ** 2) + \n cam1.readout_mod ** 2 / cam1.emgain ** 2)"], {}), "(cam1.nf ** 2 * (cam1.qe * cp1['phf'] + cam1.dark ** 2 + cam1.cic **\n 2) + cam1.readout_mod ** 2 / cam1.emgain ** 2)\n", (21463, 21582), True, 'import numpy as np\n'), ((21647, 21773), 'numpy.sqrt', 'np.sqrt', (["(cam2.nf ** 2 * (cam2.qe * cp1['phf'] + cam2.dark ** 2 + cam2.cic ** 2) + \n cam2.readout_mod ** 2 / cam2.emgain ** 2)"], {}), "(cam2.nf ** 2 * (cam2.qe * cp1['phf'] + cam2.dark ** 2 + cam2.cic **\n 2) + cam2.readout_mod ** 2 / cam2.emgain ** 2)\n", (21654, 21773), True, 'import numpy as np\n'), ((22011, 22138), 'numpy.sqrt', 'np.sqrt', (["(cam1.nf ** 2 * (cam1.qe * cp1['flux'] + cam1.dark ** 2 + cam1.cic ** 2) + \n cam1.readout_mod ** 2 / cam1.emgain ** 2)"], {}), "(cam1.nf ** 2 * (cam1.qe * cp1['flux'] + cam1.dark ** 2 + cam1.cic **\n 2) + cam1.readout_mod ** 2 / cam1.emgain ** 2)\n", (22018, 
22138), True, 'import numpy as np\n'), ((22218, 22345), 'numpy.sqrt', 'np.sqrt', (["(cam2.nf ** 2 * (cam2.qe * cp2['flux'] + cam2.dark ** 2 + cam2.cic ** 2) + \n cam2.readout_mod ** 2 / cam2.emgain ** 2)"], {}), "(cam2.nf ** 2 * (cam2.qe * cp2['flux'] + cam2.dark ** 2 + cam2.cic **\n 2) + cam2.readout_mod ** 2 / cam2.emgain ** 2)\n", (22225, 22345), True, 'import numpy as np\n')] |
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from .form import ImageForm , CreateUserForm
from .models import *
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.utils import timezone
# Create your views here.
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.template.loader import get_template
from xhtml2pdf import pisa
from tensorflow.keras.models import load_model, model_from_json
import cv2
import json
import numpy as np
import os
import tensorflow as tf
from tensorflow import Graph
import keras
# let TensorFlow grow GPU memory on demand instead of reserving it all
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
config = tf.compat.v1.ConfigProto()
# NOTE(review): this config object is built but never passed to the
# Session below, so the memory fraction appears to have no effect -- confirm.
config.gpu_options.per_process_gpu_memory_fraction = 0.6
# input image size (width, height) the classifier expects
shape=(200,200)
# class-index -> label mapping written at training time
with open('./models/labels.json','r') as f:
    label=f.read()
labels=json.loads(label)
# load the Keras model once at import time; graph and session are kept
# module-global so every request can run predictions in the same session
model_graph=Graph()
with model_graph.as_default():
    tf_session=tf.compat.v1.Session()
    with tf_session.as_default():
        # NOTE(review): this open() handle is never closed explicitly
        model = model_from_json(open("./models/fer.json", "r").read())
        model.load_weights("./models/fer_test.h5")
def index(request):
    """Landing page handling both sign-in and sign-up from one template.

    Authenticated users are redirected straight to the dashboard. POST
    requests are dispatched on the value of the ``submit`` button
    ('sign_in' or 'sign_up'); every non-returning path falls through to
    the final render of the index template.
    """
    if request.user.is_authenticated:
        return redirect('crop_detector:dashboard')
    else:
        form=CreateUserForm()
        if request.method=="POST":
            if request.POST.get('submit') == 'sign_in':
                # sign in: authenticate against the posted credentials
                username=request.POST.get('username')
                password=request.POST.get('password')
                user=authenticate(request,username=username,password=password)
                if user is not None:
                    login(request, user)
                    return HttpResponseRedirect(reverse('crop_detector:dashboard'))
                else:
                    # failed sign-in falls through to the final render below
                    messages.info(request, "Username or Password is incorrect")
            elif request.POST.get('submit') == 'sign_up':
                form=CreateUserForm(request.POST)
                if form.is_valid():
                    form.save()
                    messages.success(request,"Account was created successfully")
                    # NOTE(review): this empty context is never used -- the
                    # redirect below ignores it
                    context={}
                    return HttpResponseRedirect(reverse('crop_detector:index'))
                else:
                    messages.error(request,"Error !!! Account was not created, please Sign Up with correct details")
                    context={'form':form}
                    return render(request,'crop_detector/index.html',context)
    # default: GET requests and failed sign-ins land here
    context={'form':form}
    return render(request,'crop_detector/index.html',context)
def logoutUser(request):
    """Terminate the current session and send the user to the landing page."""
    # build the redirect first; it does not depend on the session state
    landing = redirect('crop_detector:index')
    logout(request)
    return landing
@login_required(login_url='crop_detector:index')
def delete_image(request, pk):
    """Delete the image with primary key ``pk`` owned by the logged-in user.

    The deletion itself only happens on POST; every request method ends in
    a redirect back to the dashboard.
    """
    if request.method == "POST":
        image = Image.objects.filter(user=request.user).get(id=pk)
        image.delete()
    return HttpResponseRedirect(reverse('crop_detector:dashboard'))
@login_required(login_url='crop_detector:index')
def download(request):
    """Render the user's image history to HTML and return it as a PDF report."""
    images = Image.objects.filter(user=request.user).order_by('-pub_date')
    # render the report template to an HTML string first
    html = get_template("crop_detector/download.html").render({"img": images})
    # stream the generated PDF straight into the HTTP response
    pdf_response = HttpResponse(content_type='application/pdf')
    pdf_response['Content-Disposition'] = 'filename="report.pdf"'
    result = pisa.CreatePDF(html, dest=pdf_response)
    if result.err:
        return HttpResponse("We had some errors <pre>" + html + "</pre>")
    return pdf_response
@login_required(login_url='crop_detector:index')
def dashboard(request):
    """Main dashboard view: upload an image, classify it, list all results.

    On POST the uploaded image is saved, resized to the model's input size
    and classified with the module-global Keras model; the predicted label
    (expected format "PlantName_HealthState" -- TODO confirm against
    labels.json) is split into plant name and health state and stored on
    the Image record. On GET the upload form plus the user's image history
    and a plant-disease reference-link table are rendered.
    """
    # static lookup table: plant -> {disease name: reference URL}
    diseasedata={
    "Mango":{"Powdery mildew":"https://www.gardendesign.com/how-to/powdery-mildew.html","Anthracnose":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","Die back":"https://www.britannica.com/science/dieback","Phoma blight":"https://www.gardeningknowhow.com/plant-problems/disease/phoma-blight-disease.htm","Bacterial canker":"https://www.planetnatural.com/pest-problem-solver/plant-disease/bacterial-canker/","Red rust":"https://www.vedantu.com/question-answer/red-rust-of-tea-is-caused- by-parasitic-aalgae-class-8-biology-cbse-5f550d903035db208c0dfa72","Sooty mould":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn74108.html","Mango malformation":"https://agritech.tnau.ac.in/crop_protection/mango_3.html","Others":"https://vikaspedia.in/agriculture/crop-production/integrated-pest-managment/ipm-for-fruit-crops/ipm-strategies-for-mango/mango-diseases-and-symptoms"},
    "Alstoni-Scholaris":{"Pauropsylla tuberculata":"https://www.ijcrt.org/papers/IJCRT1802217.pdf","Leaf gall":"https://www.ijcrt.org/papers/IJCRT1802217.pdf","Others":"https://vikaspedia.in/agriculture/crop-production/package-of-practices/medicinal-and-aromatic-plants/alstonia-scholaris"},
    "Arjun":{"Ascomycetes":"https://en.wikipedia.org/wiki/Ascomycota","Basidiomycetes":"https://www.cliffsnotes.com/study-guides/biology/biology/fungi/basidiomycetes","Leaf spot":"https://en.wikipedia.org/wiki/Pestalotiopsis_palmarum","Black nodal girdling":"http://silks.csb.gov.in/jhansi/diseases-and-pests-of-food-plants","Powdery mildew":"https://en.wikipedia.org/wiki/Phyllactinia_guttata","Leaf Curl":"https://en.wikipedia.org/wiki/Leaf_curl","Others":"http://silks.csb.gov.in/jhansi/diseases-and-pests-of-food-plants/"},
    "Gauva":{"Dieback and Anthracnose":"https://vikaspedia.in/agriculture/crop-production/integrated-pest-managment/ipm-for-spice-crops/ipm-strategies-for-chilli/chilli-description-of-plant-diseases","Guava wilt":"https://krishijagran.com/featured/guava-wilt-a-challenge-in-plant-pathology/","Algal leaf and fruit spot":"https://hgic.clemson.edu/factsheet/algal-leaf-spot/","Styler end rot ":"https://www.gardeningknowhow.com/edible/fruits/citrus/managing-fruit-with-stylar-end-rot.htm","Fruit canker ":"https://en.wikipedia.org/wiki/Citrus_canker","Others":"https://vikaspedia.in/agriculture/crop-production/integrated-pest-managment/ipm-for-fruit-crops/ipm-strategies-for-guava/guava-diseases-and-symptoms"},
    "Jamun":{"Anthracnose":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","White fly":"https://en.wikipedia.org/wiki/Dialeurodes","Leaf eating caterpillar":"https://blogs.massaudubon.org/yourgreatoutdoors/the-leaf-eating-tree-damaging-little-green-caterpillar/","Others":"https://agritech.tnau.ac.in/horticulture/horti_fruits_jamun.html"},
    "Jatropha":{"Anthracnose":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","Passiflora":"https://en.wikipedia.org/wiki/Passiflora","Pseudocercosporaleaf spot":"http://ipm.ucanr.edu/PMG/r735100511.html","Powdery mildew":"https://www.gardendesign.com/how-to/powdery-mildew.html","Rust":"https://www.planetnatural.com/pest-problem-solver/plant-disease/common-rust/","Stem canker and dieback":"http://ipm.illinois.edu/diseases/series600/rpd636/","Collar and root rot":"https://en.wikipedia.org/wiki/Collar_rot","Others":"https://www.intechopen.com/books/biodiesel-feedstocks-production-and-applications/major-diseases-of-the-biofuel-plant-physic-nut-jatropha-curcas-"},
    "Lemon":{"Citrus scab":"https://idtools.org/id/citrus/diseases/factsheet.php?name=Citrus%20scab","Citrus canker":"https://www.missouribotanicalgarden.org/gardens-gardening/your-garden/help-for-the-home-gardener/advice-tips-resources/pests-and-problems/diseases/cankers/gummosis-of-fruit-trees.aspx","Citrus tristeza":"https://en.wikipedia.org/wiki/Citrus_tristeza_virus","Huanglongbing":"https://www.frontiersin.org/articles/10.3389/fpls.2018.01976/full","Anthracnose":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","Sooty mould":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn74108.html","Others":" https://vikaspedia.in/agriculture/crop-production/integrated-pest-managment/ipm-for-fruit-crops/ipm-strategies-for-citrus/diseases-and-symptoms"},
    "Pomegranate":{"Anthracnose":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","Leaf and Fruit Spots":"https://idtools.org/id/citrus/diseases/factsheet.php?name=Pseudocercospora+fruit+and+leaf+spot","Dwiroopa punicae":"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7241677/","Fruit Rot and Mummification":"https://www2.ipm.ucanr.edu/agriculture/pomegranate/Alternaria-Fruit-Rot-Black-Heart/","Others":"https://edis.ifas.ufl.edu/publication/pp349"},
    "Pongamia-Pinnata":{"Leaf spot and blight":"https://idtools.org/id/palms/symptoms/factsheet.php?name=Leaf+Spots+and+Leaf+Blights","Leaf Rust":"https://cropwatch.unl.edu/plantdisease/wheat/leaf-rust","Powdery mildew":"https://www.gardendesign.com/how-to/powdery-mildew.html","Others":"https://agritech.tnau.ac.in/forestry/forest_disease_pungam.html"},
    "Chinar":{"Canker stain":"https://www.forestresearch.gov.uk/tools-and-resources/fthr/pest-and-disease-resources/canker-stain-plane-ceratocystis-platani/","Stem canker ":"https://cropwatch.unl.edu/plantdisease/soybean/stem-canker","Anthracnose ":"http://ipm.ucanr.edu/PMG/PESTNOTES/pn7420.html","Lace bugs":"https://en.wikipedia.org/wiki/Tingidae","Others":"https://balconygardenweb.com/everything-about-chinar-trees/"}
    }
    if request.method=="POST":
        form=ImageForm(data=request.POST,files=request.FILES)
        if form.is_valid():
            # attach the current user and timestamp before saving
            profile = form.save(commit=False)
            profile.user = request.user
            profile.pub_date=timezone.now()
            profile.save()
            # re-fetch the just-saved record (latest upload of this user)
            img=Image.objects.filter(user=request.user).latest('pub_date')
            # read the uploaded file from disk and prepare the model input:
            # resize to the expected shape, add a batch axis, cast to float32
            img=cv2.imread('.'+img.images.url)
            img=cv2.resize(img,shape)
            img=np.array(img)
            img = np.expand_dims(img, axis=0)
            img = tf.cast(img, tf.float32)
            # run the prediction inside the module-global graph/session
            with model_graph.as_default():
                with tf_session.as_default():
                    predict=model.predict(img,steps=1)
            # NOTE(review): assumes every label contains "_"; otherwise
            # plant_details[1] below raises IndexError -- confirm labels.json
            plant_details=labels[str(np.argmax(predict))].split("_")
            print(plant_details)
            profile.plant_name=plant_details[0]
            profile.plant_health=plant_details[1]
            profile.save()
            obj=form.instance
            context={"obj":obj}
            # redirect-after-POST so a refresh does not re-upload
            return HttpResponseRedirect(reverse('crop_detector:dashboard'))
    else:
        form=ImageForm()
    # GET (and invalid POST) path: render the form plus the image history
    img=Image.objects.filter(user=request.user).order_by('-pub_date')
    context={"form":form,"img":img,"diseasedata":diseasedata}
    return render(request,'crop_detector/dashboard.html',context)
| [
"numpy.argmax",
"django.contrib.messages.error",
"django.contrib.messages.info",
"django.contrib.auth.login",
"django.contrib.auth.decorators.login_required",
"json.loads",
"django.http.HttpResponse",
"django.utils.timezone.now",
"tensorflow.compat.v1.Session",
"django.contrib.auth.logout",
"ten... | [((791, 817), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (815, 817), True, 'import tensorflow as tf\n'), ((962, 979), 'json.loads', 'json.loads', (['label'], {}), '(label)\n', (972, 979), False, 'import json\n'), ((993, 1000), 'tensorflow.Graph', 'Graph', ([], {}), '()\n', (998, 1000), False, 'from tensorflow import Graph\n'), ((2785, 2832), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""crop_detector:index"""'}), "(login_url='crop_detector:index')\n", (2799, 2832), False, 'from django.contrib.auth.decorators import login_required\n'), ((3054, 3101), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""crop_detector:index"""'}), "(login_url='crop_detector:index')\n", (3068, 3101), False, 'from django.contrib.auth.decorators import login_required\n'), ((3633, 3680), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""crop_detector:index"""'}), "(login_url='crop_detector:index')\n", (3647, 3680), False, 'from django.contrib.auth.decorators import login_required\n'), ((1047, 1069), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1067, 1069), True, 'import tensorflow as tf\n'), ((2724, 2739), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (2730, 2739), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((2751, 2782), 'django.shortcuts.redirect', 'redirect', (['"""crop_detector:index"""'], {}), "('crop_detector:index')\n", (2759, 2782), False, 'from django.shortcuts import render, redirect\n'), ((3282, 3326), 'django.http.HttpResponse', 'HttpResponse', ([], {'content_type': '"""application/pdf"""'}), "(content_type='application/pdf')\n", (3294, 3326), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((3401, 3428), 'django.template.loader.get_template', 'get_template', (['template_path'], {}), 
'(template_path)\n', (3413, 3428), False, 'from django.template.loader import get_template\n'), ((3480, 3515), 'xhtml2pdf.pisa.CreatePDF', 'pisa.CreatePDF', (['html'], {'dest': 'response'}), '(html, dest=response)\n', (3494, 3515), False, 'from xhtml2pdf import pisa\n'), ((10890, 10946), 'django.shortcuts.render', 'render', (['request', '"""crop_detector/dashboard.html"""', 'context'], {}), "(request, 'crop_detector/dashboard.html', context)\n", (10896, 10946), False, 'from django.shortcuts import render, redirect\n'), ((1300, 1335), 'django.shortcuts.redirect', 'redirect', (['"""crop_detector:dashboard"""'], {}), "('crop_detector:dashboard')\n", (1308, 1335), False, 'from django.shortcuts import render, redirect\n'), ((2643, 2695), 'django.shortcuts.render', 'render', (['request', '"""crop_detector/index.html"""', 'context'], {}), "(request, 'crop_detector/index.html', context)\n", (2649, 2695), False, 'from django.shortcuts import render, redirect\n'), ((3555, 3613), 'django.http.HttpResponse', 'HttpResponse', (["('We had some errors <pre>' + html + '</pre>')"], {}), "('We had some errors <pre>' + html + '</pre>')\n", (3567, 3613), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((3015, 3049), 'django.urls.reverse', 'reverse', (['"""crop_detector:dashboard"""'], {}), "('crop_detector:dashboard')\n", (3022, 3049), False, 'from django.urls import reverse\n'), ((9359, 9373), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9371, 9373), False, 'from django.utils import timezone\n'), ((9702, 9734), 'cv2.imread', 'cv2.imread', (["('.' + img.images.url)"], {}), "('.' 
+ img.images.url)\n", (9712, 9734), False, 'import cv2\n'), ((9749, 9771), 'cv2.resize', 'cv2.resize', (['img', 'shape'], {}), '(img, shape)\n', (9759, 9771), False, 'import cv2\n'), ((9787, 9800), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9795, 9800), True, 'import numpy as np\n'), ((9819, 9846), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (9833, 9846), True, 'import numpy as np\n'), ((9865, 9889), 'tensorflow.cast', 'tf.cast', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (9872, 9889), True, 'import tensorflow as tf\n'), ((1644, 1703), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': 'username', 'password': 'password'}), '(request, username=username, password=password)\n', (1656, 1703), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((10615, 10649), 'django.urls.reverse', 'reverse', (['"""crop_detector:dashboard"""'], {}), "('crop_detector:dashboard')\n", (10622, 10649), False, 'from django.urls import reverse\n'), ((1760, 1780), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (1765, 1780), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1907, 1966), 'django.contrib.messages.info', 'messages.info', (['request', '"""Username or Password is incorrect"""'], {}), "(request, 'Username or Password is incorrect')\n", (1920, 1966), False, 'from django.contrib import messages\n'), ((1829, 1863), 'django.urls.reverse', 'reverse', (['"""crop_detector:dashboard"""'], {}), "('crop_detector:dashboard')\n", (1836, 1863), False, 'from django.urls import reverse\n'), ((2165, 2226), 'django.contrib.messages.success', 'messages.success', (['request', '"""Account was created successfully"""'], {}), "(request, 'Account was created successfully')\n", (2181, 2226), False, 'from django.contrib import messages\n'), ((2379, 2481), 'django.contrib.messages.error', 'messages.error', (['request', '"""Error !!! 
Account was not created, please Sign Up with correct details"""'], {}), "(request,\n 'Error !!! Account was not created, please Sign Up with correct details')\n", (2393, 2481), False, 'from django.contrib import messages\n'), ((2546, 2598), 'django.shortcuts.render', 'render', (['request', '"""crop_detector/index.html"""', 'context'], {}), "(request, 'crop_detector/index.html', context)\n", (2552, 2598), False, 'from django.shortcuts import render, redirect\n'), ((2305, 2335), 'django.urls.reverse', 'reverse', (['"""crop_detector:index"""'], {}), "('crop_detector:index')\n", (2312, 2335), False, 'from django.urls import reverse\n'), ((10124, 10142), 'numpy.argmax', 'np.argmax', (['predict'], {}), '(predict)\n', (10133, 10142), True, 'import numpy as np\n')] |
from pydex.core.designer import Designer
import numpy as np
def simulate(ti_controls, model_parameters):
    """Two-exponential response surface.

    Returns a 1-element array:
    theta0 + theta1*exp(theta2*x1) + theta3*exp(theta4*x2),
    where x1, x2 come from ti_controls and theta from model_parameters.
    """
    theta = model_parameters
    x1 = ti_controls[0]
    x2 = ti_controls[1]
    response = theta[0] + theta[1] * np.exp(theta[2] * x1) + theta[3] * np.exp(theta[4] * x2)
    return np.array([response])
# Build a pydex Designer for the two-exponential model defined above.
designer = Designer()
designer.simulate = simulate
# Candidate experiments: an 11 x 11 full-factorial grid over [-1, 1]^2.
designer.ti_controls_candidates = designer.enumerate_candidates(
    bounds=[
        [-1, 1],
        [-1, 1],
    ],
    levels=[
        11,
        11,
    ],
)
designer.ti_controls_names = ["$x_1$", "$x_2$"]
# Nominal parameter values at which the local design sensitivities are evaluated.
mp = np.array([1, 2, 2, 10, 2])
# NOTE(review): private attribute of Designer -- presumably the number of
# sensitivity/optimizer steps; confirm against the pydex documentation.
designer._num_steps = 30
designer.model_parameters = mp
designer.initialize(verbose=2)
# D-optimal design (determinant-based criterion).
criterion = designer.d_opt_criterion
designer.design_experiment(criterion, write=False)
designer.print_optimal_candidates()
designer.plot_optimal_controls(write=False, title=True, non_opt_candidates=True, tol=1e-3)
# A-optimal design (trace-based criterion).
criterion = designer.a_opt_criterion
designer.design_experiment(criterion, write=False)
designer.print_optimal_candidates()
designer.plot_optimal_controls(write=False, title=True, non_opt_candidates=True, tol=1e-3)
# E-optimal design; this one is solved with the MOSEK optimizer and uses a
# looser tolerance when reporting the optimal candidates.
criterion = designer.e_opt_criterion
designer.design_experiment(criterion, write=False, optimizer="MOSEK")
designer.print_optimal_candidates(tol=3e-3)
designer.plot_optimal_controls(write=False, title=True, non_opt_candidates=True, tol=1e-3)
designer.show_plots()
| [
"numpy.array",
"numpy.exp",
"pydex.core.designer.Designer"
] | [((330, 340), 'pydex.core.designer.Designer', 'Designer', ([], {}), '()\n', (338, 340), False, 'from pydex.core.designer import Designer\n'), ((589, 615), 'numpy.array', 'np.array', (['[1, 2, 2, 10, 2]'], {}), '([1, 2, 2, 10, 2])\n', (597, 615), True, 'import numpy as np\n'), ((266, 310), 'numpy.exp', 'np.exp', (['(model_parameters[4] * ti_controls[1])'], {}), '(model_parameters[4] * ti_controls[1])\n', (272, 310), True, 'import numpy as np\n'), ((189, 233), 'numpy.exp', 'np.exp', (['(model_parameters[2] * ti_controls[0])'], {}), '(model_parameters[2] * ti_controls[0])\n', (195, 233), True, 'import numpy as np\n')] |
"""
Converts Aviv.dat files into numpy files for NRC capped homopolymer repeats.
"""
import numpy as np
import pandas as pd
import ntpath # Good for path manipulations on a PC?
import glob # Allows for unix-like specifications paths, using *, ?, etc.
import os
import json
import time
# ---------------------------------------------------------------------------
# Main conversion: read every Aviv *.dat melt in NRC_data/, normalize the CD
# signal, save one .npy file per melt, and write a combined CSV plus ordered
# construct/melt lists (JSON) for the downstream Ising fitter.
# ---------------------------------------------------------------------------
start = time.time()
PATH = os.path.dirname(os.path.abspath(__file__))
NRC_DATA_PATH = os.path.join(PATH, "NRC_data")
proj_name = "cANK"
DF_COLUMNS = ["denat", "signal", "construct_melt", "dataset"]
den_nsig_const_melt = []  # all [denat, norm_signal, construct, melt_no] rows
constructs = []  # constructs used to build partition functions downstream
melts = []  # melt IDs used in fitting
melt_frames = []  # one DataFrame per melt; concatenated once after the loop
num = 0  # 1-based melt number, used as the dataset ID in the output
for filename in glob.glob(os.path.join(NRC_DATA_PATH, "*.dat")):
    num += 1
    melt = ntpath.basename(filename).split(".")[0]
    construct = melt[:-2]  # melt name minus the trailing melt index
    with open(filename, "r") as f:
        lines = f.read().splitlines()
    # Locate the data section delimited by "$DATA" ... "$ENDDATA".
    begin = 0
    while lines[begin] != "$DATA":
        begin += 1
    begin += 4  # skip the $DATA marker plus the header lines that follow it
    end = 0
    while lines[end] != "$ENDDATA":
        end += 1
    # Extract [denaturant] (column 0) and CD signal (column 1).
    xylist = []
    for row in range(begin, end - 1):
        fields = lines[row].split()
        xylist.append([float(fields[0]), float(fields[1])])
    xyarray = np.array(xylist)
    # Normalize the signal: max -> 0, min -> 1.
    maxval = max(xyarray[:, 1])
    minval = min(xyarray[:, 1])
    normylist = [float((y - maxval) / (minval - maxval)) for y in xyarray[:, 1]]
    # Build the per-melt rows once and reuse them for the combined list
    # (the original built the identical rows in two separate loops).
    single_melt_dncm = [
        [xyarray[i, 0], normylist[i], construct, num] for i in range(len(xylist))
    ]
    den_nsig_const_melt.extend(single_melt_dncm)
    # Columns are denaturant, normalized CD, construct, melt number.
    melt_array = np.array(single_melt_dncm)
    np.save(
        os.path.join(PATH, f"{melt}.npy"), melt_array
    )  # Writes an npy file to disk for each melt.
    # Name the columns so rows align under the intended headers.
    # (DataFrame.append on unnamed columns produced a block of NaN-padded
    # extra columns in the CSV, and .append was removed in pandas 2.0 --
    # accumulate frames and concatenate once instead.)
    melt_frames.append(pd.DataFrame(melt_array, columns=DF_COLUMNS))
    if construct not in constructs:
        constructs.append(construct)
    melts.append(melt)
if melt_frames:
    den_nsig_const_melt_df = pd.concat(melt_frames, ignore_index=True)
else:
    den_nsig_const_melt_df = pd.DataFrame(columns=DF_COLUMNS)
den_nsig_const_melt_df.to_csv(
    os.path.join(PATH, f"{proj_name}_combined_data.csv"),
    index=False,
    header=False,
)
# This loop puts melts in order of type (NRxC, NRx, RxC) and length.
NRClist = []
NRlist = []
RClist = []
melts.sort()
for melt in melts:
    if melt[0] == "N":
        if melt[-3] == "C":
            NRClist.append(melt)
        else:
            NRlist.append(melt)
    else:
        RClist.append(melt)
melts = NRClist + NRlist + RClist
# Rebuild the construct list in the same order as the sorted melts.
# (The original did not clear `constructs` first, so this reordering --
# which its own comment promised -- was a no-op.)
constructs = []
for melt in melts:
    construct = melt[:-2]
    if construct not in constructs:
        constructs.append(construct)
# Write out the results.
with open(os.path.join(PATH, f"{proj_name}_constructs.json"), "w") as r:
    json.dump(constructs, r)
with open(os.path.join(PATH, f"{proj_name}_melts.json"), "w") as s:
    json.dump(melts, s)
stop = time.time()
runtime = stop - start
print("\nThe elapsed time was " + str(runtime) + " sec")
| [
"pandas.DataFrame",
"json.dump",
"os.path.abspath",
"ntpath.basename",
"time.time",
"numpy.array",
"os.path.join"
] | [((297, 308), 'time.time', 'time.time', ([], {}), '()\n', (306, 308), False, 'import time\n'), ((376, 406), 'os.path.join', 'os.path.join', (['PATH', '"""NRC_data"""'], {}), "(PATH, 'NRC_data')\n", (388, 406), False, 'import os\n'), ((711, 781), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['denat', 'signal', 'construct_melt', 'dataset']"}), "(columns=['denat', 'signal', 'construct_melt', 'dataset'])\n", (723, 781), True, 'import pandas as pd\n'), ((3957, 3968), 'time.time', 'time.time', ([], {}), '()\n', (3966, 3968), False, 'import time\n'), ((333, 358), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (348, 358), False, 'import os\n'), ((906, 942), 'os.path.join', 'os.path.join', (['NRC_DATA_PATH', '"""*.dat"""'], {}), "(NRC_DATA_PATH, '*.dat')\n", (918, 942), False, 'import os\n'), ((974, 999), 'ntpath.basename', 'ntpath.basename', (['filename'], {}), '(filename)\n', (989, 999), False, 'import ntpath\n'), ((3095, 3147), 'os.path.join', 'os.path.join', (['PATH', 'f"""{proj_name}_combined_data.csv"""'], {}), "(PATH, f'{proj_name}_combined_data.csv')\n", (3107, 3147), False, 'import os\n'), ((3831, 3855), 'json.dump', 'json.dump', (['constructs', 'r'], {}), '(constructs, r)\n', (3840, 3855), False, 'import json\n'), ((3929, 3948), 'json.dump', 'json.dump', (['melts', 's'], {}), '(melts, s)\n', (3938, 3948), False, 'import json\n'), ((1867, 1883), 'numpy.array', 'np.array', (['xylist'], {}), '(xylist)\n', (1875, 1883), True, 'import numpy as np\n'), ((2679, 2705), 'numpy.array', 'np.array', (['single_melt_dncm'], {}), '(single_melt_dncm)\n', (2687, 2705), True, 'import numpy as np\n'), ((2854, 2878), 'pandas.DataFrame', 'pd.DataFrame', (['melt_array'], {}), '(melt_array)\n', (2866, 2878), True, 'import pandas as pd\n'), ((3764, 3814), 'os.path.join', 'os.path.join', (['PATH', 'f"""{proj_name}_constructs.json"""'], {}), "(PATH, f'{proj_name}_constructs.json')\n", (3776, 3814), False, 'import os\n'), ((3867, 3912), 
'os.path.join', 'os.path.join', (['PATH', 'f"""{proj_name}_melts.json"""'], {}), "(PATH, f'{proj_name}_melts.json')\n", (3879, 3912), False, 'import os\n'), ((2735, 2768), 'os.path.join', 'os.path.join', (['PATH', 'f"""{melt}.npy"""'], {}), "(PATH, f'{melt}.npy')\n", (2747, 2768), False, 'import os\n')] |
import pandas
import PIL.Image
from cStringIO import StringIO
import IPython.display
import numpy as np
import trace.common as com
import trace.sampling as samp
import trace.train as train
import trace.train.hooks as hooks
import trace.evaluation as eva
def showarray(a, fmt='png'):
    """Render a numpy array inline in the notebook as an image.

    The array is cast to uint8, encoded via PIL into *fmt* (default PNG),
    and displayed with IPython's rich display machinery.
    """
    buf = StringIO()
    PIL.Image.fromarray(np.uint8(a)).save(buf, fmt)
    IPython.display.display(IPython.display.Image(data=buf.getvalue()))
def print_and_save_metrics(model_name, df, p_error, r_full, r_merge, r_split):
    """Print the four evaluation metrics and record them as row *model_name* of *df*.

    Mutates *df* in place; returns None.
    """
    print(model_name)
    for label, value in (('Pixel Error', p_error),
                         ('Rand - Full', r_full),
                         ('Rand - Merge', r_merge),
                         ('Rand - Split', r_split)):
        print('%s: %.6f' % (label, value))
    df.loc[model_name] = [p_error, r_full, r_merge, r_split]
def get_metrics(pipeline, classifier, inputs, labels, targets):
    """Run inference and score it.

    Returns a 5-tuple: (raw predictions, pixel error of the thresholded
    predictions against *targets*, and the full/merge/split Rand F-scores
    of the first sample against *labels*).
    """
    preds = classifier.predict(inputs, pipeline.inference_params, mirror_inputs=True)
    # Pixel error: mean absolute difference after rounding to binary.
    pixel_error = np.mean(np.absolute(np.round(preds) - targets))
    scores = eva.rand_error_from_prediction(
        labels[0, :, :, :, 0],
        preds[0],
        pred_type=pipeline.model_arch.output_mode,
    )
    return (preds, pixel_error, scores['Rand F-Score Full'],
            scores['Rand F-Score Merge'], scores['Rand F-Score Split'])
def load_classifier(pipeline, run_name):
    """Rebuild the model described by *pipeline*, restore its checkpoint for
    run *run_name*, and return (classifier, dataset sampler)."""
    train_params = pipeline.training_params
    arch = pipeline.model_arch
    model = pipeline.model_constructor(arch)
    # Sampled patches must be large enough for the patch plus the field of view.
    sample_shape = np.asarray(train_params.patch_shape) + np.asarray(arch.fov_shape) - 1
    # Create the dataset sampler.
    dataset = pipeline.dataset_constructor(pipeline.data_path)
    dset_sampler = samp.EMDatasetSampler(
        dataset,
        sample_shape=sample_shape,
        batch_size=train_params.batch_size,
        augmentation_config=pipeline.augmentation_config,
        label_output_type=arch.output_mode,
    )
    # Results folder that holds the checkpoint for this run.
    ckpt_folder = pipeline.data_path + 'results/' + model.model_name + '/run-' + run_name + '/'
    # Create and restore the classifier.
    classifier = train.Learner(model, ckpt_folder)
    classifier.restore()
    return classifier, dset_sampler
def run_full_training(pipeline, run_name):
    """Train *pipeline*'s model from scratch under run *run_name* and return
    (classifier, dataset sampler)."""
    arch = pipeline.model_arch
    train_params = pipeline.training_params
    model = pipeline.model_constructor(arch)
    # Sampled patches must be large enough for the patch plus the field of view.
    sample_shape = np.asarray(train_params.patch_shape) + np.asarray(arch.fov_shape) - 1
    # Construct the dataset sampler.
    dataset = pipeline.dataset_constructor(pipeline.data_path)
    dset_sampler = samp.EMDatasetSampler(
        dataset,
        sample_shape=sample_shape,
        batch_size=train_params.batch_size,
        augmentation_config=pipeline.augmentation_config,
        label_output_type=arch.output_mode,
    )
    ckpt_folder = pipeline.data_path + 'results/' + model.model_name + '/run-' + run_name + '/'
    classifier = train.Learner(model, ckpt_folder)
    # Monitoring hooks: loss logging, periodic checkpointing, validation,
    # and image visualization.
    hooks_list = [
        hooks.LossHook(50, model),
        hooks.ModelSaverHook(500, ckpt_folder),
        hooks.ValidationHook(100, dset_sampler, model, pipeline.data_path,
                             arch.output_mode, pipeline.inference_params),
        hooks.ImageVisualizationHook(2000, model),
        # hooks.HistogramHook(100, model),
        # hooks.LayerVisualizationHook(500, model),
    ]
    # Train the model.
    print('Training for %d iterations' % train_params.n_iterations)
    classifier.train(train_params, dset_sampler, hooks_list)
    return classifier, dset_sampler
"numpy.absolute",
"numpy.uint8",
"trace.train.hooks.LossHook",
"numpy.asarray",
"trace.evaluation.rand_error_from_prediction",
"trace.train.hooks.ValidationHook",
"trace.train.hooks.ImageVisualizationHook",
"trace.train.Learner",
"cStringIO.StringIO",
"trace.sampling.EMDatasetSampler",
"numpy.ro... | [((293, 304), 'numpy.uint8', 'np.uint8', (['a'], {}), '(a)\n', (301, 304), True, 'import numpy as np\n'), ((313, 323), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (321, 323), False, 'from cStringIO import StringIO\n'), ((936, 951), 'numpy.round', 'np.round', (['preds'], {}), '(preds)\n', (944, 951), True, 'import numpy as np\n'), ((1028, 1139), 'trace.evaluation.rand_error_from_prediction', 'eva.rand_error_from_prediction', (['labels[0, :, :, :, 0]', 'preds[0]'], {'pred_type': 'pipeline.model_arch.output_mode'}), '(labels[0, :, :, :, 0], preds[0], pred_type=\n pipeline.model_arch.output_mode)\n', (1058, 1139), True, 'import trace.evaluation as eva\n'), ((1822, 2011), 'trace.sampling.EMDatasetSampler', 'samp.EMDatasetSampler', (['dataset'], {'sample_shape': 'sample_shape', 'batch_size': 'train_params.batch_size', 'augmentation_config': 'pipeline.augmentation_config', 'label_output_type': 'arch.output_mode'}), '(dataset, sample_shape=sample_shape, batch_size=\n train_params.batch_size, augmentation_config=pipeline.\n augmentation_config, label_output_type=arch.output_mode)\n', (1843, 2011), True, 'import trace.sampling as samp\n'), ((2267, 2300), 'trace.train.Learner', 'train.Learner', (['model', 'ckpt_folder'], {}), '(model, ckpt_folder)\n', (2280, 2300), True, 'import trace.train as train\n'), ((2841, 3030), 'trace.sampling.EMDatasetSampler', 'samp.EMDatasetSampler', (['dataset'], {'sample_shape': 'sample_shape', 'batch_size': 'train_params.batch_size', 'augmentation_config': 'pipeline.augmentation_config', 'label_output_type': 'arch.output_mode'}), '(dataset, sample_shape=sample_shape, batch_size=\n train_params.batch_size, augmentation_config=pipeline.\n augmentation_config, label_output_type=arch.output_mode)\n', (2862, 3030), True, 'import trace.sampling as samp\n'), ((3300, 3333), 'trace.train.Learner', 'train.Learner', (['model', 'ckpt_folder'], {}), '(model, ckpt_folder)\n', (3313, 3333), True, 'import trace.train as train\n'), ((978, 
1013), 'numpy.absolute', 'np.absolute', (['(binary_preds - targets)'], {}), '(binary_preds - targets)\n', (989, 1013), True, 'import numpy as np\n'), ((3362, 3387), 'trace.train.hooks.LossHook', 'hooks.LossHook', (['(50)', 'model'], {}), '(50, model)\n', (3376, 3387), True, 'import trace.train.hooks as hooks\n'), ((3397, 3435), 'trace.train.hooks.ModelSaverHook', 'hooks.ModelSaverHook', (['(500)', 'ckpt_folder'], {}), '(500, ckpt_folder)\n', (3417, 3435), True, 'import trace.train.hooks as hooks\n'), ((3445, 3561), 'trace.train.hooks.ValidationHook', 'hooks.ValidationHook', (['(100)', 'dset_sampler', 'model', 'pipeline.data_path', 'arch.output_mode', 'pipeline.inference_params'], {}), '(100, dset_sampler, model, pipeline.data_path, arch.\n output_mode, pipeline.inference_params)\n', (3465, 3561), True, 'import trace.train.hooks as hooks\n'), ((3566, 3607), 'trace.train.hooks.ImageVisualizationHook', 'hooks.ImageVisualizationHook', (['(2000)', 'model'], {}), '(2000, model)\n', (3594, 3607), True, 'import trace.train.hooks as hooks\n'), ((1636, 1672), 'numpy.asarray', 'np.asarray', (['train_params.patch_shape'], {}), '(train_params.patch_shape)\n', (1646, 1672), True, 'import numpy as np\n'), ((1675, 1701), 'numpy.asarray', 'np.asarray', (['arch.fov_shape'], {}), '(arch.fov_shape)\n', (1685, 1701), True, 'import numpy as np\n'), ((2652, 2688), 'numpy.asarray', 'np.asarray', (['train_params.patch_shape'], {}), '(train_params.patch_shape)\n', (2662, 2688), True, 'import numpy as np\n'), ((2691, 2717), 'numpy.asarray', 'np.asarray', (['arch.fov_shape'], {}), '(arch.fov_shape)\n', (2701, 2717), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib as mpl
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
from sklearn.metrics import accuracy_score
from merf.utils import MERFDataGenerator
from merf.merf import MERF
from merf.viz import plot_merf_training_stats
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
# Encoder used below to turn the 'study' column into integer cluster IDs for MERF.
ord_enc = OrdinalEncoder()
import matplotlib.pyplot as plt
######## Read data ########
# NOTE(review): hard-coded local Windows path -- the script only runs on that machine.
fip_data = "H:/cloud/cloud_data/Projects/MixedEffectModels/data/forMERF.csv"
df_csv = pd.read_csv(fip_data)
# cols_select_mahmoud = ['patient_ID','dataset','study','intercept','slope','intercept1','slope1','study_no','gender',
#                 'typical3 ','age','CT_pos','Agatston_score','typical3_imp','atypical2_imp','nongil_imp','iv_prot_imp',
#                 'contrast_amount_imp','contrast_conc_imp','height_imp','hypertension_imp','diabetes_imp','hyperlipidemia_imp',
#                 'smoker_imp','risk','pos_fam_hist_imp','prior_myo_inf_imp','Study','interaction1',' interaction2','interaction3']
# Feature columns (fixed effects) and the binary label column.
cols_select_features = ['study','gender','age','Agatston_score','typical3_imp','contrast_amount_imp','contrast_conc_imp',
                        'height_imp','hypertension_imp','diabetes_imp','hyperlipidemia_imp','smoker_imp','risk',
                        'pos_fam_hist_imp','prior_myo_inf_imp']
cols_select_label = ['Cath_pos']
features = df_csv[cols_select_features]
target = df_csv[cols_select_label]
######## Split data ########
# 80/20 split, stratified on 'study' so every study appears in both sets.
data_train, data_test, target_train, target_test = train_test_split(features,target, stratify=df_csv['study'], test_size = 0.20, random_state = 10,)
#data_train = data_train.loc[:, (data_train.columns != 'study')]
#data_test = data_test.loc[:, (data_test.columns != 'study')]
# NOTE(review): factorize is applied to train and test independently, so the
# integer codes are assigned by order of appearance and may not correspond
# between the two splits -- verify this is intended.
data_train['study'] = pd.factorize(data_train['study'])[0]
data_test['study'] = pd.factorize(data_test['study'])[0]
######## Train RF ########
# Baseline: plain random forest classifier on all features including 'study'.
# NOTE(review): the X_train_rf / X_test_rf assignments below are each
# duplicated on consecutive lines -- looks like a copy/paste leftover.
X_train_rf = np.array(data_train)
X_train_rf = np.array(data_train)
Y_train_rf = np.array(target_train)[:,0]
X_test_rf = np.array(data_test)
X_test_rf = np.array(data_test)
Y_test_rf = np.array(target_test)[:,0]
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train_rf, Y_train_rf)
# Positive-class probabilities and hard predictions on the held-out set.
P_pred_rf = rf.predict_proba(X_test_rf)[:, 1]
Y_pred_rf = rf.predict(X_test_rf)
C_RF = confusion_matrix(Y_test_rf, Y_pred_rf)
ACC_RF = accuracy_score(Y_test_rf, Y_pred_rf)
# Impurity-based feature importances with the std over the trees as error bars.
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_],axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
column_names = list(data_train.columns[indices])
for f in range(X_train_rf.shape[1]):
    print("%d. %s (%f)" % (f + 1, column_names[f], importances[indices[f]]))
# Plot the impurity-based feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train_rf.shape[1]), importances[indices],color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train_rf.shape[1]), indices)
plt.xlim([-1, X_train_rf.shape[1]])
plt.show()
######## Train MERF ########
# Mixed-effects random forest: 'study' is removed from the fixed-effect
# features and used as the random-effect cluster instead; Z is an
# intercept-only random-effects design matrix (a column of ones).
X_train_merf = np.array(data_train.loc[:, data_train.columns != 'study'])
Y_train_merf = np.array(target_train)[:,0]
Z_train_merf = np.ones((len(X_train_merf),1))
clusters_train = pd.Series(ord_enc.fit_transform(data_train[['study']])[:,0])
X_test_merf = np.array(data_test.loc[:, data_test.columns != 'study'])
Y_test_merf = np.array(target_test)[:,0]
Z_test_merf = np.ones((len(X_test_merf),1))
# NOTE(review): the encoder is re-fit on the test split, so test cluster IDs
# are assigned independently of the training ones -- confirm this is intended.
clusters_test = pd.Series(ord_enc.fit_transform(data_test[['study']])[:,0])
fixed_effects_model=RandomForestRegressor(n_estimators=100, n_jobs=-1)
gll_early_stop_threshold = None  # no early stopping on the generalized log-likelihood
mrf = MERF(fixed_effects_model=fixed_effects_model, max_iterations=50, gll_early_stop_threshold=gll_early_stop_threshold)
mrf.fit(X_train_merf, Z_train_merf, clusters_train, Y_train_merf)
Y_pred_merf = mrf.predict(X_test_merf, Z_test_merf, clusters_test)
# MERF regresses a continuous output; round it to get class labels.
C_MERF = confusion_matrix(Y_test_merf, np.round(Y_pred_merf))
ACC_MERF = accuracy_score(Y_test_merf, np.round(Y_pred_merf))
plot_merf_training_stats(mrf, num_clusters_to_plot=10)
| [
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"merf.viz.plot_merf_training_stats",
"matplotlib.pyplot.show",
"merf.merf.MERF",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"numpy.std",
"sklearn.... | [((576, 592), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (590, 592), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((740, 761), 'pandas.read_csv', 'pd.read_csv', (['fip_data'], {}), '(fip_data)\n', (751, 761), True, 'import pandas as pd\n'), ((1690, 1786), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'target'], {'stratify': "df_csv['study']", 'test_size': '(0.2)', 'random_state': '(10)'}), "(features, target, stratify=df_csv['study'], test_size=0.2,\n random_state=10)\n", (1706, 1786), False, 'from sklearn.model_selection import train_test_split\n'), ((2072, 2092), 'numpy.array', 'np.array', (['data_train'], {}), '(data_train)\n', (2080, 2092), True, 'import numpy as np\n'), ((2106, 2126), 'numpy.array', 'np.array', (['data_train'], {}), '(data_train)\n', (2114, 2126), True, 'import numpy as np\n'), ((2180, 2199), 'numpy.array', 'np.array', (['data_test'], {}), '(data_test)\n', (2188, 2199), True, 'import numpy as np\n'), ((2212, 2231), 'numpy.array', 'np.array', (['data_test'], {}), '(data_test)\n', (2220, 2231), True, 'import numpy as np\n'), ((2277, 2317), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2299, 2317), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2437, 2475), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['Y_test_rf', 'Y_pred_rf'], {}), '(Y_test_rf, Y_pred_rf)\n', (2453, 2475), False, 'from sklearn.metrics import confusion_matrix\n'), ((2485, 2521), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test_rf', 'Y_pred_rf'], {}), '(Y_test_rf, Y_pred_rf)\n', (2499, 2521), False, 'from sklearn.metrics import accuracy_score\n'), ((2568, 2638), 'numpy.std', 'np.std', (['[tree.feature_importances_ for tree in rf.estimators_]'], {'axis': '(0)'}), '([tree.feature_importances_ for tree in rf.estimators_], axis=0)\n', (2574, 2638), True, 'import 
numpy as np\n'), ((2958, 2970), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2968, 2970), True, 'import matplotlib.pyplot as plt\n'), ((2971, 3003), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature importances"""'], {}), "('Feature importances')\n", (2980, 3003), True, 'import matplotlib.pyplot as plt\n'), ((3155, 3190), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, X_train_rf.shape[1]]'], {}), '([-1, X_train_rf.shape[1]])\n', (3163, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3199, 3201), True, 'import matplotlib.pyplot as plt\n'), ((3247, 3305), 'numpy.array', 'np.array', (["data_train.loc[:, data_train.columns != 'study']"], {}), "(data_train.loc[:, data_train.columns != 'study'])\n", (3255, 3305), True, 'import numpy as np\n'), ((3487, 3543), 'numpy.array', 'np.array', (["data_test.loc[:, data_test.columns != 'study']"], {}), "(data_test.loc[:, data_test.columns != 'study'])\n", (3495, 3543), True, 'import numpy as np\n'), ((3726, 3776), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'n_jobs': '(-1)'}), '(n_estimators=100, n_jobs=-1)\n', (3747, 3776), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((3815, 3934), 'merf.merf.MERF', 'MERF', ([], {'fixed_effects_model': 'fixed_effects_model', 'max_iterations': '(50)', 'gll_early_stop_threshold': 'gll_early_stop_threshold'}), '(fixed_effects_model=fixed_effects_model, max_iterations=50,\n gll_early_stop_threshold=gll_early_stop_threshold)\n', (3819, 3934), False, 'from merf.merf import MERF\n'), ((4191, 4245), 'merf.viz.plot_merf_training_stats', 'plot_merf_training_stats', (['mrf'], {'num_clusters_to_plot': '(10)'}), '(mrf, num_clusters_to_plot=10)\n', (4215, 4245), False, 'from merf.viz import plot_merf_training_stats\n'), ((1937, 1970), 'pandas.factorize', 'pd.factorize', (["data_train['study']"], {}), "(data_train['study'])\n", (1949, 1970), True, 
'import pandas as pd\n'), ((1995, 2027), 'pandas.factorize', 'pd.factorize', (["data_test['study']"], {}), "(data_test['study'])\n", (2007, 2027), True, 'import pandas as pd\n'), ((2140, 2162), 'numpy.array', 'np.array', (['target_train'], {}), '(target_train)\n', (2148, 2162), True, 'import numpy as np\n'), ((2244, 2265), 'numpy.array', 'np.array', (['target_test'], {}), '(target_test)\n', (2252, 2265), True, 'import numpy as np\n'), ((2648, 2671), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (2658, 2671), True, 'import numpy as np\n'), ((3321, 3343), 'numpy.array', 'np.array', (['target_train'], {}), '(target_train)\n', (3329, 3343), True, 'import numpy as np\n'), ((3558, 3579), 'numpy.array', 'np.array', (['target_test'], {}), '(target_test)\n', (3566, 3579), True, 'import numpy as np\n'), ((4104, 4125), 'numpy.round', 'np.round', (['Y_pred_merf'], {}), '(Y_pred_merf)\n', (4112, 4125), True, 'import numpy as np\n'), ((4166, 4187), 'numpy.round', 'np.round', (['Y_pred_merf'], {}), '(Y_pred_merf)\n', (4174, 4187), True, 'import numpy as np\n')] |
from collections import namedtuple
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch import Tensor
from .base import VariationalParameters
class DiagonalGaussianVariationalParameters(VariationalParameters):
    """
    diag Gaussian distribution.
    mean + N(0, exp(diag_lstd)^2)

    Each model parameter p is reparameterized as
        p = mean + eps * exp(diag_lstd),   eps ~ N(0, I)
    so ``mean`` and ``diag_lstd`` (the elementwise log standard deviation)
    are the learnable variational parameters.
    """

    # Per-parameter container holding the two trainable tensors (same shape as p).
    DiagonalGaussianVariationalParameter = namedtuple('DiagonalGaussianVariationalParameter', \
        ['mean', 'diag_lstd'])

    def __init__(self, init_mean_routine=None, init_zero_mean=True,
                 init_lstd_routine=None, init_std_level=.1,
                 mode='paired'):
        """
        The mean is randomized according to the specific layer randomization pattern unless init_zero_mean=True or
        init_mean_routine is provided. init_mean_routine takes precedence if not None.
        The diag_lstd is set to log(init_std_level*std(p)) where p follows the specific layer randomization pattern, unless
        init_lstd_routine is provided.
        By default, init_num_passes is set to 1 (mean) + 1 (lstd).
        init_mean_routine: target, source, *args -> None
        init_lstd_routine: target, source, *args -> None
        mode = 'single', 'paired'.
            single -> 1 sample per eps
            paired -> 2 samples per eps, symmetrized w.r.t. the mean; much better convergence properties for the mean.
        """
        super(DiagonalGaussianVariationalParameters, self).__init__()
        # Two initialization passes: pass 0 sets the means, pass 1 the log-stds
        # (see initialize_variational_parameter).
        self.init_num_passes = 2
        if mode == 'single':
            _n_samples = 1
            _mode_int = 0
        elif mode == 'paired':
            _n_samples = 2
            _mode_int = 1
        else:
            raise ValueError('Mode for Diagonal Gaussian Variational Inference not recognized: ' + str(mode) + \
                '. Use single or paired [default].')
        # Registered as buffers so they follow the module's state/device moves.
        self.register_buffer('_num_coupled_samples', Tensor([_n_samples]).int())
        self.register_buffer('_mode', Tensor([_mode_int]).int())
        # Sign lookup for paired sampling: coupled sample i scales eps by +1 or -1.
        _pm_lookup_m = torch.tensor([1, -1], dtype=torch.int32)
        self.register_buffer('_pm_lookup_m', _pm_lookup_m)
        # Mean initializer precedence: explicit routine > zeros > copy of source.
        if init_mean_routine is not None:
            self.init_mean_routine = init_mean_routine
        elif init_zero_mean:
            self.init_mean_routine = self._zero_parameter_data
        else:
            self.init_mean_routine = self._copy_parameter_data
        if init_lstd_routine is not None:
            self.init_lstd_routine = init_lstd_routine
        else:
            self.init_lstd_routine = lambda t, s, *args: self._fill_parameter_data_to_lstd(t, s, init_std_level)
        # caches (filled by initiate_rebuild_parameters; consumed by
        # mlog_q / entropy_q / normalisation)
        self._logdet_sqrta_cache = None
        self._sqnorm_epsa_cache = None
        self._dim_log_sqrt_2pi_cache = None
        self._dim_by_2_cache = None

    def num_coupled_samples(self):
        """
        For some distributions, inference works much better when using a tuple of joint samples or more.
        In that case, the Variationalize module needs to know in advance that it needs to keep track of more than one model.
        """
        return self._num_coupled_samples.item()

    def _to_variational_parameter(self, p):
        # Allocate uninitialized mean / log-std tensors matching p's shape;
        # actual values are set later via initialize_variational_parameter.
        mean = Parameter(torch.empty_like(p))
        diag_lstd = Parameter(torch.empty_like(p))
        return self.DiagonalGaussianVariationalParameter(mean, diag_lstd)

    def initialize_variational_parameter(self, vp, p, i, *args):
        # Pass i=0 initializes the mean, pass i=1 the log-std (init_num_passes == 2).
        if i == 0:
            self.init_mean_routine(vp.mean, p, *args)
        elif i == 1:
            self.init_lstd_routine(vp.diag_lstd, p, *args)
        else:
            raise ValueError("Expected i=0 or 1, received {}.".format(i))
        return None

    def sample_parameter_eps(self, vp, global_args):
        # One standard-normal eps per parameter element (shape of diag_lstd).
        return torch.randn_like(vp.diag_lstd)

    def initiate_rebuild_parameters(self, global_parameters, vp_list, global_eps, eps_list):
        r"""rebuild mlog q / entropy related caches; we exploit the matrix determinant lemma and Woodbury matrix identity."""
        # compute parameter contributions
        n = len(vp_list)
        vp_contributions = [self._parameter_cache_contribution(vp_list[i], eps_list[i])
                            for i in torch.arange(n)]
        contrib_logdet, contrib_sqnorm, contrib_dim = zip(*vp_contributions)
        # aggregate contributions
        self._logdet_sqrta_cache = torch.sum(torch.stack(contrib_logdet, dim=0))
        self._sqnorm_epsa_cache = torch.sum(torch.stack(contrib_sqnorm, dim=0))
        dim = torch.sum(torch.tensor(contrib_dim))
        # Constant Gaussian normalisation terms: dim*log(sqrt(2*pi)) and dim/2.
        self._dim_log_sqrt_2pi_cache = dim*(np.log(np.pi*2)/2)
        self._dim_by_2_cache = dim*0.5
        return

    @staticmethod
    def _parameter_cache_contribution(vp, eps):
        # (sum of log-stds, squared norm of eps, element count) for one parameter.
        return torch.sum(vp.diag_lstd), torch.sum(torch.pow(eps, 2)), eps.numel()

    def rebuild_parameter(self, vp, eps, global_args, i):
        # Coupled sample i: mean +/- eps * exp(diag_lstd); the sign comes
        # from _pm_lookup_m (paired mode yields antithetic +/- pairs).
        return vp.mean + torch.mul(self._pm_lookup_m[i],
                                   torch.mul(eps,torch.exp(vp.diag_lstd)))

    def sample_globals(self, global_parameters):
        # No shared/global latent variables for a diagonal Gaussian.
        return None

    def mlog_q(self, global_parameters, vp_list, p_list, i):
        # we shortcut computations directly in terms of eps.
        # allows to reuse much of the computations for all samples without having to cache much either.
        E = self._sqnorm_epsa_cache*.5
        return E + self.normalisation()

    def entropy_q(self, global_parameters, vp_list):
        # Entropy of a diagonal Gaussian: normalisation + dim/2 (both cached).
        return self.normalisation() + self._dim_by_2_cache

    def normalisation(self):
        # sum(log std) + dim*log(sqrt(2*pi)), cached by initiate_rebuild_parameters.
        return self._logdet_sqrta_cache + self._dim_log_sqrt_2pi_cache

    def variational_parameter_names(self):
        return self.DiagonalGaussianVariationalParameter._fields

    def variational_parameter_type(self):
        return self.DiagonalGaussianVariationalParameter

    def num_passes_initialize(self):
        return self.init_num_passes

    @staticmethod
    def _fill_parameter_data_to_lstd(target, source, *args):
        """
        Utility function for the initialization of variational parameters.

        Fills `target` in-place with log(rms(source) * args[0]); the scale
        factor is applied only when *args is non-empty. Falls back to 1e-3
        when the root-mean-square of `source` is exactly zero.
        """
        with torch.no_grad():
            # Note: this is the RMS of source (no mean subtraction), used as
            # a proxy for its scale.
            val = torch.sqrt(torch.mean(torch.pow(source,2)))
            if val==0.:
                val.data.fill_(1e-3)
            if args:
                val = val*args[0]
            val.log_()
            target.data.fill_(val) | [
"torch.stack",
"torch.randn_like",
"numpy.log",
"torch.exp",
"torch.Tensor",
"torch.empty_like",
"collections.namedtuple",
"torch.arange",
"torch.pow",
"torch.no_grad",
"torch.sum",
"torch.tensor"
] | [((402, 475), 'collections.namedtuple', 'namedtuple', (['"""DiagonalGaussianVariationalParameter"""', "['mean', 'diag_lstd']"], {}), "('DiagonalGaussianVariationalParameter', ['mean', 'diag_lstd'])\n", (412, 475), False, 'from collections import namedtuple\n'), ((2183, 2223), 'torch.tensor', 'torch.tensor', (['[1, -1]'], {'dtype': 'torch.int32'}), '([1, -1], dtype=torch.int32)\n', (2195, 2223), False, 'import torch\n'), ((3975, 4005), 'torch.randn_like', 'torch.randn_like', (['vp.diag_lstd'], {}), '(vp.diag_lstd)\n', (3991, 4005), False, 'import torch\n'), ((3400, 3419), 'torch.empty_like', 'torch.empty_like', (['p'], {}), '(p)\n', (3416, 3419), False, 'import torch\n'), ((3451, 3470), 'torch.empty_like', 'torch.empty_like', (['p'], {}), '(p)\n', (3467, 3470), False, 'import torch\n'), ((4624, 4658), 'torch.stack', 'torch.stack', (['contrib_logdet'], {'dim': '(0)'}), '(contrib_logdet, dim=0)\n', (4635, 4658), False, 'import torch\n'), ((4704, 4738), 'torch.stack', 'torch.stack', (['contrib_sqnorm'], {'dim': '(0)'}), '(contrib_sqnorm, dim=0)\n', (4715, 4738), False, 'import torch\n'), ((4773, 4798), 'torch.tensor', 'torch.tensor', (['contrib_dim'], {}), '(contrib_dim)\n', (4785, 4798), False, 'import torch\n'), ((5024, 5047), 'torch.sum', 'torch.sum', (['vp.diag_lstd'], {}), '(vp.diag_lstd)\n', (5033, 5047), False, 'import torch\n'), ((6434, 6449), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6447, 6449), False, 'import torch\n'), ((4432, 4447), 'torch.arange', 'torch.arange', (['n'], {}), '(n)\n', (4444, 4447), False, 'import torch\n'), ((4844, 4861), 'numpy.log', 'np.log', (['(np.pi * 2)'], {}), '(np.pi * 2)\n', (4850, 4861), True, 'import numpy as np\n'), ((5059, 5076), 'torch.pow', 'torch.pow', (['eps', '(2)'], {}), '(eps, 2)\n', (5068, 5076), False, 'import torch\n'), ((2058, 2078), 'torch.Tensor', 'Tensor', (['[_n_samples]'], {}), '([_n_samples])\n', (2064, 2078), False, 'from torch import Tensor\n'), ((2124, 2143), 'torch.Tensor', 'Tensor', 
(['[_mode_int]'], {}), '([_mode_int])\n', (2130, 2143), False, 'from torch import Tensor\n'), ((5268, 5291), 'torch.exp', 'torch.exp', (['vp.diag_lstd'], {}), '(vp.diag_lstd)\n', (5277, 5291), False, 'import torch\n'), ((6491, 6511), 'torch.pow', 'torch.pow', (['source', '(2)'], {}), '(source, 2)\n', (6500, 6511), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial.distance as distance
class Point(object):
    """A 2D point model fitted as the (optionally weighted) centroid of samples."""

    def __init__(self, data=None, weights=None):
        # Start unfitted; fit immediately when data is supplied.
        self.pt = None
        if data is None:
            return
        self.fit(data, weights=weights)

    @property
    def min_sample_size(self):
        """Smallest number of samples that determines a point."""
        return 1

    def fit(self, data, weights=None):
        """Fit self.pt as the weighted mean of `data` (an (n, 2) array).

        Raises ValueError when there are too few samples, too few nonzero
        weights, or the samples are not 2D.
        """
        if data.shape[0] < self.min_sample_size:
            raise ValueError('At least one point is needed to fit a point')
        too_few_effective = (
            weights is not None
            and np.count_nonzero(weights) < self.min_sample_size
        )
        if too_few_effective:
            raise ValueError('At least one point is needed to fit a point')
        if data.shape[1] != 2:
            raise ValueError('Points must be 2D')
        self.pt = np.average(data, weights=weights, axis=0)

    def distances(self, data):
        """Return the Euclidean distance from each row of `data` to the fitted point."""
        pairwise = distance.cdist(data, self.pt[np.newaxis, :])
        return np.squeeze(pairwise)

    def plot(self, **kwargs):
        """Scatter-plot the fitted point; edgecolor defaults to 'none'."""
        kwargs.setdefault('edgecolor', 'none')
        plt.scatter(self.pt[0], self.pt[1], **kwargs)
| [
"scipy.spatial.distance.cdist",
"matplotlib.pyplot.scatter",
"numpy.count_nonzero",
"numpy.average"
] | [((772, 813), 'numpy.average', 'np.average', (['data'], {'weights': 'weights', 'axis': '(0)'}), '(data, weights=weights, axis=0)\n', (782, 813), True, 'import numpy as np\n'), ((1036, 1081), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.pt[0]', 'self.pt[1]'], {}), '(self.pt[0], self.pt[1], **kwargs)\n', (1047, 1081), True, 'import matplotlib.pyplot as plt\n'), ((872, 916), 'scipy.spatial.distance.cdist', 'distance.cdist', (['data', 'self.pt[np.newaxis, :]'], {}), '(data, self.pt[np.newaxis, :])\n', (886, 916), True, 'import scipy.spatial.distance as distance\n'), ((545, 570), 'numpy.count_nonzero', 'np.count_nonzero', (['weights'], {}), '(weights)\n', (561, 570), True, 'import numpy as np\n')] |
#!/usr/bin/python3.6
"""Compute the rankability of a matrix read from a CSV file given on argv[1].

Runs an exhaustive search over rankings and prints the result (k, the set of
optimal rankings P) as JSON to stdout.
"""
import sys
import json
import numpy as np
import math

# First CLI argument: path to a CSV file holding the square matrix D.
problem_instance_file = sys.argv[1]
D = np.genfromtxt (problem_instance_file, delimiter=",")

# Now compute our solution
import pyrankability

# Exhaustive (brute-force) search over all rankings of D.
search = pyrankability.exact.ExhaustiveSearch(D)
search.find_P()
# Emit k (minimum number of changes) and P (optimal rankings) as JSON.
print(pyrankability.common.as_json(search.k,search.P,{}))
| [
"pyrankability.exact.ExhaustiveSearch",
"numpy.genfromtxt",
"pyrankability.common.as_json"
] | [((117, 168), 'numpy.genfromtxt', 'np.genfromtxt', (['problem_instance_file'], {'delimiter': '""","""'}), "(problem_instance_file, delimiter=',')\n", (130, 168), True, 'import numpy as np\n'), ((229, 268), 'pyrankability.exact.ExhaustiveSearch', 'pyrankability.exact.ExhaustiveSearch', (['D'], {}), '(D)\n', (265, 268), False, 'import pyrankability\n'), ((292, 344), 'pyrankability.common.as_json', 'pyrankability.common.as_json', (['search.k', 'search.P', '{}'], {}), '(search.k, search.P, {})\n', (320, 344), False, 'import pyrankability\n')] |
import contextlib
import functools
import hashlib
import json
import random
import re

import flowws
from flowws import Argument as Arg
import keras_gtar
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
try:
import tensorflow_addons as tfa
except ImportError:
tfa = None
@flowws.add_stage_arguments
class Train(flowws.Stage):
    """Compile the model assembled in the scope and fit it on (x_train, y_train).

    Optionally replicates the model `ring_count` times, sums the clone
    outputs through a softmax, and couples neighboring clones with a
    harmonic penalty of strength `ring_k` on their trainable weights.
    """

    ARGS = [
        Arg('optimizer', '-o', str, 'adam',
            help='optimizer to use'),
        Arg('epochs', '-e', int, 2000,
            help='Max number of epochs'),
        Arg('batch_size', '-b', int, 256,
            help='Batch size'),
        Arg('validation_split', '-v', float, .3),
        Arg('early_stopping', type=int),
        Arg('reduce_lr', type=int),
        Arg('ring_count', type=int),
        Arg('ring_k', type=float, default=1),
        Arg('ring_eps', type=float),
        Arg('dump_filename', '-f', default='dump.tar'),
        Arg('dump_period', '-d', int),
        Arg('seed', '-s', int),
        Arg('verbose', None, bool, True,
            help='If True, print the training progress'),
    ]

    @staticmethod
    def ring_name_updater(layer, i):
        """Clone `layer` with '_ring{i}' appended to its name (keeps clones unique)."""
        cfg = layer.get_config()
        cfg['name'] = cfg['name'] + '_ring{}'.format(i)
        return layer.__class__.from_config(cfg)

    def run(self, scope, storage):
        """Build, compile, and fit the model described by the workflow scope."""
        if 'seed' in self.arguments:
            # Seed python's RNG once, then derive numpy/tensorflow seeds from it.
            s = self.arguments['seed']
            random.seed(s)
            random.seed(random.randrange(2**32))
            np.random.seed(random.randrange(2**32))
            tf.random.set_seed(random.randrange(2**32))

        model = keras.models.Model(scope['input_symbol'], scope['output'])

        if self.arguments.get('ring_count', None):
            # Replicate the model with uniquely renamed layers, sum the clone
            # outputs into a softmax, and add a harmonic coupling loss between
            # each clone and its ring neighbor.
            models = []
            for i in range(self.arguments['ring_count']):
                clone = functools.partial(self.ring_name_updater, i=i)
                models.append(keras.models.clone_model(model, scope['input_symbol'], clone))

            final_output = K.sum([m.output for m in models], axis=0)
            final_output = K.softmax(final_output)
            model = keras.models.Model(scope['input_symbol'], final_output)

            for (left, right) in zip(models, np.roll(models, -1)):
                # Default arguments bind left/right eagerly, avoiding the
                # late-binding closure pitfall inside this loop.
                harmonic = lambda left=left, right=right: (
                    .5*self.arguments['ring_k']*sum(
                        K.sum(K.square(l - r)) for (l, r) in zip(left.trainable_weights, right.trainable_weights)))
                model.add_loss(harmonic)

        scope['model'] = model

        for term in scope.get('extra_losses', []):
            model.add_loss(term)

        metrics = scope.get('metrics', [])
        model.compile(self.arguments['optimizer'], loss=scope['loss'], metrics=metrics)

        if self.arguments.get('ring_count', None) and self.arguments.get('ring_eps', None):
            print('randomizing ring weights')
            eps = self.arguments['ring_eps']
            names = [l.name for l in model.layers if 'ring' in l.name]
            base_values = {}
            for name in names:
                # Group ring clones by their base (pre-'_ringN') layer name;
                # the first clone seen provides the reference weights.
                base = re.sub(r'_ring\d+$', '', name)
                layer = model.get_layer(name)
                if base in base_values:
                    for (value, tensor) in zip(base_values[base], layer.trainable_weights):
                        # Multiplicative gaussian perturbation around the base value.
                        new_value = value*np.random.normal(loc=1, scale=eps, size=value.shape)
                        # Bug fix: previously assigned the unperturbed `value`,
                        # discarding new_value and leaving all clones identical.
                        tensor.assign(new_value)
                else:
                    base_values[base] = [w.numpy() for w in layer.trainable_weights]
        else:
            print('not randomizing ring weights')

        callbacks = scope.get('callbacks', [])
        verbose = self.arguments['verbose']
        if tfa is not None and verbose:
            # Delegate progress display to the TQDM callback when available.
            callbacks.append(tfa.callbacks.TQDMProgressBar(
                show_epoch_progress=False, update_per_second=1))
            verbose = False
        if 'early_stopping' in self.arguments:
            callbacks.append(keras.callbacks.EarlyStopping(
                patience=self.arguments['early_stopping'], monitor='val_loss'))
        if 'reduce_lr' in self.arguments:
            callbacks.append(keras.callbacks.ReduceLROnPlateau(
                patience=self.arguments['reduce_lr'], monitor='val_loss', factor=.5, verbose=True))

        with contextlib.ExitStack() as context_stack:
            if self.arguments.get('dump_period', None):
                # Periodically dump weights to a GTAR archive; the archive
                # modifier is a hash of the workflow description.
                modifiers = [hashlib.sha1(json.dumps(scope['workflow'].to_JSON()).encode()).hexdigest()[:32]]
                handle = context_stack.enter_context(storage.open(
                    scope.get('dump_filename', 'dump.tar'), 'a', modifiers, on_filesystem=True))
                cbk = keras_gtar.GTARLogger(
                    handle.name, self.arguments['dump_period'], append=True, when='pre_epoch')
                callbacks.append(cbk)

            model.fit(
                scope['x_train'], scope['y_train'], verbose=verbose, epochs=self.arguments['epochs'],
                batch_size=self.arguments['batch_size'], validation_split=self.arguments['validation_split'],
                callbacks=callbacks)
| [
"flowws.Argument",
"functools.partial",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.square",
"tensorflow.keras.backend.softmax",
"numpy.roll",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"keras_gtar.GTARLogger",
"tensorflow.keras.models.clone_model",
"contextlib.ExitStack",
"te... | [((411, 471), 'flowws.Argument', 'Arg', (['"""optimizer"""', '"""-o"""', 'str', '"""adam"""'], {'help': '"""optimizer to use"""'}), "('optimizer', '-o', str, 'adam', help='optimizer to use')\n", (414, 471), True, 'from flowws import Argument as Arg\n'), ((492, 551), 'flowws.Argument', 'Arg', (['"""epochs"""', '"""-e"""', 'int', '(2000)'], {'help': '"""Max number of epochs"""'}), "('epochs', '-e', int, 2000, help='Max number of epochs')\n", (495, 551), True, 'from flowws import Argument as Arg\n'), ((572, 624), 'flowws.Argument', 'Arg', (['"""batch_size"""', '"""-b"""', 'int', '(256)'], {'help': '"""Batch size"""'}), "('batch_size', '-b', int, 256, help='Batch size')\n", (575, 624), True, 'from flowws import Argument as Arg\n'), ((645, 686), 'flowws.Argument', 'Arg', (['"""validation_split"""', '"""-v"""', 'float', '(0.3)'], {}), "('validation_split', '-v', float, 0.3)\n", (648, 686), True, 'from flowws import Argument as Arg\n'), ((695, 726), 'flowws.Argument', 'Arg', (['"""early_stopping"""'], {'type': 'int'}), "('early_stopping', type=int)\n", (698, 726), True, 'from flowws import Argument as Arg\n'), ((736, 762), 'flowws.Argument', 'Arg', (['"""reduce_lr"""'], {'type': 'int'}), "('reduce_lr', type=int)\n", (739, 762), True, 'from flowws import Argument as Arg\n'), ((772, 799), 'flowws.Argument', 'Arg', (['"""ring_count"""'], {'type': 'int'}), "('ring_count', type=int)\n", (775, 799), True, 'from flowws import Argument as Arg\n'), ((809, 845), 'flowws.Argument', 'Arg', (['"""ring_k"""'], {'type': 'float', 'default': '(1)'}), "('ring_k', type=float, default=1)\n", (812, 845), True, 'from flowws import Argument as Arg\n'), ((855, 882), 'flowws.Argument', 'Arg', (['"""ring_eps"""'], {'type': 'float'}), "('ring_eps', type=float)\n", (858, 882), True, 'from flowws import Argument as Arg\n'), ((892, 938), 'flowws.Argument', 'Arg', (['"""dump_filename"""', '"""-f"""'], {'default': '"""dump.tar"""'}), "('dump_filename', '-f', default='dump.tar')\n", (895, 938), 
True, 'from flowws import Argument as Arg\n'), ((948, 977), 'flowws.Argument', 'Arg', (['"""dump_period"""', '"""-d"""', 'int'], {}), "('dump_period', '-d', int)\n", (951, 977), True, 'from flowws import Argument as Arg\n'), ((987, 1009), 'flowws.Argument', 'Arg', (['"""seed"""', '"""-s"""', 'int'], {}), "('seed', '-s', int)\n", (990, 1009), True, 'from flowws import Argument as Arg\n'), ((1019, 1096), 'flowws.Argument', 'Arg', (['"""verbose"""', 'None', 'bool', '(True)'], {'help': '"""If True, print the training progress"""'}), "('verbose', None, bool, True, help='If True, print the training progress')\n", (1022, 1096), True, 'from flowws import Argument as Arg\n'), ((1622, 1680), 'tensorflow.keras.models.Model', 'keras.models.Model', (["scope['input_symbol']", "scope['output']"], {}), "(scope['input_symbol'], scope['output'])\n", (1640, 1680), False, 'from tensorflow import keras\n'), ((1433, 1447), 'random.seed', 'random.seed', (['s'], {}), '(s)\n', (1444, 1447), False, 'import random\n'), ((2006, 2047), 'tensorflow.keras.backend.sum', 'K.sum', (['[m.output for m in models]'], {'axis': '(0)'}), '([m.output for m in models], axis=0)\n', (2011, 2047), True, 'from tensorflow.keras import backend as K\n'), ((2075, 2098), 'tensorflow.keras.backend.softmax', 'K.softmax', (['final_output'], {}), '(final_output)\n', (2084, 2098), True, 'from tensorflow.keras import backend as K\n'), ((2119, 2174), 'tensorflow.keras.models.Model', 'keras.models.Model', (["scope['input_symbol']", 'final_output'], {}), "(scope['input_symbol'], final_output)\n", (2137, 2174), False, 'from tensorflow import keras\n'), ((4313, 4335), 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), '()\n', (4333, 4335), False, 'import contextlib\n'), ((1472, 1497), 'random.randrange', 'random.randrange', (['(2 ** 32)'], {}), '(2 ** 32)\n', (1488, 1497), False, 'import random\n'), ((1524, 1549), 'random.randrange', 'random.randrange', (['(2 ** 32)'], {}), '(2 ** 32)\n', (1540, 1549), False, 'import 
random\n'), ((1580, 1605), 'random.randrange', 'random.randrange', (['(2 ** 32)'], {}), '(2 ** 32)\n', (1596, 1605), False, 'import random\n'), ((1839, 1885), 'functools.partial', 'functools.partial', (['self.ring_name_updater'], {'i': 'i'}), '(self.ring_name_updater, i=i)\n', (1856, 1885), False, 'import functools\n'), ((2221, 2240), 'numpy.roll', 'np.roll', (['models', '(-1)'], {}), '(models, -1)\n', (2228, 2240), True, 'import numpy as np\n'), ((3780, 3857), 'tensorflow_addons.callbacks.TQDMProgressBar', 'tfa.callbacks.TQDMProgressBar', ([], {'show_epoch_progress': '(False)', 'update_per_second': '(1)'}), '(show_epoch_progress=False, update_per_second=1)\n', (3809, 3857), True, 'import tensorflow_addons as tfa\n'), ((3981, 4077), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'patience': "self.arguments['early_stopping']", 'monitor': '"""val_loss"""'}), "(patience=self.arguments['early_stopping'],\n monitor='val_loss')\n", (4010, 4077), False, 'from tensorflow import keras\n'), ((4164, 4285), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'keras.callbacks.ReduceLROnPlateau', ([], {'patience': "self.arguments['reduce_lr']", 'monitor': '"""val_loss"""', 'factor': '(0.5)', 'verbose': '(True)'}), "(patience=self.arguments['reduce_lr'],\n monitor='val_loss', factor=0.5, verbose=True)\n", (4197, 4285), False, 'from tensorflow import keras\n'), ((4706, 4807), 'keras_gtar.GTARLogger', 'keras_gtar.GTARLogger', (['handle.name', "self.arguments['dump_period']"], {'append': '(True)', 'when': '"""pre_epoch"""'}), "(handle.name, self.arguments['dump_period'], append=\n True, when='pre_epoch')\n", (4727, 4807), False, 'import keras_gtar\n'), ((1916, 1977), 'tensorflow.keras.models.clone_model', 'keras.models.clone_model', (['model', "scope['input_symbol']", 'clone'], {}), "(model, scope['input_symbol'], clone)\n", (1940, 1977), False, 'from tensorflow import keras\n'), ((3349, 3401), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 
'(1)', 'scale': 'eps', 'size': 'value.shape'}), '(loc=1, scale=eps, size=value.shape)\n', (3365, 3401), True, 'import numpy as np\n'), ((2382, 2397), 'tensorflow.keras.backend.square', 'K.square', (['(l - r)'], {}), '(l - r)\n', (2390, 2397), True, 'from tensorflow.keras import backend as K\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.