code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# %%
import numpy as np
import numba
# meta parameters
LINEWIDTH = 0.3
EXPERIMENT_UNCERTAIN_SCALE = 10
# reference data
grid_short = np.array([ 0.138912671079212, 0.136826895237182, 0.134802828739590, 0.132837772927060, 0.130929184235579, 0.129074663212412, 0.127271944452462, 0.125518887366340, 0.123813467701037, 0.122153769742578, 0.120537979137517, 0.118964376276714, 0.117431330190674, 0.115937292910894, 0.114480794256235, 0.113060437007398, 0.111674892436229, 0.110322896159762, 0.109003244291822, 0.107714789867569, 0.106456439518648, 0.105227150378710, 0.104025927200871, 0.102851819670387, 0.101703919897280, 0.100581360075014, 0.099483310292536, 0.098408976488081, 0.097357598534149])
experiment_short = np.array([16062.12376,16022.67847,14294.97459,13537.62494,11707.36329,8788.411529,6153.465882,3234.514118,2193.158353,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
mae = np.array((0.009367729738238392, 0.007644026369760024, 0.006634207502480805, 0.03514044031325359, 0.037518177682891826, 0.035671701408550395))
# Definition of spectrum
@numba.jit(nopython=True)
def spectrum(values):
sigma_inv=1/(LINEWIDTH*0.036749304951208)
const=16836.70285
dec=0.4342944819032518
return const*dec*(0.05746523219) * sigma_inv*(values[3]*np.exp(-0.5* ((grid_short-values[0]) * sigma_inv) ** 2) +values[4]*np.exp(-0.5* ((grid_short-values[1]) * sigma_inv) ** 2) +values[5]*np.exp(-0.5* ((grid_short-values[2]) * sigma_inv) ** 2))
# %%
upper_limit = spectrum((0.145, 1, 1, 0.5, 0, 0))
upper_limit = np.maximum(upper_limit, experiment_short + 300)
#upper_limit = np.maximum(upper_limit, experiment_short * 2)
upper_limit[:10] += 4000
lower_limit = experiment_short
lower_limit = np.minimum(lower_limit, experiment_short - 4000)
lower_limit = np.maximum(lower_limit, 0)
# %%
def draw_random_from_molecular_distribution(centers, mae, N):
sigma = mae / np.sqrt(2/np.pi)
# random oscillator strengths
osz1 = np.random.normal(centers[3], sigma[3], N)
osz1 = osz1[osz1 > 0]
osz2 = np.random.normal(centers[4], sigma[4], len(osz1))
osz2 = osz2[osz2 > 0]
osz3 = np.random.normal(centers[5], sigma[5], len(osz2))
osz3 = osz3[osz3 > 0]
# trim length
N = len(osz3)
osz1 = osz1[:N]
osz2 = osz2[:N]
# matching energies
S1 = np.random.normal(centers[0], sigma[0], N)
S2 = np.random.normal(centers[1], sigma[1], N)
S3 = np.random.normal(centers[2], sigma[2], N)
# glue
return np.vstack((S1, S2, S3, osz1, osz2, osz3))
def monte_carlo_probability(candidate, N):
candidates = draw_random_from_molecular_distribution(candidate, mae, N).T
success = 0
trials = 0
tmax = max(experiment_short)
for candidate in candidates:
s = spectrum(candidate)
trials += 1
rescale = max(s) / tmax
if rescale > EXPERIMENT_UNCERTAIN_SCALE or rescale < 1/EXPERIMENT_UNCERTAIN_SCALE:
continue
s /= rescale
if np.all(s >= lower_limit) and np.all(s <= upper_limit):
success += 1
return success / N
# %%
class Task:
def __init__(self, connection):
pass
def run(self, commandstring):
try:
dbg = [float(_) for _ in commandstring.split(",")]
probability = str(monte_carlo_probability(dbg, 10000))
except:
probability = "--"
return probability, 0
| [
"numpy.minimum",
"numpy.maximum",
"numpy.all",
"numba.jit",
"numpy.array",
"numpy.random.normal",
"numpy.exp",
"numpy.vstack",
"numpy.sqrt"
] | [((157, 758), 'numpy.array', 'np.array', (['[0.138912671079212, 0.136826895237182, 0.13480282873959, 0.13283777292706, \n 0.130929184235579, 0.129074663212412, 0.127271944452462, \n 0.12551888736634, 0.123813467701037, 0.122153769742578, \n 0.120537979137517, 0.118964376276714, 0.117431330190674, \n 0.115937292910894, 0.114480794256235, 0.113060437007398, \n 0.111674892436229, 0.110322896159762, 0.109003244291822, \n 0.107714789867569, 0.106456439518648, 0.10522715037871, \n 0.104025927200871, 0.102851819670387, 0.10170391989728, \n 0.100581360075014, 0.099483310292536, 0.098408976488081, 0.097357598534149]'], {}), '([0.138912671079212, 0.136826895237182, 0.13480282873959, \n 0.13283777292706, 0.130929184235579, 0.129074663212412, \n 0.127271944452462, 0.12551888736634, 0.123813467701037, \n 0.122153769742578, 0.120537979137517, 0.118964376276714, \n 0.117431330190674, 0.115937292910894, 0.114480794256235, \n 0.113060437007398, 0.111674892436229, 0.110322896159762, \n 0.109003244291822, 0.107714789867569, 0.106456439518648, \n 0.10522715037871, 0.104025927200871, 0.102851819670387, \n 0.10170391989728, 0.100581360075014, 0.099483310292536, \n 0.098408976488081, 0.097357598534149])\n', (165, 758), True, 'import numpy as np\n'), ((739, 935), 'numpy.array', 'np.array', (['[16062.12376, 16022.67847, 14294.97459, 13537.62494, 11707.36329, \n 8788.411529, 6153.465882, 3234.514118, 2193.158353, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {}), '([16062.12376, 16022.67847, 14294.97459, 13537.62494, 11707.36329, \n 8788.411529, 6153.465882, 3234.514118, 2193.158353, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n', (747, 935), True, 'import numpy as np\n'), ((905, 1050), 'numpy.array', 'np.array', (['(0.009367729738238392, 0.007644026369760024, 0.006634207502480805, \n 0.03514044031325359, 0.037518177682891826, 0.035671701408550395)'], {}), '((0.009367729738238392, 0.007644026369760024, 0.006634207502480805,\n 0.03514044031325359, 0.037518177682891826, 0.035671701408550395))\n', (913, 1050), True, 'import numpy as np\n'), ((1074, 1098), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1083, 1098), False, 'import numba\n'), ((1537, 1584), 'numpy.maximum', 'np.maximum', (['upper_limit', '(experiment_short + 300)'], {}), '(upper_limit, experiment_short + 300)\n', (1547, 1584), True, 'import numpy as np\n'), ((1718, 1766), 'numpy.minimum', 'np.minimum', (['lower_limit', '(experiment_short - 4000)'], {}), '(lower_limit, experiment_short - 4000)\n', (1728, 1766), True, 'import numpy as np\n'), ((1781, 1807), 'numpy.maximum', 'np.maximum', (['lower_limit', '(0)'], {}), '(lower_limit, 0)\n', (1791, 1807), True, 'import numpy as np\n'), ((1961, 2002), 'numpy.random.normal', 'np.random.normal', (['centers[3]', 'sigma[3]', 'N'], {}), '(centers[3], sigma[3], N)\n', (1977, 2002), True, 'import numpy as np\n'), ((2314, 2355), 'numpy.random.normal', 'np.random.normal', (['centers[0]', 'sigma[0]', 'N'], {}), '(centers[0], sigma[0], N)\n', (2330, 2355), True, 'import numpy as np\n'), ((2365, 2406), 'numpy.random.normal', 'np.random.normal', (['centers[1]', 'sigma[1]', 'N'], {}), '(centers[1], sigma[1], N)\n', (2381, 2406), True, 'import numpy as np\n'), ((2416, 2457), 'numpy.random.normal', 'np.random.normal', (['centers[2]', 'sigma[2]', 'N'], {}), '(centers[2], sigma[2], N)\n', (2432, 2457), True, 'import numpy as np\n'), ((2481, 2522), 'numpy.vstack', 'np.vstack', (['(S1, S2, S3, osz1, osz2, osz3)'], {}), '((S1, S2, S3, osz1, osz2, osz3))\n', (2490, 2522), True, 'import numpy as np\n'), ((1894, 1912), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (1901, 1912), True, 'import numpy as np\n'), ((2970, 2994), 'numpy.all', 'np.all', (['(s >= lower_limit)'], {}), '(s >= lower_limit)\n', (2976, 2994), True, 'import numpy as np\n'), ((2999, 3023), 'numpy.all', 'np.all', (['(s <= upper_limit)'], {}), '(s <= upper_limit)\n', (3005, 3023), True, 'import numpy as np\n'), ((1410, 1468), 'numpy.exp', 'np.exp', (['(-0.5 * ((grid_short - values[2]) * sigma_inv) ** 2)'], {}), '(-0.5 * ((grid_short - values[2]) * sigma_inv) ** 2)\n', (1416, 1468), True, 'import numpy as np\n'), ((1276, 1334), 'numpy.exp', 'np.exp', (['(-0.5 * ((grid_short - values[0]) * sigma_inv) ** 2)'], {}), '(-0.5 * ((grid_short - values[0]) * sigma_inv) ** 2)\n', (1282, 1334), True, 'import numpy as np\n'), ((1343, 1401), 'numpy.exp', 'np.exp', (['(-0.5 * ((grid_short - values[1]) * sigma_inv) ** 2)'], {}), '(-0.5 * ((grid_short - values[1]) * sigma_inv) ** 2)\n', (1349, 1401), True, 'import numpy as np\n')] |
from torch.utils.data import DataLoader, Dataset
import torch
import h5py
import numpy as np
import os
import random
import json
import copy
def get_dataloader(phase, config):
is_shuffle = phase == 'train'
dataset = SDFdata(phase, config, shuffle=is_shuffle)
dataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=is_shuffle, num_workers=config.num_workers,
worker_init_fn=np.random.seed())
return dataloader
def get_all_info(CUR_PATH):
with open(CUR_PATH+'/info.json') as json_file:
data = json.load(json_file)
lst_dir, cats, all_cats, raw_dirs = data["lst_dir"], data['cats'], data['all_cats'], data["raw_dirs_v1"]
return lst_dir, cats, all_cats, raw_dirs
def getids(info_dir,sdf_h5_path,cat_id,num_views):
ids = []
with open(info_dir, 'r') as f:
lines = f.read().splitlines()
for line in lines:
sdf_path = os.path.join(sdf_h5_path, cat_id, line.strip(), 'ori_sample.h5')
if os.path.exists(sdf_path):
for render in range(num_views):
ids += [(cat_id, line.strip(), render)]
return ids
class SDFdata(Dataset):
def __init__(self, phase, config, shuffle):
self.config = config
self.shuffle = shuffle
self.sdf_h5_path = config.sdf_h5_path
self.render_h5_path = config.render_h5_path
self.mesh_obj_path = config.mesh_obj_path
self.id_path = config.id_path
self.categorys = config.category.split(',')
self.gen_num_pt = config.num_sample_points
lst_dir, cats, all_cats, raw_dirs = get_all_info(self.id_path)
if 'all' in self.categorys:
self.categorys = cats
else:
used_categorys = {}
for c in self.categorys:
used_categorys[c] = cats[c]
self.categorys = used_categorys
self.views = 24
self.cats_limit = {}
self.epoch_amount = 0
self.ids = []
for cat_name, cat_id in self.categorys.items():
cat_list = os.path.join(self.id_path, cat_id + '_' + phase + '.lst')
idlist = getids(cat_list,self.sdf_h5_path,cat_id,self.views)
self.ids += idlist
if phase == 'train':
self.cats_limit[cat_id] = min(len(idlist), config.cat_limit)
else:
self.cats_limit[cat_id] = len(idlist)
self.epoch_amount += self.cats_limit[cat_id]
self.data_order = list(range(len(self.ids)))
self.order = self.data_order #self.order would be changed in each iteration
print('num of ',phase,' data:',self.epoch_amount)
def __len__(self):
return self.epoch_amount
def resample_data(self):
if self.shuffle:
self.order = self.refill_data_order()
print("data order reordered!")
def refill_data_order(self):
temp_order = copy.deepcopy(self.data_order)
cats_quota = {key: value for key, value in self.cats_limit.items()}
np.random.shuffle(temp_order)
pointer = 0
epoch_order=[]
while len(epoch_order) < self.epoch_amount:
cat_id, _, _ = self.ids[temp_order[pointer]]
if cats_quota[cat_id] > 0:
epoch_order.append(temp_order[pointer])
cats_quota[cat_id]-=1
pointer+=1
return epoch_order
def get_sdf_h5(self, sdf_h5_file, cat_id, obj):
h5_f = h5py.File(sdf_h5_file, 'r')
try:
if ('pc_sdf_sample' in h5_f.keys()
and 'sdf_params' in h5_f.keys()):
sample_sdf = h5_f['pc_sdf_sample'][:].astype(np.float32)
if sample_sdf.shape[1] == 4:
sample_pt, sample_sdf_val = sample_sdf[:, :3], sample_sdf[:, 3]
else:
sample_pt, sample_sdf_val = None, sample_sdf[:, 0]
sdf_params = h5_f['sdf_params'][:]
else:
raise Exception(cat_id, obj, "no sdf and sample")
finally:
h5_f.close()
return sample_pt, sample_sdf_val, sdf_params
def get_img(self, img_h5):
with h5py.File(img_h5, 'r') as h5_f:
trans_mat = h5_f["trans_mat"][:].astype(np.float32)
img_raw = h5_f["img_arr"][:]
img_arr = img_raw[:, :, :3]
img_arr = np.clip(img_arr, 0, 255)
img_arr = img_arr.astype(np.float32) / 255.
return img_arr, trans_mat
def __getitem__(self, index):
cat_id, sdf_name, view = self.ids[self.order[index]]
sdf_path = os.path.join(self.sdf_h5_path, cat_id, sdf_name, 'ori_sample.h5')
render_path = os.path.join(self.render_h5_path, cat_id, sdf_name, "%02d.h5" % view)
sample_pt, sample_sdf_val, sdf_params = self.get_sdf_h5(sdf_path, cat_id, sdf_name)
img, trans_mat = self.get_img(render_path)
return {'sdf_pt':sample_pt,
'sdf_val':sample_sdf_val,
'sdf_params':sdf_params,
'img': img, #HxWx4 (137x137)
'trans_mat': trans_mat, #3x4
'cat_id':cat_id,
'view_id':view,
'obj_nm':render_path.split('/')[-2]
}
| [
"copy.deepcopy",
"json.load",
"h5py.File",
"numpy.random.seed",
"os.path.exists",
"numpy.clip",
"os.path.join",
"numpy.random.shuffle"
] | [((566, 586), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (575, 586), False, 'import json\n'), ((2940, 2970), 'copy.deepcopy', 'copy.deepcopy', (['self.data_order'], {}), '(self.data_order)\n', (2953, 2970), False, 'import copy\n'), ((3055, 3084), 'numpy.random.shuffle', 'np.random.shuffle', (['temp_order'], {}), '(temp_order)\n', (3072, 3084), True, 'import numpy as np\n'), ((3488, 3515), 'h5py.File', 'h5py.File', (['sdf_h5_file', '"""r"""'], {}), "(sdf_h5_file, 'r')\n", (3497, 3515), False, 'import h5py\n'), ((4634, 4699), 'os.path.join', 'os.path.join', (['self.sdf_h5_path', 'cat_id', 'sdf_name', '"""ori_sample.h5"""'], {}), "(self.sdf_h5_path, cat_id, sdf_name, 'ori_sample.h5')\n", (4646, 4699), False, 'import os\n'), ((4722, 4791), 'os.path.join', 'os.path.join', (['self.render_h5_path', 'cat_id', 'sdf_name', "('%02d.h5' % view)"], {}), "(self.render_h5_path, cat_id, sdf_name, '%02d.h5' % view)\n", (4734, 4791), False, 'import os\n'), ((431, 447), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (445, 447), True, 'import numpy as np\n'), ((1013, 1037), 'os.path.exists', 'os.path.exists', (['sdf_path'], {}), '(sdf_path)\n', (1027, 1037), False, 'import os\n'), ((2083, 2140), 'os.path.join', 'os.path.join', (['self.id_path', "(cat_id + '_' + phase + '.lst')"], {}), "(self.id_path, cat_id + '_' + phase + '.lst')\n", (2095, 2140), False, 'import os\n'), ((4200, 4222), 'h5py.File', 'h5py.File', (['img_h5', '"""r"""'], {}), "(img_h5, 'r')\n", (4209, 4222), False, 'import h5py\n'), ((4399, 4423), 'numpy.clip', 'np.clip', (['img_arr', '(0)', '(255)'], {}), '(img_arr, 0, 255)\n', (4406, 4423), True, 'import numpy as np\n')] |
import sys
import pygame
from pygame import *
from game_objects import *
from settings import *
from network import *
from population import *
from genome import *
from level_design import levels
from input import *
import numpy as np
from time import gmtime, strftime
global cameraX, cameraY
import pandas as pd
# TODO: remove/alter all code marked with "inputs debug functionality" when inputs method is assured to work
pygame.init()
screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)
pygame.display.set_caption("Don't Fall Down!")
timer = pygame.time.Clock()
is_paused = False
bestFitness = 0
informationforscreen = None
today = "./saved/" + strftime("%Y_%m_%d__%H_%M_%S", gmtime())
menu_background = pygame.image.load('assets/background_menu.png').convert()
tutorial_background = pygame.image.load('assets/background_and_tutorial.png').convert()
debug_images = []
for i in range(0, 3, 1):
image = pygame.image.load('assets/debugcell{0}.png'.format(i)).convert()
image = pygame.transform.scale(image, (48, 48))
debug_images.append(image)
bg = pygame.image.load('assets/S.png').convert()
bg = pygame.transform.scale(bg, (48, 48))
platform_images = []
for i in range(1, 6, 1):
image = pygame.image.load('assets/C{0}.png'.format(i)).convert()
image = pygame.transform.scale(image, (48, 48))
platform_images.append(image)
spike = pygame.image.load('assets/spike.png').convert_alpha()
spike = pygame.transform.scale(spike, (48, 48))
def exit_game():
sys.exit(0)
def event_resume():
pygame.event.post(pygame.event.Event(resume))
def event_restart_level():
pygame.event.post(pygame.event.Event(restart_level))
def event_back_to_menu():
pygame.event.post(pygame.event.Event(back_to_menu))
def event_pause():
pygame.event.post(pygame.event.Event(pause))
def event_next_level():
pygame.event.post(pygame.event.Event(next_level))
def text_objects(text, font, color=(0, 0, 0)):
textSurface = font.render(text, True, color)
return textSurface, textSurface.get_rect()
def button(msg, x, y, w, h, ic, ac, action=None):
mouse_pos = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse_pos[0] > x and y+h > mouse_pos[1] > y:
pygame.draw.rect(screen, ac, (x, y, w, h))
if click[0] == 1 and action is not None:
action()
else:
pygame.draw.rect(screen, ic, (x, y, w, h))
smallText = pygame.font.SysFont(FONT2, 20)
textSurf, textRect = text_objects(msg, smallText)
textRect.center = ((x+(w/2)), (y+(h/2)))
screen.blit(textSurf, textRect)
def tutorial():
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == back_to_menu:
return
screen.fill(WHITE)
screen.blit(tutorial_background, (0, 0))
button("Go back", 10, 10, 200, 50, PRIMARY, PRIMARY_HOVER, event_back_to_menu)
pygame.display.update()
timer.tick(15)
#inputs debug functionality
def pause_menu(test_array):
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == back_to_menu \
or event.type == resume:
return event.type
largeText = pygame.font.SysFont(FONT, 115)
TextSurf, TextRect = text_objects("Game paused", largeText, WHITE)
TextRect.center = (HALF_WIDTH, 50)
screen.blit(TextSurf, TextRect)
button("Resume", HALF_WIDTH-100, 200, 200, 50, PRIMARY, PRIMARY_HOVER, event_resume)
button("Back to menu", HALF_WIDTH-100, 320, 200, 50, DANGER, DANGER_HOVER, event_back_to_menu)
pygame.display.update()
timer.tick(15)
def you_win_menu(index):
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == restart_level \
or event.type == back_to_menu \
or event.type == next_level:
return event.type
if event.type == KEYDOWN and event.key == K_SPACE:
return next_level
text = "You win!"
if len(levels)-1 == index:
text = "You beat the game!"
else:
button("Next level", HALF_WIDTH-100, 200, 200, 50, PRIMARY, PRIMARY_HOVER, event_next_level)
largeText = pygame.font.SysFont(FONT2, 40)
TextSurf, TextRect = text_objects("Hit 'space' for Next level", largeText, WHITE)
TextRect.center = (HALF_WIDTH, WIN_HEIGHT - 40)
screen.blit(TextSurf, TextRect)
largeText = pygame.font.SysFont(FONT, 115)
TextSurf, TextRect = text_objects(text, largeText, WHITE)
TextRect.center = (HALF_WIDTH, 50)
screen.blit(TextSurf, TextRect)
button("Restart level", HALF_WIDTH - 100, 260, 200, 50, PRIMARY, PRIMARY_HOVER, event_restart_level)
button("Back to menu", HALF_WIDTH-100, 320, 200, 50, DANGER, DANGER_HOVER, event_back_to_menu)
pygame.display.update()
timer.tick(15)
def game_over_menu():
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == restart_level or event.type == back_to_menu:
return event.type
if event.type == KEYDOWN and event.key == K_SPACE:
return restart_level
text = "You loose!"
largeText = pygame.font.SysFont(FONT, 115)
TextSurf, TextRect = text_objects(text, largeText, DANGER)
TextRect.center = (HALF_WIDTH, 50)
screen.blit(TextSurf, TextRect)
button("Restart level", HALF_WIDTH - 100, 260, 200, 50, PRIMARY, PRIMARY_HOVER, event_restart_level)
button("Back to menu", HALF_WIDTH - 100, 320, 200, 50, DANGER, DANGER_HOVER, event_back_to_menu)
largeText = pygame.font.SysFont(FONT2, 40)
TextSurf, TextRect = text_objects("Hit 'space' to restart", largeText, DANGER)
TextRect.center = (HALF_WIDTH, WIN_HEIGHT - 40)
screen.blit(TextSurf, TextRect)
pygame.display.update()
timer.tick(15)
def main_menu():
intro = True
while intro:
for event in pygame.event.get():
# print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
screen.fill(WHITE)
screen.blit(menu_background, (0, 0))
largeText = pygame.font.SysFont(FONT, 115)
TextSurf, TextRect = text_objects("Don't Fall Down!", largeText, WHITE)
TextRect.center = (HALF_WIDTH, 50)
screen.blit(TextSurf, TextRect)
button("Begin", HALF_WIDTH-100, 200, 200, 50, PRIMARY, PRIMARY_HOVER, level_menu)
#button("Tutorial", HALF_WIDTH - 100, 260, 200, 50, PRIMARY, PRIMARY_HOVER, tutorial)
button("Quit", HALF_WIDTH-100, 320, 200, 50, DANGER, DANGER_HOVER, exit_game)
largeText = pygame.font.SysFont(FONT2, 20)
TextSurf, TextRect = text_objects("Game developed by <NAME> [2017-12]", largeText, WHITE)
TextRect.center = (HALF_WIDTH, WIN_HEIGHT-30)
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("AI with evolutionary algorithm developed by <NAME> and <NAME> [2018-06]", largeText, WHITE)
TextRect.center = (HALF_WIDTH, WIN_HEIGHT-15)
screen.blit(TextSurf, TextRect)
pygame.display.update()
timer.tick(15)
def score_board():
if informationforscreen != None:
largeText = pygame.font.SysFont(FONT, 20)
TextSurf, TextRect = text_objects("Generation: {0}".format(informationforscreen['generation']), largeText, WHITE)
TextRect.top = 15
TextRect.left = WIN_WIDTH - 200
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("Bot NR: {0}".format(informationforscreen['botnumber']), largeText, WHITE)
TextRect.top = 30
TextRect.left = WIN_WIDTH - 200
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("Last fitness: {0}".format(informationforscreen['lastfitness']), largeText, WHITE)
TextRect.top = 45
TextRect.left = WIN_WIDTH - 200
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("Last Gen avg fitness: {0}".format(informationforscreen['lastgenerationaveragefitness']), largeText, WHITE)
TextRect.top = 60
TextRect.left = WIN_WIDTH - 200
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("Best fitness: {0}".format(informationforscreen['bestfitness']), largeText, WHITE)
TextRect.top = 75
TextRect.left = WIN_WIDTH - 200
screen.blit(TextSurf, TextRect)
def level_menu(level=levels[0], index=0):
localBestFitness = -1
population = Population()
population.generateRandomPopulation()
generation = 1
results = pd.DataFrame(columns=['generation', 'fitness'])
lastgenerationaveragefitness = 0
#Main Loop
while generation <= MAX_GENERATIONS :
botnmbr = 1
for i in range(population.size()):
level_output = launch_level(levels[0], population.getGenome(i))
if level_output['event'] == player_finish:
you_win_event = you_win_menu(index)
if you_win_event['event'] == restart_level:
level_menu(level, index)
elif you_win_event['event'] == next_level:
level_menu(levels[index+1], index+1)
if level_output['event'] == player_died:
game_over_event = game_over_menu()
if game_over_event == restart_level:
level_menu(level, index)
if level_output['event'] == restart_level:
score = level_output['score']
results.loc[len(results)] = [generation, score]
population.setGenomeFitness(i,score)
global informationforscreen
informationforscreen = {
'generation' : generation,
'botnumber' : botnmbr,
'lastfitness' : score,
'lastgenerationaveragefitness' : lastgenerationaveragefitness,
'bestfitness' : localBestFitness
}
if score > localBestFitness:
global bestFitness
bestFitness = score
localBestFitness = score
genome = level_output['genome']
genome.network.save(today + "/bestfitness.json")
botnmbr += 1
# global fitnessovergeneration
# fitnessovergeneration.append(population.averageFitness())
lastgenerationaveragefitness = population.averageFitness()
# global fittestovergeneration
# fittestovergeneration.append(population.findFittest().fitness)
#Evolve the population
population.evolvePopulation()
generation += 1
results.to_csv(today + '/results.csv')
def launch_level(level=levels[0], genome=None):
genome.network.fromgenes(genome.genes)
up = down = left = right = False
entities = pygame.sprite.Group()
platforms = pygame.sprite.Group()
enemies = pygame.sprite.Group()
deadly_objects = pygame.sprite.Group()
bots = pygame.sprite.Group()
#inputs debug functionality
if INPUT_OUTPUT_DEBUG == 1:
specific_bot = None
input_randomization_frame_max = 3
input_randomization_frame_countdown = input_randomization_frame_max
#inputs debug functionality
player = None
x = y = 0
STARTX = 0
# build the level
for row in level:
for col in row:
if col == "P":
p = Platform(x, y, timer, platform_images)
platforms.add(p)
entities.add(p)
if col.isdigit():
digit = int(col)
if digit < len(platform_images):
p = Platform(x, y, timer, platform_images, digit)
else:
p = Platform(x, y, timer, platform_images, len(platform_images)-1)
platforms.add(p)
entities.add(p)
if col == "F":
e = FinishBlock(x, y)
platforms.add(e)
entities.add(e)
if col == "W":
e = Water(x, y)
deadly_objects.add(e)
entities.add(e)
if col == "R":
e = Rock(x, y)
platforms.add(e)
entities.add(e)
if col == "^":
e = Spike(x, y, spike)
deadly_objects.add(e)
entities.add(e)
if col == "<":
e = Spike(x, y, pygame.transform.rotate(spike, 90))
deadly_objects.add(e)
entities.add(e)
if col == "V":
e = Spike(x, y, pygame.transform.rotate(spike, 180))
deadly_objects.add(e)
entities.add(e)
if col == ">":
e = Spike(x, y, pygame.transform.rotate(spike, -90))
deadly_objects.add(e)
entities.add(e)
if col == "U":
player = Player(x, y, platforms, deadly_objects)
entities.add(player)
if col == "B":
STARTX = x
for i in range(1):
bot = Bot(x, y, platforms, deadly_objects, enemies)
bots.add(bot)
entities.add(bot)
#inputs debug functionality
# if INPUT_OUTPUT_DEBUG == 1:
specific_bot = bot
if col == "E":
e = Enemy(x, y, platforms, timer, deadly_objects)
entities.add(e)
enemies.add(e)
x += CELL_WIDTH
y += CELL_HEIGHT
x = 0
total_level_width = len(level[0])*CELL_WIDTH
total_level_height = len(level)*CELL_HEIGHT
camera = Camera(complex_camera, total_level_width, total_level_height)
level_width = len(level[0])
level_height = len(level)
level_array = np.zeros((level_width,level_height))
in_game = True
return_object = {
'event': None,
'genome': None,
'score': None
}
while in_game:
timer.tick(4000)
for e in pygame.event.get():
if e.type == QUIT:
exit_game()
if e.type == player_finish:
return_object['event'] = restart_level
return_object['genome'] = genome
return_object['score'] = specific_bot.rect.left - STARTX + 1000
return return_object
if e.type == player_died:
return_object['event'] = player_died
return return_object
if e.type == restart_level:
return_object['event'] = restart_level
return_object['genome'] = genome
return_object['score'] = specific_bot.rect.left - STARTX
return return_object
if e.type == pause:
#inputs debug functionality
if INPUT_OUTPUT_DEBUG == 1:
if specific_bot != None:
test_array = inputs(level_array,specific_bot.rect.left,specific_bot.rect.top)
else:
test_array = inputs(level_array,0,0)
else:
test_array = None
pause_event = pause_menu(test_array)
#inputs debug functionality
if pause_event == restart_level or pause_event == back_to_menu:
return_object['event'] = pause_event
return return_object
if e.type == KEYDOWN and e.key == K_SPACE:
event_pause()
if e.type == KEYDOWN and e.key == K_UP:
up = True
if e.type == KEYDOWN and e.key == K_DOWN:
down = True
if e.type == KEYDOWN and e.key == K_LEFT:
left = True
if e.type == KEYDOWN and e.key == K_RIGHT:
right = True
if e.type == KEYUP and e.key == K_UP:
up = False
if e.type == KEYUP and e.key == K_DOWN:
down = False
if e.type == KEYUP and e.key == K_RIGHT:
right = False
if e.type == KEYUP and e.key == K_LEFT:
left = False
for y in range(48):
for x in range(48):
screen.blit(bg, (x * 48, y * 48))
if player is None and len(bots.sprites()) > 0:
camera.update(sorted(bots.sprites(), reverse=True, key=lambda b: b.rect.left)[0])
elif player is not None:
camera.update(player)
player.update(up, down, left, right, enemies)
elif len(bots.sprites()) is 0:
event_restart_level()
# update player, draw everything else
level_array.fill(0)
sprites_to_level_array(level_array,platforms,1)
sprites_to_level_array(level_array,deadly_objects,-1)
sprites_to_level_array(level_array,enemies,-1)
debug_array = inputs(level_array,specific_bot.rect.left,specific_bot.rect.top)
NNinput = debug_array.flatten()
NNinput = np.reshape(NNinput, (NNinput.shape[0],-1))
specific_bot.input_table = genome.network.feedforward(NNinput)
platforms.update()
enemies.update()
bots.update()
for e in entities:
screen.blit(e.image, camera.apply(e))
#inputs debug functionality
if INPUT_OUTPUT_DEBUG == 1:
for x in range(0,INPUT_VIEW_RANGE_X*2+1):
for y in range(0,INPUT_VIEW_RANGE_Y*2+1):
p = int(debug_array[x,y])
if -1 <= p <= 1:
screen.blit(debug_images[p+1],(x*48,y*48))
else:
screen.blit(debug_images[1],(x*48,y*48))
largeText = pygame.font.SysFont(FONT, 40)
TextSurf, TextRect = text_objects(str(p), largeText, WHITE)
TextRect.center = (x*48+24, y*48+24)
screen.blit(TextSurf, TextRect)
largeText = pygame.font.SysFont(FONT, 40)
TextSurf, TextRect = text_objects("LEFT", largeText, DANGER if specific_bot.left else WHITE)
TextRect.top = WIN_HEIGHT-50
TextRect.left = 200
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("JUMP", largeText, DANGER if specific_bot.up else WHITE)
TextRect.top = WIN_HEIGHT-50
TextRect.left = 300
screen.blit(TextSurf, TextRect)
TextSurf, TextRect = text_objects("RIGHT", largeText, DANGER if specific_bot.right else WHITE)
TextRect.top = WIN_HEIGHT-50
TextRect.left = 400
screen.blit(TextSurf, TextRect)
#inputs debug functionality
button("PAUSE", 10, WIN_HEIGHT-60, 100, 50, PRIMARY, PRIMARY_HOVER, event_pause)
score_board()
pygame.display.flip()
if __name__ == "__main__":
if len(sys.argv) != 1:
#Evaluate a single genome
if str(sys.argv[1])=="-evaluate":
print(str(sys.argv[2]))
net = load(str(sys.argv[2]))
genome = Genome(net)
global savestat
savestat = False
fitness = []
for i in range(100):
level_output = launch_level(levels[0], genome)
if level_output['event'] == restart_level:
score = level_output['score']
fitness.append(score)
print("fitness : %s " % score)
average = sum(fitness) / float(len(fitness))
printc("Average fitness : %s" % average,"red")
pygame.quit()
sys.exit()
#Show the stat of an experiment
if str(sys.argv[1])=="-stats":
pass
# showStat(str(sys.argv[2]))
else:
main_menu()
| [
"pygame.event.Event",
"pygame.event.get",
"pygame.display.update",
"pygame.mouse.get_pos",
"pandas.DataFrame",
"pygame.font.SysFont",
"pygame.display.set_mode",
"pygame.transform.scale",
"numpy.reshape",
"pygame.display.set_caption",
"pygame.quit",
"pygame.mouse.get_pressed",
"pygame.draw.re... | [((439, 452), 'pygame.init', 'pygame.init', ([], {}), '()\n', (450, 452), False, 'import pygame\n'), ((463, 509), 'pygame.display.set_mode', 'pygame.display.set_mode', (['DISPLAY', 'FLAGS', 'DEPTH'], {}), '(DISPLAY, FLAGS, DEPTH)\n', (486, 509), False, 'import pygame\n'), ((511, 557), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Don\'t Fall Down!"""'], {}), '("Don\'t Fall Down!")\n', (537, 557), False, 'import pygame\n'), ((567, 586), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (584, 586), False, 'import pygame\n'), ((1153, 1189), 'pygame.transform.scale', 'pygame.transform.scale', (['bg', '(48, 48)'], {}), '(bg, (48, 48))\n', (1175, 1189), False, 'import pygame\n'), ((1472, 1511), 'pygame.transform.scale', 'pygame.transform.scale', (['spike', '(48, 48)'], {}), '(spike, (48, 48))\n', (1494, 1511), False, 'import pygame\n'), ((1023, 1062), 'pygame.transform.scale', 'pygame.transform.scale', (['image', '(48, 48)'], {}), '(image, (48, 48))\n', (1045, 1062), False, 'import pygame\n'), ((1323, 1362), 'pygame.transform.scale', 'pygame.transform.scale', (['image', '(48, 48)'], {}), '(image, (48, 48))\n', (1345, 1362), False, 'import pygame\n'), ((1539, 1550), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1547, 1550), False, 'import sys\n'), ((2185, 2207), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2205, 2207), False, 'import pygame\n'), ((2221, 2247), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (2245, 2247), False, 'import pygame\n'), ((2517, 2547), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT2', '(20)'], {}), '(FONT2, 20)\n', (2536, 2547), False, 'import pygame\n'), ((9582, 9629), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['generation', 'fitness']"}), "(columns=['generation', 'fitness'])\n", (9594, 9629), True, 'import pandas as pd\n'), ((11937, 11958), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (11956, 11958), False, 'import pygame\n'), ((11976, 11997), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (11995, 11997), False, 'import pygame\n'), ((12013, 12034), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (12032, 12034), False, 'import pygame\n'), ((12057, 12078), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (12076, 12078), False, 'import pygame\n'), ((12091, 12112), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (12110, 12112), False, 'import pygame\n'), ((15049, 15086), 'numpy.zeros', 'np.zeros', (['(level_width, level_height)'], {}), '((level_width, level_height))\n', (15057, 15086), True, 'import numpy as np\n'), ((707, 715), 'time.gmtime', 'gmtime', ([], {}), '()\n', (713, 715), False, 'from time import gmtime, strftime\n'), ((738, 785), 'pygame.image.load', 'pygame.image.load', (['"""assets/background_menu.png"""'], {}), "('assets/background_menu.png')\n", (755, 785), False, 'import pygame\n'), ((819, 874), 'pygame.image.load', 'pygame.image.load', (['"""assets/background_and_tutorial.png"""'], {}), "('assets/background_and_tutorial.png')\n", (836, 874), False, 'import pygame\n'), ((1103, 1136), 'pygame.image.load', 'pygame.image.load', (['"""assets/S.png"""'], {}), "('assets/S.png')\n", (1120, 1136), False, 'import pygame\n'), ((1409, 1446), 'pygame.image.load', 'pygame.image.load', (['"""assets/spike.png"""'], {}), "('assets/spike.png')\n", (1426, 1446), False, 'import pygame\n'), ((1599, 1625), 'pygame.event.Event', 'pygame.event.Event', (['resume'], {}), '(resume)\n', (1617, 1625), False, 'import pygame\n'), ((1682, 1715), 'pygame.event.Event', 'pygame.event.Event', (['restart_level'], {}), '(restart_level)\n', (1700, 1715), False, 'import pygame\n'), ((1771, 1803), 'pygame.event.Event', 'pygame.event.Event', (['back_to_menu'], {}), '(back_to_menu)\n', (1789, 1803), False, 'import pygame\n'), ((1852, 1877), 'pygame.event.Event', 'pygame.event.Event', (['pause'], {}), '(pause)\n', (1870, 1877), False, 'import pygame\n'), ((1931, 1961), 'pygame.event.Event', 'pygame.event.Event', (['next_level'], {}), '(next_level)\n', (1949, 1961), False, 'import pygame\n'), ((2318, 2360), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'ac', '(x, y, w, h)'], {}), '(screen, ac, (x, y, w, h))\n', (2334, 2360), False, 'import pygame\n'), ((2455, 2497), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'ic', '(x, y, w, h)'], {}), '(screen, ic, (x, y, w, h))\n', (2471, 2497), False, 'import pygame\n'), ((2767, 2785), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2783, 2785), False, 'import pygame\n'), ((3162, 3185), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3183, 3185), False, 'import pygame\n'), ((3332, 3350), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3348, 3350), False, 'import pygame\n'), ((3625, 3655), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(115)'], {}), '(FONT, 115)\n', (3644, 3655), False, 'import pygame\n'), ((4028, 4051), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4049, 4051), False, 'import pygame\n'), ((4166, 4184), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4182, 4184), False, 'import pygame\n'), ((5104, 5134), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(115)'], {}), '(FONT, 115)\n', (5123, 5134), False, 'import pygame\n'), ((5514, 5537), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5535, 5537), False, 'import pygame\n'), ((5649, 5667), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5665, 5667), False, 'import pygame\n'), ((6057, 6087), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(115)'], {}), '(FONT, 115)\n', (6076, 6087), False, 'import pygame\n'), ((6482, 6512), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT2', '(40)'], {}), '(FONT2, 40)\n', (6501, 6512), False, 'import pygame\n'), ((6710, 6733), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6731, 6733), False, 'import pygame\n'), ((6840, 6858), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6856, 6858), False, 'import pygame\n'), ((7083, 7113), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(115)'], {}), '(FONT, 115)\n', (7102, 7113), False, 'import pygame\n'), ((7578, 7608), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT2', '(20)'], {}), '(FONT2, 20)\n', (7597, 7608), False, 'import pygame\n'), ((8047, 8070), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8068, 8070), False, 'import pygame\n'), ((8178, 8207), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(20)'], {}), '(FONT, 20)\n', (8197, 8207), False, 'import pygame\n'), ((15276, 15294), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (15292, 15294), False, 'import pygame\n'), ((18344, 18387), 'numpy.reshape', 'np.reshape', (['NNinput', '(NNinput.shape[0], -1)'], {}), '(NNinput, (NNinput.shape[0], -1))\n', (18354, 18387), True, 'import numpy as np\n'), ((20222, 20243), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (20241, 20243), False, 'import pygame\n'), ((4849, 4879), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT2', '(40)'], {}), '(FONT2, 40)\n', (4868, 4879), False, 'import pygame\n'), ((19353, 19382), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(40)'], {}), '(FONT, 40)\n', (19372, 19382), False, 'import pygame\n'), ((21017, 21030), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (21028, 21030), False, 'import pygame\n'), ((21044, 21054), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21052, 21054), False, 'import sys\n'), ((2875, 2888), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2886, 2888), False, 'import pygame\n'), ((3440, 3453), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3451, 3453), False, 'import pygame\n'), ((4274, 4287), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4285, 4287), False, 'import pygame\n'), ((5757, 5770), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (5768, 5770), False, 'import pygame\n'), ((6948, 6961), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6959, 6961), False, 'import pygame\n'), ((13598, 13632), 'pygame.transform.rotate', 'pygame.transform.rotate', (['spike', '(90)'], {}), '(spike, 90)\n', (13621, 13632), False, 'import pygame\n'), ((13767, 13802), 'pygame.transform.rotate', 'pygame.transform.rotate', (['spike', '(180)'], {}), '(spike, 180)\n', (13790, 13802), False, 'import pygame\n'), ((13937, 13972), 'pygame.transform.rotate', 'pygame.transform.rotate', (['spike', '(-90)'], {}), '(spike, -90)\n', (13960, 13972), False, 'import pygame\n'), ((19092, 19121), 'pygame.font.SysFont', 'pygame.font.SysFont', (['FONT', '(40)'], {}), '(FONT, 40)\n', (19111, 19121), False, 'import pygame\n')] |
def mpl_plot_graph(ax,G,vertex_options={},edge_options={},dims=[0,1],directed=False):
"""Plots a graph G=(V,E) using matplotlib.
ax is a matplotlib Axes object.
If states have more than 2 dimensions, you can control the x-y axes
using the dims argument.
"""
import numpy as np
V,E = G
if len(V)==0:
return
X = [v[dims[0]] for v in V]
Y = [v[dims[1]] for v in V]
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
lines = []
for e in E:
x1,y1 = X[e[0]],Y[e[0]]
x2,y2 = X[e[1]],Y[e[1]]
lines.append(np.array([[x1,y1],[x2,y2]],dtype=float))
#convert normal edge options to collection options
collection_options = {}
for k,opt in edge_options.items():
if not k.endswith('s') and k not in ['alpha']:
collection_options[k+'s'] = np.asarray([opt]*len(lines))
linecoll = LineCollection(lines,zorder=2,**collection_options)
ax.add_collection(linecoll)
ax.scatter(X,Y,zorder=3,**vertex_options)
| [
"matplotlib.collections.LineCollection",
"numpy.array"
] | [((920, 973), 'matplotlib.collections.LineCollection', 'LineCollection', (['lines'], {'zorder': '(2)'}), '(lines, zorder=2, **collection_options)\n', (934, 973), False, 'from matplotlib.collections import LineCollection\n'), ((618, 661), 'numpy.array', 'np.array', (['[[x1, y1], [x2, y2]]'], {'dtype': 'float'}), '([[x1, y1], [x2, y2]], dtype=float)\n', (626, 661), True, 'import numpy as np\n')] |
"""
Dubins Path
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as Rot
import CurvesGenerator.draw as draw
# class for PATH element
class PATH:
def __init__(self, L, mode, x, y, yaw):
self.L = L # total path length [float]
self.mode = mode # type of each part of the path [string]
self.x = x # final s positions [m]
self.y = y # final y positions [m]
self.yaw = yaw # final yaw angles [rad]
# _utils
def pi_2_pi(theta):
while theta > math.pi:
theta -= 2.0 * math.pi
while theta < -math.pi:
theta += 2.0 * math.pi
return theta
def mod2pi(theta):
return theta - 2.0 * math.pi * math.floor(theta / math.pi / 2.0)
def LSL(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
p_lsl = 2 + dist ** 2 - 2 * cos_a_b + 2 * dist * (sin_a - sin_b)
if p_lsl < 0:
return None, None, None, ["L", "S", "L"]
else:
p_lsl = math.sqrt(p_lsl)
denominate = dist + sin_a - sin_b
t_lsl = mod2pi(-alpha + math.atan2(cos_b - cos_a, denominate))
q_lsl = mod2pi(beta - math.atan2(cos_b - cos_a, denominate))
return t_lsl, p_lsl, q_lsl, ["L", "S", "L"]
def RSR(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
p_rsr = 2 + dist ** 2 - 2 * cos_a_b + 2 * dist * (sin_b - sin_a)
if p_rsr < 0:
return None, None, None, ["R", "S", "R"]
else:
p_rsr = math.sqrt(p_rsr)
denominate = dist - sin_a + sin_b
t_rsr = mod2pi(alpha - math.atan2(cos_a - cos_b, denominate))
q_rsr = mod2pi(-beta + math.atan2(cos_a - cos_b, denominate))
return t_rsr, p_rsr, q_rsr, ["R", "S", "R"]
def LSR(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
p_lsr = -2 + dist ** 2 + 2 * cos_a_b + 2 * dist * (sin_a + sin_b)
if p_lsr < 0:
return None, None, None, ["L", "S", "R"]
else:
p_lsr = math.sqrt(p_lsr)
rec = math.atan2(-cos_a - cos_b, dist + sin_a + sin_b) - math.atan2(-2.0, p_lsr)
t_lsr = mod2pi(-alpha + rec)
q_lsr = mod2pi(-mod2pi(beta) + rec)
return t_lsr, p_lsr, q_lsr, ["L", "S", "R"]
def RSL(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
p_rsl = -2 + dist ** 2 + 2 * cos_a_b - 2 * dist * (sin_a + sin_b)
if p_rsl < 0:
return None, None, None, ["R", "S", "L"]
else:
p_rsl = math.sqrt(p_rsl)
rec = math.atan2(cos_a + cos_b, dist - sin_a - sin_b) - math.atan2(2.0, p_rsl)
t_rsl = mod2pi(alpha - rec)
q_rsl = mod2pi(beta - rec)
return t_rsl, p_rsl, q_rsl, ["R", "S", "L"]
def RLR(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
rec = (6.0 - dist ** 2 + 2.0 * cos_a_b + 2.0 * dist * (sin_a - sin_b)) / 8.0
if abs(rec) > 1.0:
return None, None, None, ["R", "L", "R"]
p_rlr = mod2pi(2 * math.pi - math.acos(rec))
t_rlr = mod2pi(alpha - math.atan2(cos_a - cos_b, dist - sin_a + sin_b) + mod2pi(p_rlr / 2.0))
q_rlr = mod2pi(alpha - beta - t_rlr + mod2pi(p_rlr))
return t_rlr, p_rlr, q_rlr, ["R", "L", "R"]
def LRL(alpha, beta, dist):
sin_a = math.sin(alpha)
sin_b = math.sin(beta)
cos_a = math.cos(alpha)
cos_b = math.cos(beta)
cos_a_b = math.cos(alpha - beta)
rec = (6.0 - dist ** 2 + 2.0 * cos_a_b + 2.0 * dist * (sin_b - sin_a)) / 8.0
if abs(rec) > 1.0:
return None, None, None, ["L", "R", "L"]
p_lrl = mod2pi(2 * math.pi - math.acos(rec))
t_lrl = mod2pi(-alpha - math.atan2(cos_a - cos_b, dist + sin_a - sin_b) + p_lrl / 2.0)
q_lrl = mod2pi(mod2pi(beta) - alpha - t_lrl + mod2pi(p_lrl))
return t_lrl, p_lrl, q_lrl, ["L", "R", "L"]
def interpolate(ind, l, m, maxc, ox, oy, oyaw, px, py, pyaw, directions):
if m == "S":
px[ind] = ox + l / maxc * math.cos(oyaw)
py[ind] = oy + l / maxc * math.sin(oyaw)
pyaw[ind] = oyaw
else:
ldx = math.sin(l) / maxc
if m == "L":
ldy = (1.0 - math.cos(l)) / maxc
elif m == "R":
ldy = (1.0 - math.cos(l)) / (-maxc)
gdx = math.cos(-oyaw) * ldx + math.sin(-oyaw) * ldy
gdy = -math.sin(-oyaw) * ldx + math.cos(-oyaw) * ldy
px[ind] = ox + gdx
py[ind] = oy + gdy
if m == "L":
pyaw[ind] = oyaw + l
elif m == "R":
pyaw[ind] = oyaw - l
if l > 0.0:
directions[ind] = 1
else:
directions[ind] = -1
return px, py, pyaw, directions
def generate_local_course(L, lengths, mode, maxc, step_size):
point_num = int(L / step_size) + len(lengths) + 3
px = [0.0 for _ in range(point_num)]
py = [0.0 for _ in range(point_num)]
pyaw = [0.0 for _ in range(point_num)]
directions = [0 for _ in range(point_num)]
ind = 1
if lengths[0] > 0.0:
directions[0] = 1
else:
directions[0] = -1
if lengths[0] > 0.0:
d = step_size
else:
d = -step_size
ll = 0.0
for m, l, i in zip(mode, lengths, range(len(mode))):
if l > 0.0:
d = step_size
else:
d = -step_size
ox, oy, oyaw = px[ind], py[ind], pyaw[ind]
ind -= 1
if i >= 1 and (lengths[i - 1] * lengths[i]) > 0:
pd = -d - ll
else:
pd = d - ll
while abs(pd) <= abs(l):
ind += 1
px, py, pyaw, directions = \
interpolate(ind, pd, m, maxc, ox, oy, oyaw, px, py, pyaw, directions)
pd += d
ll = l - pd - d # calc remain length
ind += 1
px, py, pyaw, directions = \
interpolate(ind, l, m, maxc, ox, oy, oyaw, px, py, pyaw, directions)
if len(px) <= 1:
return [], [], [], []
# remove unused data
while len(px) >= 1 and px[-1] == 0.0:
px.pop()
py.pop()
pyaw.pop()
directions.pop()
return px, py, pyaw, directions
def planning_from_origin(gx, gy, gyaw, curv, step_size):
D = math.hypot(gx, gy)
d = D * curv
theta = mod2pi(math.atan2(gy, gx))
alpha = mod2pi(-theta)
beta = mod2pi(gyaw - theta)
planners = [LSL, RSR, LSR, RSL, RLR, LRL]
best_cost = float("inf")
bt, bp, bq, best_mode = None, None, None, None
for planner in planners:
t, p, q, mode = planner(alpha, beta, d)
if t is None:
continue
cost = (abs(t) + abs(p) + abs(q))
if best_cost > cost:
bt, bp, bq, best_mode = t, p, q, mode
best_cost = cost
lengths = [bt, bp, bq]
x_list, y_list, yaw_list, directions = generate_local_course(
sum(lengths), lengths, best_mode, curv, step_size)
return x_list, y_list, yaw_list, best_mode, best_cost
def calc_dubins_path(sx, sy, syaw, gx, gy, gyaw, curv, step_size=0.1):
gx = gx - sx
gy = gy - sy
l_rot = Rot.from_euler('z', syaw).as_dcm()[0:2, 0:2]
le_xy = np.stack([gx, gy]).T @ l_rot
le_yaw = gyaw - syaw
lp_x, lp_y, lp_yaw, mode, lengths = planning_from_origin(
le_xy[0], le_xy[1], le_yaw, curv, step_size)
rot = Rot.from_euler('z', -syaw).as_dcm()[0:2, 0:2]
converted_xy = np.stack([lp_x, lp_y]).T @ rot
x_list = converted_xy[:, 0] + sx
y_list = converted_xy[:, 1] + sy
yaw_list = [pi_2_pi(i_yaw + syaw) for i_yaw in lp_yaw]
return PATH(lengths, mode, x_list, y_list, yaw_list)
def main():
# choose states pairs: (s, y, yaw)
# simulation-1
states = [(0, 0, 0), (10, 10, -90), (20, 5, 60), (30, 10, 120),
(35, -5, 30), (25, -10, -120), (15, -15, 100), (0, -10, -90)]
# simulation-2
# states = [(-3, 3, 120), (10, -7, 30), (10, 13, 30), (20, 5, -25),
# (35, 10, 180), (32, -10, 180), (5, -12, 90)]
max_c = 0.25 # max curvature
path_x, path_y, yaw = [], [], []
for i in range(len(states) - 1):
s_x = states[i][0]
s_y = states[i][1]
s_yaw = np.deg2rad(states[i][2])
g_x = states[i + 1][0]
g_y = states[i + 1][1]
g_yaw = np.deg2rad(states[i + 1][2])
path_i = calc_dubins_path(s_x, s_y, s_yaw, g_x, g_y, g_yaw, max_c)
for x, y, iyaw in zip(path_i.x, path_i.y, path_i.yaw):
path_x.append(x)
path_y.append(y)
yaw.append(iyaw)
# animation
plt.ion()
plt.figure(1)
for i in range(len(path_x)):
plt.clf()
plt.plot(path_x, path_y, linewidth=1, color='gray')
for x, y, theta in states:
draw.Arrow(x, y, np.deg2rad(theta), 2, 'blueviolet')
draw.Car(path_x[i], path_y[i], yaw[i], 1.5, 3)
plt.axis("equal")
plt.title("Simulation of Dubins Path")
plt.axis([-10, 42, -20, 20])
plt.draw()
plt.pause(0.001)
plt.pause(1)
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.title",
"math.hypot",
"math.atan2",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw",
"math.cos",
"matplotlib.pyplot.pause",
"numpy.stack",
"math.sqrt",
"math.sin",
"matplotlib.pyplot.ion",
"CurvesGenerator.draw.Car",
"matplotlib.pyplot.plot"... | [((806, 821), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (814, 821), False, 'import math\n'), ((834, 848), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (842, 848), False, 'import math\n'), ((861, 876), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (869, 876), False, 'import math\n'), ((889, 903), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (897, 903), False, 'import math\n'), ((918, 940), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (926, 940), False, 'import math\n'), ((1384, 1399), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (1392, 1399), False, 'import math\n'), ((1412, 1426), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (1420, 1426), False, 'import math\n'), ((1439, 1454), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (1447, 1454), False, 'import math\n'), ((1467, 1481), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (1475, 1481), False, 'import math\n'), ((1496, 1518), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (1504, 1518), False, 'import math\n'), ((1962, 1977), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (1970, 1977), False, 'import math\n'), ((1990, 2004), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (1998, 2004), False, 'import math\n'), ((2017, 2032), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (2025, 2032), False, 'import math\n'), ((2045, 2059), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (2053, 2059), False, 'import math\n'), ((2074, 2096), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (2082, 2096), False, 'import math\n'), ((2529, 2544), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (2537, 2544), False, 'import math\n'), ((2557, 2571), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (2565, 2571), False, 'import math\n'), ((2584, 2599), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (2592, 2599), False, 'import math\n'), ((2612, 2626), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (2620, 2626), False, 'import math\n'), ((2641, 2663), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (2649, 2663), False, 'import math\n'), ((3084, 3099), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (3092, 3099), False, 'import math\n'), ((3112, 3126), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (3120, 3126), False, 'import math\n'), ((3139, 3154), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (3147, 3154), False, 'import math\n'), ((3167, 3181), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (3175, 3181), False, 'import math\n'), ((3196, 3218), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (3204, 3218), False, 'import math\n'), ((3670, 3685), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (3678, 3685), False, 'import math\n'), ((3698, 3712), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (3706, 3712), False, 'import math\n'), ((3725, 3740), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (3733, 3740), False, 'import math\n'), ((3753, 3767), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (3761, 3767), False, 'import math\n'), ((3782, 3804), 'math.cos', 'math.cos', (['(alpha - beta)'], {}), '(alpha - beta)\n', (3790, 3804), False, 'import math\n'), ((6512, 6530), 'math.hypot', 'math.hypot', (['gx', 'gy'], {}), '(gx, gy)\n', (6522, 6530), False, 'import math\n'), ((8834, 8843), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (8841, 8843), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8861), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (8858, 8861), True, 'import matplotlib.pyplot as plt\n'), ((9291, 9303), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (9300, 9303), True, 'import matplotlib.pyplot as plt\n'), ((1105, 1121), 'math.sqrt', 'math.sqrt', (['p_lsl'], {}), '(p_lsl)\n', (1114, 1121), False, 'import math\n'), ((1683, 1699), 'math.sqrt', 'math.sqrt', (['p_rsr'], {}), '(p_rsr)\n', (1692, 1699), False, 'import math\n'), ((2262, 2278), 'math.sqrt', 'math.sqrt', (['p_lsr'], {}), '(p_lsr)\n', (2271, 2278), False, 'import math\n'), ((2290, 2338), 'math.atan2', 'math.atan2', (['(-cos_a - cos_b)', '(dist + sin_a + sin_b)'], {}), '(-cos_a - cos_b, dist + sin_a + sin_b)\n', (2300, 2338), False, 'import math\n'), ((2341, 2364), 'math.atan2', 'math.atan2', (['(-2.0)', 'p_lsr'], {}), '(-2.0, p_lsr)\n', (2351, 2364), False, 'import math\n'), ((2829, 2845), 'math.sqrt', 'math.sqrt', (['p_rsl'], {}), '(p_rsl)\n', (2838, 2845), False, 'import math\n'), ((2857, 2904), 'math.atan2', 'math.atan2', (['(cos_a + cos_b)', '(dist - sin_a - sin_b)'], {}), '(cos_a + cos_b, dist - sin_a - sin_b)\n', (2867, 2904), False, 'import math\n'), ((2907, 2929), 'math.atan2', 'math.atan2', (['(2.0)', 'p_rsl'], {}), '(2.0, p_rsl)\n', (2917, 2929), False, 'import math\n'), ((6568, 6586), 'math.atan2', 'math.atan2', (['gy', 'gx'], {}), '(gy, gx)\n', (6578, 6586), False, 'import math\n'), ((8454, 8478), 'numpy.deg2rad', 'np.deg2rad', (['states[i][2]'], {}), '(states[i][2])\n', (8464, 8478), True, 'import numpy as np\n'), ((8557, 8585), 'numpy.deg2rad', 'np.deg2rad', (['states[i + 1][2]'], {}), '(states[i + 1][2])\n', (8567, 8585), True, 'import numpy as np\n'), ((8904, 8913), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8911, 8913), True, 'import matplotlib.pyplot as plt\n'), ((8922, 8973), 'matplotlib.pyplot.plot', 'plt.plot', (['path_x', 'path_y'], {'linewidth': '(1)', 'color': '"""gray"""'}), "(path_x, path_y, linewidth=1, color='gray')\n", (8930, 8973), True, 'import matplotlib.pyplot as plt\n'), ((9084, 9130), 'CurvesGenerator.draw.Car', 'draw.Car', (['path_x[i]', 'path_y[i]', 'yaw[i]', '(1.5)', '(3)'], {}), '(path_x[i], path_y[i], yaw[i], 1.5, 3)\n', (9092, 9130), True, 'import CurvesGenerator.draw as draw\n'), ((9140, 9157), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (9148, 9157), True, 'import matplotlib.pyplot as plt\n'), ((9166, 9204), 'matplotlib.pyplot.title', 'plt.title', (['"""Simulation of Dubins Path"""'], {}), "('Simulation of Dubins Path')\n", (9175, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9213, 9241), 'matplotlib.pyplot.axis', 'plt.axis', (['[-10, 42, -20, 20]'], {}), '([-10, 42, -20, 20])\n', (9221, 9241), True, 'import matplotlib.pyplot as plt\n'), ((9250, 9260), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (9258, 9260), True, 'import matplotlib.pyplot as plt\n'), ((9269, 9285), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (9278, 9285), True, 'import matplotlib.pyplot as plt\n'), ((730, 763), 'math.floor', 'math.floor', (['(theta / math.pi / 2.0)'], {}), '(theta / math.pi / 2.0)\n', (740, 763), False, 'import math\n'), ((1189, 1226), 'math.atan2', 'math.atan2', (['(cos_b - cos_a)', 'denominate'], {}), '(cos_b - cos_a, denominate)\n', (1199, 1226), False, 'import math\n'), ((1254, 1291), 'math.atan2', 'math.atan2', (['(cos_b - cos_a)', 'denominate'], {}), '(cos_b - cos_a, denominate)\n', (1264, 1291), False, 'import math\n'), ((1766, 1803), 'math.atan2', 'math.atan2', (['(cos_a - cos_b)', 'denominate'], {}), '(cos_a - cos_b, denominate)\n', (1776, 1803), False, 'import math\n'), ((1832, 1869), 'math.atan2', 'math.atan2', (['(cos_a - cos_b)', 'denominate'], {}), '(cos_a - cos_b, denominate)\n', (1842, 1869), False, 'import math\n'), ((3408, 3422), 'math.acos', 'math.acos', (['rec'], {}), '(rec)\n', (3417, 3422), False, 'import math\n'), ((3994, 4008), 'math.acos', 'math.acos', (['rec'], {}), '(rec)\n', (4003, 4008), False, 'import math\n'), ((4455, 4466), 'math.sin', 'math.sin', (['l'], {}), '(l)\n', (4463, 4466), False, 'import math\n'), ((7437, 7455), 'numpy.stack', 'np.stack', (['[gx, gy]'], {}), '([gx, gy])\n', (7445, 7455), True, 'import numpy as np\n'), ((7683, 7705), 'numpy.stack', 'np.stack', (['[lp_x, lp_y]'], {}), '([lp_x, lp_y])\n', (7691, 7705), True, 'import numpy as np\n'), ((3451, 3498), 'math.atan2', 'math.atan2', (['(cos_a - cos_b)', '(dist - sin_a + sin_b)'], {}), '(cos_a - cos_b, dist - sin_a + sin_b)\n', (3461, 3498), False, 'import math\n'), ((4038, 4085), 'math.atan2', 'math.atan2', (['(cos_a - cos_b)', '(dist + sin_a - sin_b)'], {}), '(cos_a - cos_b, dist + sin_a - sin_b)\n', (4048, 4085), False, 'import math\n'), ((4342, 4356), 'math.cos', 'math.cos', (['oyaw'], {}), '(oyaw)\n', (4350, 4356), False, 'import math\n'), ((4391, 4405), 'math.sin', 'math.sin', (['oyaw'], {}), '(oyaw)\n', (4399, 4405), False, 'import math\n'), ((4626, 4641), 'math.cos', 'math.cos', (['(-oyaw)'], {}), '(-oyaw)\n', (4634, 4641), False, 'import math\n'), ((4650, 4665), 'math.sin', 'math.sin', (['(-oyaw)'], {}), '(-oyaw)\n', (4658, 4665), False, 'import math\n'), ((4711, 4726), 'math.cos', 'math.cos', (['(-oyaw)'], {}), '(-oyaw)\n', (4719, 4726), False, 'import math\n'), ((7380, 7405), 'scipy.spatial.transform.Rotation.from_euler', 'Rot.from_euler', (['"""z"""', 'syaw'], {}), "('z', syaw)\n", (7394, 7405), True, 'from scipy.spatial.transform import Rotation as Rot\n'), ((7618, 7644), 'scipy.spatial.transform.Rotation.from_euler', 'Rot.from_euler', (['"""z"""', '(-syaw)'], {}), "('z', -syaw)\n", (7632, 7644), True, 'from scipy.spatial.transform import Rotation as Rot\n'), ((9039, 9056), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (9049, 9056), True, 'import numpy as np\n'), ((4520, 4531), 'math.cos', 'math.cos', (['l'], {}), '(l)\n', (4528, 4531), False, 'import math\n'), ((4687, 4702), 'math.sin', 'math.sin', (['(-oyaw)'], {}), '(-oyaw)\n', (4695, 4702), False, 'import math\n'), ((4588, 4599), 'math.cos', 'math.cos', (['l'], {}), '(l)\n', (4596, 4599), False, 'import math\n')] |
from rockyraccoon.model.wave_fn import WaveFunction
from rockyraccoon.model.core import RaccoonWrapper
from rockyraccoon.utils.plot import plot_qml_landscape_multiclass
import matplotlib.pyplot as plt
import numpy as np
def multiclass_wave_fn():
"""
Test the wave function QML model for a simple data set with three classes.
"""
# import tensorflow as tf
# tf.enable_eager_execution()
model = WaveFunction(nclasses=3, device="default.qubit")
wrapper = RaccoonWrapper(model)
number_of_copies = 3
# PERFECT PROBLEM
X_1 = np.tile([1, 1], (number_of_copies, 1))
X_2 = np.tile([-1, -1], (number_of_copies, 1))
X_3 = np.tile([-1, 1], (number_of_copies, 1))
X_4 = np.tile([1, -1], (number_of_copies, 1))
Y_1 = np.tile([0], (number_of_copies, 1))
Y_2 = np.tile([1], (number_of_copies, 1))
Y_3 = np.tile([2], (number_of_copies, 1))
Y_4 = np.tile([2], (number_of_copies, 1))
X = np.vstack((X_1, X_2, X_3, X_4))
y = np.vstack((Y_1, Y_2, Y_3, Y_4)).flatten()
wrapper.train(X, y, maxiter=100, epsilon=0.001, tol=1e-6)
plot_qml_landscape_multiclass(X, y, wrapper, [1, 3])
plt.plot(wrapper.lh)
plt.show()
if __name__ == "__main__":
multiclass_wave_fn()
| [
"rockyraccoon.model.wave_fn.WaveFunction",
"rockyraccoon.model.core.RaccoonWrapper",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"rockyraccoon.utils.plot.plot_qml_landscape_multiclass",
"numpy.tile",
"numpy.vstack"
] | [((421, 469), 'rockyraccoon.model.wave_fn.WaveFunction', 'WaveFunction', ([], {'nclasses': '(3)', 'device': '"""default.qubit"""'}), "(nclasses=3, device='default.qubit')\n", (433, 469), False, 'from rockyraccoon.model.wave_fn import WaveFunction\n'), ((484, 505), 'rockyraccoon.model.core.RaccoonWrapper', 'RaccoonWrapper', (['model'], {}), '(model)\n', (498, 505), False, 'from rockyraccoon.model.core import RaccoonWrapper\n'), ((564, 602), 'numpy.tile', 'np.tile', (['[1, 1]', '(number_of_copies, 1)'], {}), '([1, 1], (number_of_copies, 1))\n', (571, 602), True, 'import numpy as np\n'), ((613, 653), 'numpy.tile', 'np.tile', (['[-1, -1]', '(number_of_copies, 1)'], {}), '([-1, -1], (number_of_copies, 1))\n', (620, 653), True, 'import numpy as np\n'), ((664, 703), 'numpy.tile', 'np.tile', (['[-1, 1]', '(number_of_copies, 1)'], {}), '([-1, 1], (number_of_copies, 1))\n', (671, 703), True, 'import numpy as np\n'), ((714, 753), 'numpy.tile', 'np.tile', (['[1, -1]', '(number_of_copies, 1)'], {}), '([1, -1], (number_of_copies, 1))\n', (721, 753), True, 'import numpy as np\n'), ((765, 800), 'numpy.tile', 'np.tile', (['[0]', '(number_of_copies, 1)'], {}), '([0], (number_of_copies, 1))\n', (772, 800), True, 'import numpy as np\n'), ((811, 846), 'numpy.tile', 'np.tile', (['[1]', '(number_of_copies, 1)'], {}), '([1], (number_of_copies, 1))\n', (818, 846), True, 'import numpy as np\n'), ((857, 892), 'numpy.tile', 'np.tile', (['[2]', '(number_of_copies, 1)'], {}), '([2], (number_of_copies, 1))\n', (864, 892), True, 'import numpy as np\n'), ((903, 938), 'numpy.tile', 'np.tile', (['[2]', '(number_of_copies, 1)'], {}), '([2], (number_of_copies, 1))\n', (910, 938), True, 'import numpy as np\n'), ((948, 979), 'numpy.vstack', 'np.vstack', (['(X_1, X_2, X_3, X_4)'], {}), '((X_1, X_2, X_3, X_4))\n', (957, 979), True, 'import numpy as np\n'), ((1097, 1149), 'rockyraccoon.utils.plot.plot_qml_landscape_multiclass', 'plot_qml_landscape_multiclass', (['X', 'y', 'wrapper', '[1, 3]'], {}), '(X, y, wrapper, [1, 3])\n', (1126, 1149), False, 'from rockyraccoon.utils.plot import plot_qml_landscape_multiclass\n'), ((1154, 1174), 'matplotlib.pyplot.plot', 'plt.plot', (['wrapper.lh'], {}), '(wrapper.lh)\n', (1162, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1189), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1187, 1189), True, 'import matplotlib.pyplot as plt\n'), ((988, 1019), 'numpy.vstack', 'np.vstack', (['(Y_1, Y_2, Y_3, Y_4)'], {}), '((Y_1, Y_2, Y_3, Y_4))\n', (997, 1019), True, 'import numpy as np\n')] |
"""
To Do:
-Add an optional input for the networks so they can be defined in a main run script.
-Test
-Combine Training Operation
"""
from .method import Method
from .buffer import Trajectory
from .AdvantageEstimator import gae
import tensorflow as tf
import numpy as np
from utils.utils import MovingAverage
from utils.record import Record
class AE(Method):
def __init__(self,sharedModel,sess,stateShape,actionSize,scope,HPs,globalAC=None,nTrajs=1):
"""
Initializes I/O placeholders used for Tensorflow session runs.
Initializes and Actor and Critic Network to be used for the purpose of RL.
"""
#Placeholders
self.actionSize =actionSize
self.HPs = HPs
self.sess=sess
self.scope=scope
self.Model = sharedModel
self.s = tf.placeholder(tf.float32, [None] + stateShape, 'S')
self.a = tf.placeholder(tf.float32, [None], 'A')
if "StackedDim" in self.HPs:
self.s_next = tf.placeholder(tf.float32, [None] + stateShape[:-1] +[self.HPs["StackedDim"]], 'S_next')
else:
self.s_next = tf.placeholder(tf.float32, [None] + stateShape, 'S_next')
input = {"state":self.s,"action":self.a}
out = self.Model(input)
self.state_pred = out["prediction"]
self.phi = out["phi"]
if globalAC is None: # get global network
with tf.variable_scope(scope):
self.s_params = self.Model.GetVariables("Reconstruction")
else: # local net, calculate losses
self.buffer = [Trajectory(depth=5) for _ in range(nTrajs)]
with tf.variable_scope(scope+"_update"):
self.s_params = self.Model.GetVariables("Reconstruction")
with tf.name_scope('s_loss'):
if HPs["loss"] == "MSE":
self.s_loss = tf.losses.mean_squared_error(self.state_pred,self.s_next)
elif HPs["loss"] == "KL":
self.s_loss = tf.losses.KLD(self.state_pred,self.s_next)
elif HPs["loss"] == "M4E":
self.s_loss = tf.reduce_mean((self.state_pred-self.s_next)**4)
if HPs["Optimizer"] == "Adam":
self.optimizer = tf.keras.optimizers.Adam(HPs["State LR"])
elif HPs["Optimizer"] == "RMS":
self.optimizer = tf.keras.optimizers.RMSProp(HPs["State LR"])
elif HPs["Optimizer"] == "Adagrad":
self.optimizer = tf.keras.optimizers.Adagrad(HPs["State LR"])
elif HPs["Optimizer"] == "Adadelta":
self.optimizer = tf.keras.optimizers.Adadelta(HPs["State LR"])
elif HPs["Optimizer"] == "Adamax":
self.optimizer = tf.keras.optimizers.Adamax(HPs["State LR"])
elif HPs["Optimizer"] == "Nadam":
self.optimizer = tf.keras.optimizers.Nadam(HPs["State LR"])
elif HPs["Optimizer"] == "SGD":
self.optimizer = tf.keras.optimizers.SGD(HPs["State LR"])
elif HPs["Optimizer"] == "Amsgrad":
self.optimizer = tf.keras.optimizers.Nadam(HPs["State LR"],amsgrad=True)
with tf.name_scope('local_grad'):
self.s_grads = self.optimizer.get_gradients(self.s_loss, self.s_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_s_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.s_params, globalAC.s_params)]
with tf.name_scope('push'):
self.update_s_op = self.optimizer.apply_gradients(zip(self.s_grads, globalAC.s_params))
self.update_ops = [self.update_s_op]
self.pull_ops = [self.pull_s_params_op]
self.grads = [self.s_grads]
self.losses = [self.s_loss]
self.grad_MA = [MovingAverage(1000) for i in range(len(self.grads))]
self.loss_MA = [MovingAverage(1000) for i in range(len(self.grads))]
self.labels = ["State"]
self.sess.run(self.pull_ops) #Pulling the variables from the global network to initialize.
self.clearBuffer = False
def GetAction(self, state,episode=0,step=0,deterministic=False,debug=False):
"""
Contains the code to run the network based on an input.
"""
p = 1/self.actionSize
if len(state.shape)==3:
probs =np.full((1,self.actionSize),p)
else:
probs =np.full((state.shape[0],self.actionSize),p)
actions = np.array([np.random.choice(probs.shape[1], p=prob / sum(prob)) for prob in probs])
if debug: print(probs)
return actions , [] # return a int and extra data that needs to be fed to buffer.
def PredictState(self,state):
s = state[np.newaxis, :]
state_pred = self.sess.run([self.state_pred], {self.s: s})
return state_pred
def Update(self,HPs=None,episode=0,statistics=True):
"""
The main update function for A3C. The function pushes gradients to the global AC Network.
The second function is to Pull
"""
#Process the data from the buffer
samples=0
for i in range(len(self.buffer)):
samples +=len(self.buffer[i])
if samples < self.HPs["BatchSize"]:
return
self.clearBuffer = True
for epoch in range(self.HPs["Epochs"]):
for traj in range(len(self.buffer)):
clip = -1
# try:
# for j in range(2):
# clip = self.buffer[traj][4].index(True, clip + 1)
# except:
# clip=len(self.buffer[traj][4])
#Create a feedDict from the buffer
batches = len(self.buffer[traj][0][:clip])//self.HPs["MinibatchSize"]+1
s = np.array_split(self.buffer[traj][0][:clip], batches)
if "StackedDim" in self.HPs:
# print(-self.HPs["StackedDim"])
# print(np.stack(self.buffer[traj][3][:clip])[:,:,:,-self.HPs["StackedDim"]].shape)
# print(self.buffer[traj][3][:clip][:,:,-self.HPs["StackedDim"]])
if self.HPs["StackedDim"] > 1:
s_next = np.array_split(np.squeeze(self.buffer[traj][3][:clip])[:,:,:,-self.HPs["StackedDim"]:],3,batches)
else:
s_next = np.array_split(np.expand_dims(np.stack(self.buffer[traj][3][:clip])[:,:,:,-self.HPs["StackedDim"]],3),batches)
else:
s_next = np.array_split(self.buffer[traj][3][:clip],batches)
a = np.array_split(np.asarray(self.buffer[traj][1][:clip]).reshape(-1),batches)
for i in range(batches):
feedDict = {
self.s: s[i],
self.s_next: s_next[i],
self.a: a[i],
}
if not statistics:
self.sess.run(self.update_ops, feedDict) # local grads applied to global net.
else:
#Perform update operations
try:
out = self.sess.run(self.update_ops+self.losses+self.grads, feedDict) # local grads applied to global net.
out = np.array_split(out,3)
losses = out[1]
grads = out[2]
for i,loss in enumerate(losses):
self.loss_MA[i].append(loss)
for i,grads_i in enumerate(grads):
total_counter = 0
vanish_counter = 0
for grad in grads_i:
total_counter += np.prod(grad.shape)
vanish_counter += (np.absolute(grad)<1e-6).sum()
self.grad_MA[i].append(vanish_counter/total_counter)
except:
out = self.sess.run(self.update_ops+self.losses, feedDict) # local grads applied to global net.
out = np.array_split(out,2)
losses = out[1]
for i,loss in enumerate(losses):
self.loss_MA[i].append(loss)
self.sess.run(self.pull_ops) # global variables synched to the local net.
def GetStatistics(self):
dict ={}
for i,label in enumerate(self.labels):
dict["Training Results/Vanishing Gradient " + label] = self.grad_MA[i]()
dict["Training Results/Loss " + label] = self.loss_MA[i]()
return dict
def ClearTrajectory(self):
if self.clearBuffer:
for traj in self.buffer:
traj.clear()
self.clearBuffer=False
@property
def getVars(self):
return self.Model.getVars(self.scope)
| [
"numpy.absolute",
"tensorflow.keras.optimizers.SGD",
"numpy.prod",
"tensorflow.keras.optimizers.Adagrad",
"numpy.full",
"tensorflow.keras.optimizers.Adadelta",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.keras.optimizers.Adam",
"tensorflow.name_scope",
"numpy.stack",
"te... | [((815, 867), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + stateShape)', '"""S"""'], {}), "(tf.float32, [None] + stateShape, 'S')\n", (829, 867), True, 'import tensorflow as tf\n'), ((885, 924), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]', '"""A"""'], {}), "(tf.float32, [None], 'A')\n", (899, 924), True, 'import tensorflow as tf\n'), ((988, 1082), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', "([None] + stateShape[:-1] + [self.HPs['StackedDim']])", '"""S_next"""'], {}), "(tf.float32, [None] + stateShape[:-1] + [self.HPs[\n 'StackedDim']], 'S_next')\n", (1002, 1082), True, 'import tensorflow as tf\n'), ((1117, 1174), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '([None] + stateShape)', '"""S_next"""'], {}), "(tf.float32, [None] + stateShape, 'S_next')\n", (1131, 1174), True, 'import tensorflow as tf\n'), ((4526, 4558), 'numpy.full', 'np.full', (['(1, self.actionSize)', 'p'], {}), '((1, self.actionSize), p)\n', (4533, 4558), True, 'import numpy as np\n'), ((4590, 4635), 'numpy.full', 'np.full', (['(state.shape[0], self.actionSize)', 'p'], {}), '((state.shape[0], self.actionSize), p)\n', (4597, 4635), True, 'import numpy as np\n'), ((1401, 1425), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (1418, 1425), True, 'import tensorflow as tf\n'), ((1635, 1671), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope + '_update')"], {}), "(scope + '_update')\n", (1652, 1671), True, 'import tensorflow as tf\n'), ((3416, 3437), 'tensorflow.name_scope', 'tf.name_scope', (['"""sync"""'], {}), "('sync')\n", (3429, 3437), True, 'import tensorflow as tf\n'), ((3963, 3982), 'utils.utils.MovingAverage', 'MovingAverage', (['(1000)'], {}), '(1000)\n', (3976, 3982), False, 'from utils.utils import MovingAverage\n'), ((4044, 4063), 'utils.utils.MovingAverage', 'MovingAverage', (['(1000)'], {}), '(1000)\n', (4057, 4063), False, 'from utils.utils import MovingAverage\n'), ((5979, 6031), 'numpy.array_split', 'np.array_split', (['self.buffer[traj][0][:clip]', 'batches'], {}), '(self.buffer[traj][0][:clip], batches)\n', (5993, 6031), True, 'import numpy as np\n'), ((1768, 1791), 'tensorflow.name_scope', 'tf.name_scope', (['"""s_loss"""'], {}), "('s_loss')\n", (1781, 1791), True, 'import tensorflow as tf\n'), ((2280, 2321), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2304, 2321), True, 'import tensorflow as tf\n'), ((3277, 3304), 'tensorflow.name_scope', 'tf.name_scope', (['"""local_grad"""'], {}), "('local_grad')\n", (3290, 3304), True, 'import tensorflow as tf\n'), ((3460, 3481), 'tensorflow.name_scope', 'tf.name_scope', (['"""pull"""'], {}), "('pull')\n", (3473, 3481), True, 'import tensorflow as tf\n'), ((3621, 3642), 'tensorflow.name_scope', 'tf.name_scope', (['"""push"""'], {}), "('push')\n", (3634, 3642), True, 'import tensorflow as tf\n'), ((6723, 6775), 'numpy.array_split', 'np.array_split', (['self.buffer[traj][3][:clip]', 'batches'], {}), '(self.buffer[traj][3][:clip], batches)\n', (6737, 6775), True, 'import numpy as np\n'), ((1876, 1934), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.state_pred', 'self.s_next'], {}), '(self.state_pred, self.s_next)\n', (1904, 1934), True, 'import tensorflow as tf\n'), ((2407, 2451), 'tensorflow.keras.optimizers.RMSProp', 'tf.keras.optimizers.RMSProp', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2434, 2451), True, 'import tensorflow as tf\n'), ((2018, 2061), 'tensorflow.losses.KLD', 'tf.losses.KLD', (['self.state_pred', 'self.s_next'], {}), '(self.state_pred, self.s_next)\n', (2031, 2061), True, 'import tensorflow as tf\n'), ((2541, 2585), 'tensorflow.keras.optimizers.Adagrad', 'tf.keras.optimizers.Adagrad', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2568, 2585), True, 'import tensorflow as tf\n'), ((6810, 6849), 'numpy.asarray', 'np.asarray', (['self.buffer[traj][1][:clip]'], {}), '(self.buffer[traj][1][:clip])\n', (6820, 6849), True, 'import numpy as np\n'), ((7517, 7539), 'numpy.array_split', 'np.array_split', (['out', '(3)'], {}), '(out, 3)\n', (7531, 7539), True, 'import numpy as np\n'), ((2146, 2198), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.state_pred - self.s_next) ** 4)'], {}), '((self.state_pred - self.s_next) ** 4)\n', (2160, 2198), True, 'import tensorflow as tf\n'), ((2676, 2721), 'tensorflow.keras.optimizers.Adadelta', 'tf.keras.optimizers.Adadelta', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2704, 2721), True, 'import tensorflow as tf\n'), ((6419, 6458), 'numpy.squeeze', 'np.squeeze', (['self.buffer[traj][3][:clip]'], {}), '(self.buffer[traj][3][:clip])\n', (6429, 6458), True, 'import numpy as np\n'), ((8402, 8424), 'numpy.array_split', 'np.array_split', (['out', '(2)'], {}), '(out, 2)\n', (8416, 8424), True, 'import numpy as np\n'), ((2810, 2853), 'tensorflow.keras.optimizers.Adamax', 'tf.keras.optimizers.Adamax', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2836, 2853), True, 'import tensorflow as tf\n'), ((6591, 6628), 'numpy.stack', 'np.stack', (['self.buffer[traj][3][:clip]'], {}), '(self.buffer[traj][3][:clip])\n', (6599, 6628), True, 'import numpy as np\n'), ((8020, 8039), 'numpy.prod', 'np.prod', (['grad.shape'], {}), '(grad.shape)\n', (8027, 8039), True, 'import numpy as np\n'), ((2941, 2983), 'tensorflow.keras.optimizers.Nadam', 'tf.keras.optimizers.Nadam', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (2966, 2983), True, 'import tensorflow as tf\n'), ((3069, 3109), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', (["HPs['State LR']"], {}), "(HPs['State LR'])\n", (3092, 3109), True, 'import tensorflow as tf\n'), ((3199, 3255), 'tensorflow.keras.optimizers.Nadam', 'tf.keras.optimizers.Nadam', (["HPs['State LR']"], {'amsgrad': '(True)'}), "(HPs['State LR'], amsgrad=True)\n", (3224, 3255), True, 'import tensorflow as tf\n'), ((8095, 8112), 'numpy.absolute', 'np.absolute', (['grad'], {}), '(grad)\n', (8106, 8112), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Plot the live microphone signal(s) with matplotlib.
Matplotlib and NumPy have to be installed.
"""
import math
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import rtmixer
import sounddevice as sd
window = 200 # ms
interval = 30 # ms
blocksize = 0
latency = 'low'
device = None
samplerate = None
downsample = 10 # Plot every ...th frame
qsize = 8 # Power of 2
channels = 2
def update_plot(frame):
global plotdata
while q.read_available >= stepsize:
# The ring buffer's size is a multiple of stepsize, therefore we know
# that the data is contiguous in memory (= the 2nd buffer is empty):
read, buf1, buf2 = q.get_read_buffers(stepsize)
assert read == stepsize
assert not buf2
buffer = np.frombuffer(buf1, dtype='float32')
buffer.shape = -1, channels
buffer = buffer[::downsample]
# BTW, "buffer" still uses the ring buffer's memory:
assert buffer.base.base == buf1
shift = len(buffer)
plotdata = np.roll(plotdata, -shift, axis=0)
plotdata[-shift:, :] = buffer
q.advance_read_index(stepsize)
for column, line in enumerate(lines):
line.set_ydata(plotdata[:, column])
return lines
if samplerate is None:
device_info = sd.query_devices(device, 'input')
samplerate = device_info['default_samplerate']
length = int(window * samplerate / (1000 * downsample))
# Round down to a power of two:
stepsize = 2**int(math.log2(interval * samplerate / 1000))
plotdata = np.zeros((length, channels))
fig, ax = plt.subplots()
lines = ax.plot(plotdata)
if channels > 1:
ax.legend(['channel {}'.format(c + 1) for c in range(channels)],
loc='lower left', ncol=channels)
ax.axis((0, len(plotdata), -1, 1))
ax.set_yticks([0])
ax.yaxis.grid(True)
ax.tick_params(bottom='off', top='off', labelbottom='off',
right='off', left='off', labelleft='off')
fig.tight_layout(pad=0)
stream = rtmixer.Recorder(
device=device, channels=channels, blocksize=blocksize,
latency=latency, samplerate=samplerate)
ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)
with stream:
elementsize = channels * stream.samplesize
q = rtmixer.RingBuffer(elementsize, stepsize * qsize)
action = stream.record_ringbuffer(q)
plt.show()
# TODO: check for ringbuffer errors?
print('Input overflows:', action.stats.input_overflows)
| [
"matplotlib.pyplot.show",
"rtmixer.Recorder",
"numpy.roll",
"numpy.frombuffer",
"numpy.zeros",
"sounddevice.query_devices",
"matplotlib.animation.FuncAnimation",
"rtmixer.RingBuffer",
"math.log2",
"matplotlib.pyplot.subplots"
] | [((1590, 1618), 'numpy.zeros', 'np.zeros', (['(length, channels)'], {}), '((length, channels))\n', (1598, 1618), True, 'import numpy as np\n'), ((1630, 1644), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1642, 1644), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2143), 'rtmixer.Recorder', 'rtmixer.Recorder', ([], {'device': 'device', 'channels': 'channels', 'blocksize': 'blocksize', 'latency': 'latency', 'samplerate': 'samplerate'}), '(device=device, channels=channels, blocksize=blocksize,\n latency=latency, samplerate=samplerate)\n', (2044, 2143), False, 'import rtmixer\n'), ((2155, 2216), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update_plot'], {'interval': 'interval', 'blit': '(True)'}), '(fig, update_plot, interval=interval, blit=True)\n', (2168, 2216), False, 'from matplotlib.animation import FuncAnimation\n'), ((1345, 1378), 'sounddevice.query_devices', 'sd.query_devices', (['device', '"""input"""'], {}), "(device, 'input')\n", (1361, 1378), True, 'import sounddevice as sd\n'), ((2285, 2334), 'rtmixer.RingBuffer', 'rtmixer.RingBuffer', (['elementsize', '(stepsize * qsize)'], {}), '(elementsize, stepsize * qsize)\n', (2303, 2334), False, 'import rtmixer\n'), ((2380, 2390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2388, 2390), True, 'import matplotlib.pyplot as plt\n'), ((829, 865), 'numpy.frombuffer', 'np.frombuffer', (['buf1'], {'dtype': '"""float32"""'}), "(buf1, dtype='float32')\n", (842, 865), True, 'import numpy as np\n'), ((1088, 1121), 'numpy.roll', 'np.roll', (['plotdata', '(-shift)'], {'axis': '(0)'}), '(plotdata, -shift, axis=0)\n', (1095, 1121), True, 'import numpy as np\n'), ((1537, 1576), 'math.log2', 'math.log2', (['(interval * samplerate / 1000)'], {}), '(interval * samplerate / 1000)\n', (1546, 1576), False, 'import math\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 10:55:08 2020
@author: andreypoletaev
Inputs:
file = ... Required
duration = ... Required
file_out = ... Required
com = ... Boolean, default: False. True if center-of-mass velocity.
batch = ... Batch size for parallel. Default: 20. Not used if not included.
dims = ... Dimensions. Default: "['x', 'y', 'z']" , interpreted w/ eval().
Assumptions made:
time is [picoseconds], timestep is 1 [fs]
Parallelization is done for sherlock via schwimmbad and MPI.
On sherlock, use: ml python/3.6.1 py-schwimmbad
"""
# =============================================================================
# %% Imports & constants
# =============================================================================
import sys
from schwimmbad import MPIPool
# from hop_utils import one_vacf, autocorrelation
from numpy import argwhere
import pandas as pd
from datetime import datetime as dt
import itertools
flatten = lambda l: list(itertools.chain.from_iterable(itertools.repeat(x,1) if isinstance(x,str) else x for x in l))
## column names for the cases that the file is a CoM file or a single-atom velocity file
com_col_names = ['timestep', 'x', 'y', 'z', 'vx', 'vy', 'vz']
vel_col_names = ['atom_id', 'time', 'vx', 'vy', 'vz']
## default set of dimensions to compute: all 3 dimensions
dims = ['x', 'y', 'z']
# =============================================================================
# %% Parse inputs
# =============================================================================
## Parse inputs. Format: key=value
options = dict([ (x.split('=')[0],x.split('=')[1]) for x in sys.argv[1:] ])
assert 'file' in list(options.keys()) and 'duration' in list(options.keys()), \
'please pass file= ... [path] and duration= ... [psec] as command-line options'
## destination file
assert 'file_out' in list(options.keys()), 'pass an output path, file_out= ...'
fout = options['file_out']
col_names = vel_col_names
header = 0
## read the correct options for the file to be loaded
## center-of-mass is the default: skip 2 rows
if ('com' not in list(options.keys())) or (eval(options['com']) == True) :
col_names = com_col_names
header = 2
## check dimensions
if 'dims' in list(options.keys()): dims = eval(options['dims'])
# ## read a corrected input file
try :
fin = pd.read_csv(options['file'], index_col=False)
if 'time' not in fin.columns : raise IOError
print(f'Loaded a corrected file {options["file"]} with cols {fin.columns.values}')
except :
print(f'file {options["file"]} is not a corrected one.')
## read an uncorrected input file
try :
fin = pd.read_csv(options['file'], sep=' ', skiprows=header, names=col_names, index_col=False)
## convert time from [steps] to [ps] if the input file has the former
fin['time'] = fin.timestep / 1000. ## hard-coded conversion from steps to picoseconds
except : pass
## remove unnecessary columns and set time as index
fin = fin.set_index('time')[['vx','vy','vz']]
for d in ['x','y','z'] :
if d not in dims : fin.drop(f'v{d}', axis=1, inplace=True)
## read in batch size if one is passed
batch_size = 5001
if 'batch' in list(options.keys()) : batch_size = eval(options['batch'])
## Read the longest (time) lag to be computed and a list of all lags to run.
## If the arg duration is longer than the length of simulation, truncate.
## These lags will map to processes.
max_lag = eval(options['duration'])
try:
lags = range(argwhere(fin.index.values > max_lag)[0,0])
except: lags = range(len(fin.index.values))
lag_batches = [lags[i:i+batch_size] for i in range(0,len(lags),batch_size)]
## make up a function for mapping single calls to autocorrelation
def one_autocorrelation(tau):
n = dt.now()
print(f'starting lag {tau}, time now: {n.strftime("%Y %b %d %H:%M:%S")}')
cf = dict(zip(sorted(dims),fin.apply(lambda col: col.autocorr(tau))))
print(f'computed lag {tau}, seconds taken: {(dt.now()-n).total_seconds():.2f}')
return cf
## make up a function for batch computing autocorrelations
def batch_autocorrelation(taus):
n = dt.now()
print(f'starting batch of lags {taus}, time now: {n.strftime("%Y %b %d %H:%M:%S")}')
cf = [dict(zip(sorted(dims),fin.apply(lambda col: col.autocorr(t)))) for t in taus]
print(f'computed batch of lags {taus}, seconds taken: {(dt.now()-n).total_seconds():.2f}', flush=True)
return cf
## shut down all processes except the master one that will map tasks to others
pool = MPIPool()
if not pool.is_master():
print('one worker on standby')
pool.wait()
sys.exit(0)
print('MPI master proceeding to map lags to workers...')
print(f'There are {len(lag_batches)} total batches for parallelization')
## do the actual parallel computation of the autocorrelation function
print(f'{dt.now().strftime("%Y %b %d %H:%M:%S")}, computing from {options["file"]}')
if 'batch' in list(options.keys()) : acf = flatten(pool.map(batch_autocorrelation, lag_batches))
else : acf = pool.map(one_autocorrelation, lags)
print(f'done with parallel computation, {dt.now().strftime("%Y %b %d %H:%M:%S")}')
## convert to dataframe
acf = pd.DataFrame(acf, index=fin.index.values[:len(lags)]).reset_index().rename(columns={'index':'time'})
## save output
acf.to_csv(fout, index=False, float_format='%.7g')
print(f'computed {options["file"]} and saved to {options["file_out"]}')
## close the MPI pool
pool.close()
| [
"itertools.repeat",
"pandas.read_csv",
"schwimmbad.MPIPool",
"numpy.argwhere",
"datetime.datetime.now",
"sys.exit"
] | [((4599, 4608), 'schwimmbad.MPIPool', 'MPIPool', ([], {}), '()\n', (4606, 4608), False, 'from schwimmbad import MPIPool\n'), ((2400, 2445), 'pandas.read_csv', 'pd.read_csv', (["options['file']"], {'index_col': '(False)'}), "(options['file'], index_col=False)\n", (2411, 2445), True, 'import pandas as pd\n'), ((3845, 3853), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (3851, 3853), True, 'from datetime import datetime as dt\n'), ((4205, 4213), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4211, 4213), True, 'from datetime import datetime as dt\n'), ((4689, 4700), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4697, 4700), False, 'import sys\n'), ((2726, 2818), 'pandas.read_csv', 'pd.read_csv', (["options['file']"], {'sep': '""" """', 'skiprows': 'header', 'names': 'col_names', 'index_col': '(False)'}), "(options['file'], sep=' ', skiprows=header, names=col_names,\n index_col=False)\n", (2737, 2818), True, 'import pandas as pd\n'), ((3577, 3613), 'numpy.argwhere', 'argwhere', (['(fin.index.values > max_lag)'], {}), '(fin.index.values > max_lag)\n', (3585, 3613), False, 'from numpy import argwhere\n'), ((1061, 1083), 'itertools.repeat', 'itertools.repeat', (['x', '(1)'], {}), '(x, 1)\n', (1077, 1083), False, 'import itertools\n'), ((4920, 4928), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4926, 4928), True, 'from datetime import datetime as dt\n'), ((5183, 5191), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (5189, 5191), True, 'from datetime import datetime as dt\n'), ((4055, 4063), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4061, 4063), True, 'from datetime import datetime as dt\n'), ((4451, 4459), 'datetime.datetime.now', 'dt.now', ([], {}), '()\n', (4457, 4459), True, 'from datetime import datetime as dt\n')] |
#!/usr/bin/env python
"""Split the classes into two equal-sized groups to maximize accuracy."""
import json
import os
import random
import numpy as np
random.seed(0)
import logging
import sys
from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def calculate_split_accuracy(cm):
"""
Calculate the accuracy of the adjusted classifier.
The adjusted classifier is built by joining the first n/2 classes into one
group and the rest into another group.
"""
n = len(cm)
first = int(n / 2)
cm_small = np.zeros((2, 2))
for i in range(n):
class_i = int(i < first)
for j in range(n):
class_j = int(j < first)
cm_small[class_i][class_j] += cm[i][j]
return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())
def calculate_split_error(cm):
"""Calculate the error of 2 group split."""
return 1.0 - calculate_split_accuracy(cm)
def simulated_annealing(current_cm,
current_perm=None,
score=calculate_split_error,
steps=2 * 10**5,
temp=100.0,
cooling_factor=0.99,
deterministic=False):
"""
Optimize current_cm by randomly swapping elements.
Parameters
----------
current_cm : numpy array
current_perm : None or iterable, optional (default: None)
steps : int, optional (default: 2 * 10**4)
temp : float > 0.0, optional (default: 100.0)
Temperature
cooling_factor: float in (0, 1), optional (default: 0.99)
"""
assert temp > 0
assert cooling_factor > 0
assert cooling_factor < 1
n = len(current_cm)
if current_perm is None:
current_perm = list(range(n))
current_perm = np.array(current_perm)
# Debugging code
perm_exp = np.zeros((n, n), dtype=np.int)
for i in range(n):
for j in range(n):
perm_exp[i][j] = j
current_cm = apply_permutation(current_cm, current_perm)
perm_exp_current = apply_permutation(perm_exp, current_perm)
logging.debug(perm_exp_current[0])
print("apply permutation %s" % str(current_perm))
current_score = score(current_cm)
best_perm = current_perm
best_cm = current_cm
best_score = current_score
print("## Starting Score: {:0.2f}%".format(current_score * 100))
for step in range(steps):
tmp = np.array(current_cm, copy=True)
split_part = int(n / 2) - 1
i = random.randint(0, split_part)
j = random.randint(split_part + 1, n - 1)
perm = swap_1d(current_perm.copy(), i, j)
tmp = swap(tmp, i, j)
# tmp = apply_permutation(tmp, perm)
tmp_score = score(tmp)
if deterministic:
chance = 1.0
else:
chance = random.random()
temp *= 0.99
hot_prob = min(1, np.exp(-(tmp_score - current_score) / temp))
if chance <= hot_prob:
if best_score > tmp_score: # Minimize the score
best_perm = perm
best_cm = tmp
best_score = tmp_score
current_score = tmp_score
perm_exp_current = swap(perm_exp_current, i, j)
print(list(perm_exp_current[0]))
current_cm = tmp
logging.info(("Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, "
"step=%i)"),
(current_score * 100),
(best_score * 100),
(hot_prob * 100),
step)
return {'cm': best_cm, 'perm': list(perm_exp_current[0])}
def main(cm_file, perm_file, steps, labels_file):
"""Orchestrate."""
# Load confusion matrix
with open(cm_file) as f:
cm = json.load(f)
cm = np.array(cm)
# Load permutation
if os.path.isfile(perm_file):
print("loaded %s" % perm_file)
with open(perm_file) as data_file:
perm = json.load(data_file)
else:
perm = random.shuffle(list(range(len(cm))))
print("Score without perm: {:0.2f}%".format(calculate_split_error(cm) * 100))
result = simulated_annealing(cm, perm,
score=calculate_split_error,
deterministic=True,
steps=steps)
# First recursive step
# split_i = int(len(cm) / 2)
# cm = result['cm'][:split_i, :split_i]
# perm = list(range(split_i))
# result = simulated_annealing(cm, perm,
# score=calculate_split_error,
# deterministic=True,
# steps=steps)
print("Score: {}".format(calculate_split_error(result['cm'])))
print("Perm: {}".format(list(result['perm'])))
# Load labels
if os.path.isfile(labels_file):
with open(labels_file) as f:
symbols = json.load(f)
else:
symbols = read_symbols()
print("Symbols: {}".format([symbols[i] for i in result['perm']]))
plot_cm(result['cm'], zero_diagonal=True)
def get_parser():
"""Get parser object for script xy.py."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--cm",
dest="cm_file",
help=("path of a json file with a confusion matrix"),
metavar="cm.json",
default='confusion-matrix.json')
parser.add_argument("--perm",
dest="perm_file",
help=("path of a json file with a permutation to "
"start with"),
metavar="perm.json",
default="")
parser.add_argument("--labels",
dest="labels_file",
help=("path of a json file with a list of label "
"names"),
metavar="labels.json",
default="")
parser.add_argument("-n",
dest="n",
default=4 * 10**5,
type=int,
help="number of steps to iterate")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.cm_file, args.perm_file, args.n, args.labels_file)
| [
"visualize.plot_cm",
"visualize.apply_permutation",
"json.load",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"random.randint",
"numpy.zeros",
"random.random",
"os.path.isfile",
"logging.info",
"random.seed",
"numpy.array",
"numpy.exp",
"visualize.swap",
"visualiz... | [((155, 169), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (166, 169), False, 'import random\n'), ((276, 388), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.DEBUG, stream=sys.stdout)\n", (295, 388), False, 'import logging\n'), ((708, 724), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (716, 724), True, 'import numpy as np\n'), ((1956, 1978), 'numpy.array', 'np.array', (['current_perm'], {}), '(current_perm)\n', (1964, 1978), True, 'import numpy as np\n'), ((2016, 2046), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'np.int'}), '((n, n), dtype=np.int)\n', (2024, 2046), True, 'import numpy as np\n'), ((2146, 2189), 'visualize.apply_permutation', 'apply_permutation', (['current_cm', 'current_perm'], {}), '(current_cm, current_perm)\n', (2163, 2189), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((2213, 2254), 'visualize.apply_permutation', 'apply_permutation', (['perm_exp', 'current_perm'], {}), '(perm_exp, current_perm)\n', (2230, 2254), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((2259, 2293), 'logging.debug', 'logging.debug', (['perm_exp_current[0]'], {}), '(perm_exp_current[0])\n', (2272, 2293), False, 'import logging\n'), ((4027, 4052), 'os.path.isfile', 'os.path.isfile', (['perm_file'], {}), '(perm_file)\n', (4041, 4052), False, 'import os\n'), ((5019, 5046), 'os.path.isfile', 'os.path.isfile', (['labels_file'], {}), '(labels_file)\n', (5033, 5046), False, 'import os\n'), ((5237, 5278), 'visualize.plot_cm', 'plot_cm', (["result['cm']"], {'zero_diagonal': '(True)'}), "(result['cm'], zero_diagonal=True)\n", (5244, 5278), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((5429, 5516), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=\n ArgumentDefaultsHelpFormatter)\n', (5443, 5516), False, 'from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\n'), ((2584, 2615), 'numpy.array', 'np.array', (['current_cm'], {'copy': '(True)'}), '(current_cm, copy=True)\n', (2592, 2615), True, 'import numpy as np\n'), ((2664, 2693), 'random.randint', 'random.randint', (['(0)', 'split_part'], {}), '(0, split_part)\n', (2678, 2693), False, 'import random\n'), ((2706, 2743), 'random.randint', 'random.randint', (['(split_part + 1)', '(n - 1)'], {}), '(split_part + 1, n - 1)\n', (2720, 2743), False, 'import random\n'), ((2808, 2823), 'visualize.swap', 'swap', (['tmp', 'i', 'j'], {}), '(tmp, i, j)\n', (2812, 2823), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((3957, 3969), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3966, 3969), False, 'import json\n'), ((3983, 3995), 'numpy.array', 'np.array', (['cm'], {}), '(cm)\n', (3991, 3995), True, 'import numpy as np\n'), ((5148, 5162), 'visualize.read_symbols', 'read_symbols', ([], {}), '()\n', (5160, 5162), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((2986, 3001), 'random.random', 'random.random', ([], {}), '()\n', (2999, 3001), False, 'import random\n'), ((3053, 3096), 'numpy.exp', 'np.exp', (['(-(tmp_score - current_score) / temp)'], {}), '(-(tmp_score - current_score) / temp)\n', (3059, 3096), True, 'import numpy as np\n'), ((3361, 3389), 'visualize.swap', 'swap', (['perm_exp_current', 'i', 'j'], {}), '(perm_exp_current, i, j)\n', (3365, 3389), False, 'from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d\n'), ((3476, 3616), 'logging.info', 'logging.info', (['"""Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, step=%i)"""', '(current_score * 100)', '(best_score * 100)', '(hot_prob * 100)', 'step'], {}), "('Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, step=%i)',\n current_score * 100, best_score * 100, hot_prob * 100, step)\n", (3488, 3616), False, 'import logging\n'), ((4155, 4175), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (4164, 4175), False, 'import json\n'), ((5107, 5119), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5116, 5119), False, 'import json\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_HBFPellipsoidConvergence [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_HBFPellipsoidConvergence&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerMVEStop).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import arange, r_, min as npmin, max as npmax
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, \
xlabel, xticks, yticks
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from HighBreakdownFP import HighBreakdownFP
from ARPM_utils import struct_to_dict, save_plot
from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid
from Price2AdjustedPrice import Price2AdjustedPrice
from GarchResiduals import GarchResiduals
from BlowSpinFP import BlowSpinFP
from ColorCodedFP import ColorCodedFP
# -
# ## Upload the database
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)
StocksSPX = struct_to_dict(db['StocksSPX'])
# -
# ## Compute the dividend-adjusted returns of two stocks
# +
i_ = 2
t_ = 100
_, x_1 = Price2AdjustedPrice(StocksSPX.Date.reshape(1,-1), StocksSPX.Prices[[25],:], StocksSPX.Dividends[25]) # Cisco Systems Inc
_, x_2 = Price2AdjustedPrice(StocksSPX.Date.reshape(1,-1), StocksSPX.Prices[[5],:], StocksSPX.Dividends[5]) # General Electric
date = StocksSPX.Date[1:]
x_1 = x_1[-t_:]
x_2 = x_2[-t_:]
date = date[-t_:]
# -
# ## Compute the invariants using GARCH(1,1) fit
epsi = GarchResiduals(r_[x_1,x_2])
# ## Compute the Flexible Probability profiles using Blow-Spin method
b = 1 # number of blows
s = 0 # number of spins
p, _ = BlowSpinFP(epsi, b, s)
q_ = b + s
# ## Compute HBFP-mean and HBFP-covariance
print('Computing HBFP-mean and HBFP-covariance')
p_tilde = 0.5
mu_HBFP, sigma2_HBFP, p_HBFP, v_HBFP, t_tilde = HighBreakdownFP(epsi, p, 0, p_tilde)
# ## Generate a static figure showing the ellipsoids computed at each iteration, as well as the volume/probability graph
# +
k_ = mu_HBFP.shape[1]
# color settings
c_vp = [0.2, 0.2, 0.6]
greyrange = arange(0,0.8,0.01)
# axis lim
c = .75
epslim1 = [min(epsi[0]) - c, max(epsi[0])+c]
epslim2 = [min(epsi[1]) - c, max(epsi[1])+c]
# figure settings
f = figure()
with plt.style.context("seaborn-whitegrid"):
# scatter plot of observations with ellipsoid superimposed
CM, C = ColorCodedFP(p, None, None, greyrange, 0, 1, [1, 0])
h_1 = plt.subplot2grid((4,1),(0,0),rowspan=3)
h_1.set_yticklabels([])
h_1.set_xticklabels([])
xlabel('$\epsilon_1$')
ylabel('$\epsilon_2$')
ell_2 = PlotTwoDimEllipsoid(mu_HBFP[:,[k_-1]], sigma2_HBFP[:,:,k_-1], 1, False, False, 'r')
out = scatter(epsi[0, t_tilde.astype(int)], epsi[1, t_tilde.astype(int)], s=100, facecolor='none',edgecolor=[1, 0.5,0.4], marker='o', lw=1.5, zorder=10)
for k in range(k_):
ell_1 = PlotTwoDimEllipsoid(mu_HBFP[:,[k]], sigma2_HBFP[:,:,k], 1, False, False, [0.75, 0.75, 0.75], 0.3)
scatter(epsi[0], epsi[1], 15, c=C, marker='.',cmap=CM)
leg = legend(handles=[ell_2[0][0],out,ell_1[0][0]],labels=['HBFP ellipsoid','outliers','iterative ellipsoids'])
xlim(epslim1)
ylim(epslim2)
plt.grid(True)
h_2 = plt.subplot2grid((4,1),(3,0))
h_2.set_facecolor('w')
for k in range(k_):
plot([p_HBFP[k], p_HBFP[k]], [v_HBFP[k], v_HBFP[k]],color=c_vp,marker='*',markersize= 3,markerfacecolor= c_vp)
xlim([npmin(p_HBFP[1:]), npmax(p_HBFP)])
ylim([npmin(v_HBFP) - (npmax(v_HBFP) - npmin(v_HBFP)) / 10, npmax(v_HBFP[:-1])])
xlabel('probability')
ylabel('volume')
plt.grid(False)
plt.tight_layout();
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"ColorCodedFP.ColorCodedFP",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.style.use",
"GarchResiduals.GarchResiduals",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"os.path.abspath",
"numpy.max",
"BlowSpinFP.BlowSpinFP",
"matplotlib.pypl... | [((991, 1015), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1004, 1015), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1628), 'ARPM_utils.struct_to_dict', 'struct_to_dict', (["db['StocksSPX']"], {}), "(db['StocksSPX'])\n", (1611, 1628), False, 'from ARPM_utils import struct_to_dict, save_plot\n'), ((2111, 2139), 'GarchResiduals.GarchResiduals', 'GarchResiduals', (['r_[x_1, x_2]'], {}), '(r_[x_1, x_2])\n', (2125, 2139), False, 'from GarchResiduals import GarchResiduals\n'), ((2268, 2290), 'BlowSpinFP.BlowSpinFP', 'BlowSpinFP', (['epsi', 'b', 's'], {}), '(epsi, b, s)\n', (2278, 2290), False, 'from BlowSpinFP import BlowSpinFP\n'), ((2459, 2495), 'HighBreakdownFP.HighBreakdownFP', 'HighBreakdownFP', (['epsi', 'p', '(0)', 'p_tilde'], {}), '(epsi, p, 0, p_tilde)\n', (2474, 2495), False, 'from HighBreakdownFP import HighBreakdownFP\n'), ((2698, 2718), 'numpy.arange', 'arange', (['(0)', '(0.8)', '(0.01)'], {}), '(0, 0.8, 0.01)\n', (2704, 2718), False, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((2850, 2858), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (2856, 2858), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((719, 757), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (731, 757), True, 'import os.path as path\n'), ((2864, 2902), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (2881, 2902), True, 'import matplotlib.pyplot as plt\n'), ((2979, 3031), 'ColorCodedFP.ColorCodedFP', 'ColorCodedFP', (['p', 'None', 'None', 'greyrange', '(0)', '(1)', '[1, 0]'], {}), '(p, None, None, greyrange, 0, 1, [1, 0])\n', (2991, 3031), False, 'from ColorCodedFP import ColorCodedFP\n'), ((3042, 3085), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 1)', '(0, 0)'], {'rowspan': '(3)'}), '((4, 1), (0, 0), rowspan=3)\n', (3058, 3085), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3165), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""$\\\\epsilon_1$"""'], {}), "('$\\\\epsilon_1$')\n", (3148, 3165), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3169, 3192), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""$\\\\epsilon_2$"""'], {}), "('$\\\\epsilon_2$')\n", (3175, 3192), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3204, 3299), 'PlotTwoDimEllipsoid.PlotTwoDimEllipsoid', 'PlotTwoDimEllipsoid', (['mu_HBFP[:, [k_ - 1]]', 'sigma2_HBFP[:, :, k_ - 1]', '(1)', '(False)', '(False)', '"""r"""'], {}), "(mu_HBFP[:, [k_ - 1]], sigma2_HBFP[:, :, k_ - 1], 1, \n False, False, 'r')\n", (3223, 3299), False, 'from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid\n'), ((3587, 3642), 'matplotlib.pyplot.scatter', 'scatter', (['epsi[0]', 'epsi[1]', '(15)'], {'c': 'C', 'marker': '"""."""', 'cmap': 'CM'}), "(epsi[0], epsi[1], 15, c=C, marker='.', cmap=CM)\n", (3594, 3642), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3652, 3766), 'matplotlib.pyplot.legend', 'legend', ([], {'handles': '[ell_2[0][0], out, ell_1[0][0]]', 'labels': "['HBFP ellipsoid', 'outliers', 'iterative ellipsoids']"}), "(handles=[ell_2[0][0], out, ell_1[0][0]], labels=['HBFP ellipsoid',\n 'outliers', 'iterative ellipsoids'])\n", (3658, 3766), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3762, 3775), 'matplotlib.pyplot.xlim', 'xlim', (['epslim1'], {}), '(epslim1)\n', (3766, 3775), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3780, 3793), 'matplotlib.pyplot.ylim', 'ylim', (['epslim2'], {}), '(epslim2)\n', (3784, 3793), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((3798, 3812), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3806, 3812), True, 'import matplotlib.pyplot as plt\n'), ((3823, 3855), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(4, 1)', '(3, 0)'], {}), '((4, 1), (3, 0))\n', (3839, 3855), True, 'import matplotlib.pyplot as plt\n'), ((4157, 4178), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""probability"""'], {}), "('probability')\n", (4163, 4178), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((4183, 4199), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""volume"""'], {}), "('volume')\n", (4189, 4199), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((4204, 4219), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4212, 4219), True, 'import matplotlib.pyplot as plt\n'), ((4224, 4242), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4240, 4242), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1464), 'os.path.join', 'os.path.join', (['GLOBAL_DB', '"""db_Stocks"""'], {}), "(GLOBAL_DB, 'db_Stocks')\n", (1440, 1464), False, 'import os\n'), ((3485, 3589), 'PlotTwoDimEllipsoid.PlotTwoDimEllipsoid', 'PlotTwoDimEllipsoid', (['mu_HBFP[:, [k]]', 'sigma2_HBFP[:, :, k]', '(1)', '(False)', '(False)', '[0.75, 0.75, 0.75]', '(0.3)'], {}), '(mu_HBFP[:, [k]], sigma2_HBFP[:, :, k], 1, False, False,\n [0.75, 0.75, 0.75], 0.3)\n', (3504, 3589), False, 'from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid\n'), ((3912, 4028), 'matplotlib.pyplot.plot', 'plot', (['[p_HBFP[k], p_HBFP[k]]', '[v_HBFP[k], v_HBFP[k]]'], {'color': 'c_vp', 'marker': '"""*"""', 'markersize': '(3)', 'markerfacecolor': 'c_vp'}), "([p_HBFP[k], p_HBFP[k]], [v_HBFP[k], v_HBFP[k]], color=c_vp, marker='*',\n markersize=3, markerfacecolor=c_vp)\n", (3916, 4028), False, 'from matplotlib.pyplot import figure, plot, legend, xlim, ylim, scatter, ylabel, xlabel, xticks, yticks\n'), ((1526, 1565), 'os.path.join', 'os.path.join', (['TEMPORARY_DB', '"""db_Stocks"""'], {}), "(TEMPORARY_DB, 'db_Stocks')\n", (1538, 1565), False, 'import os\n'), ((4033, 4050), 'numpy.min', 'npmin', (['p_HBFP[1:]'], {}), '(p_HBFP[1:])\n', (4038, 4050), True, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((4052, 4065), 'numpy.max', 'npmax', (['p_HBFP'], {}), '(p_HBFP)\n', (4057, 4065), True, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((4132, 4150), 'numpy.max', 'npmax', (['v_HBFP[:-1]'], {}), '(v_HBFP[:-1])\n', (4137, 4150), True, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((4078, 4091), 'numpy.min', 'npmin', (['v_HBFP'], {}), '(v_HBFP)\n', (4083, 4091), True, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((4095, 4108), 'numpy.max', 'npmax', (['v_HBFP'], {}), '(v_HBFP)\n', (4100, 4108), True, 'from numpy import arange, r_, min as npmin, max as npmax\n'), ((4111, 4124), 'numpy.min', 'npmin', (['v_HBFP'], {}), '(v_HBFP)\n', (4116, 4124), True, 'from numpy import arange, r_, min as npmin, max as npmax\n')] |
"""
Application of SPOD to backward-facing step 2D slice pressure data by DDES.
Details of the data can be found in the following:
<NAME>., <NAME>., & <NAME>. (2022). Detached Eddy Simulation: Recent
Development and Application to Compressor Tip Leakage Flow. ASME Journal
of Turbomachinery, 144(1), 011009.
<NAME> (<EMAIL>)
Last update: 24-Sep-2021
"""
# -------------------------------------------------------------------------
# 0. Import libraries
# standard python libraries
import sys
import time
import matplotlib.pyplot as plt
from matplotlib import cm
import imageio
import psutil
import os
import numpy as np
import h5py
import pylab
import io
# SPOD path
current_path = os.getcwd()
parrent_path = os.path.dirname(current_path)
SPOD_path = parrent_path
# SPOD library
sys.path.insert(0, SPOD_path)
import spod
# -------------------------------------------------------------------------
# 1. Load input data for SPOD
# data shape: nrow = nt (total number of snapshots)
# ncol = ngrid*nvar (number of grid point * number of variable)
# grid shape: nrow = ngrid (number of grid)
# ncol = 3 (e.g., x/H, y/H, control volume size/H**3)
# In this case, nt = 800, ngrid = 3580, and nvar = 1 (e.g., p)
# -------------------------------------------------------------------------
# Sec. 1 start time
start_sec1 = time.time()
# data path
data_path = os.path.join(current_path, 'bstep_data')
# option to save SPOD results
save_fig = True # postprocess figs
save_path = data_path
# load data from h5 format
h5f = h5py.File(os.path.join(data_path,'bstepDDES.h5'),'r')
grid = h5f['grid'][:] # grid points
dt = h5f['dt'][0] # unit in seconds
ng = np.int(grid.shape[0]) # number of grid point
data = h5f['data'][:,0:ng] # only p is loaded to RAM
nt = data.shape[0] # number of snap shot
nx = data.shape[1] # number of grid point * number of variable
nvar = np.int(nx/ng) # number of variables
h5f.close()
# calculate weight
weight_grid = grid[:,2] # control volume weighted
weight_grid = weight_grid/np.mean(weight_grid) # normalized by mean values for better scaling
weight_phy = np.ones(ng) # sigle variable does not require weights from physics
weight = weight_grid*weight_phy # element-wise multiplation
# Sec. 1 end time
end_sec1 = time.time()
print('--------------------------------------' )
print('SPOD input data summary:' )
print('--------------------------------------' )
print('number of snapshot :', nt )
print('number of grid point :', ng )
print('number of variable :', nvar )
print('--------------------------------------' )
print('SPOD input data loaded!' )
print('Time lapsed: %.2f s'%(end_sec1-start_sec1) )
print('--------------------------------------' )
# -------------------------------------------------------------------------
# 2. Run SPOD
# function spod.spod(data_matrix,timestep)
# -------------------------------------------------------------------------
# Sec. 2 start time
start_sec2 = time.time()
# main function
spod.spod(data, dt, save_path, weight, method='fast')
# Sec. 2 end time
end_sec2 = time.time()
print('--------------------------------------' )
print('SPOD main calculation finished!' )
print('Time lapsed: %.2f s'%(end_sec2-start_sec2) )
print('--------------------------------------' )
# -------------------------------------------------------------------------
# 3. Read SPOD result
# -------------------------------------------------------------------------
# Sec. 3 start time
start_sec3 = time.time()
# load data from h5 format
SPOD_LPf = h5py.File(os.path.join(save_path,'SPOD_LPf.h5'),'r')
L = SPOD_LPf['L'][:,:] # modal energy E(f, M)
P = SPOD_LPf['P'][:,:,:] # mode shape
f = SPOD_LPf['f'][:] # frequency
SPOD_LPf.close()
# Sec. 3 end time
end_sec3 = time.time()
print('--------------------------------------' )
print('SPOD results read in!' )
print('Time lapsed: %.2f s'%(end_sec3-start_sec3) )
print('--------------------------------------' )
# -------------------------------------------------------------------------
# 4. Plot SPOD result
# Figs: 1. f-mode energy;
# 2. mode shape at given mode number and frequency
# 3. animation of original flow field
# 4. animation of reconstructed flow field
# -------------------------------------------------------------------------
# Sec. 4 start time
start_sec4 = time.time()
# -------------------------------------------------------------------------
### 4.0 pre-defined function
params={
'axes.labelsize': '20',
'xtick.labelsize': '16',
'ytick.labelsize': '16',
'lines.linewidth': 1.5,
'legend.fontsize': '14',
'figure.figsize': '8, 6' # set figure size
}
pylab.rcParams.update(params)
def figure_format(xtitle, ytitle, zoom, legend):
plt.xlabel(xtitle)
plt.ylabel(ytitle)
plt.axis(zoom)
if legend != 'None':
plt.legend(loc=legend)
def bstep_contour(q, qlevels, qname, x, y, colormap=cm.coolwarm):
'''
Purpose: template for backward-facing step 2D contour plot
'''
cntr = plt.tricontourf(x,y,q, qlevels,cmap=colormap,extend='both')
# colorbar
plt.colorbar(cntr,ticks=np.linspace(qlevels[0],qlevels[-1],3),shrink=0.8,extendfrac='auto',\
orientation='horizontal', pad=0.25, label=qname)
# wall boundary
plt.fill_between([-4,0,0,30], [1,1,0,0], -1, facecolor='whitesmoke')
plt.plot([-4,0,0,30], [1,1,0,0],color='black',linewidth=1)
# figure format
figure_format('x/H','y/H', [-2,6,-0.5,2],'None')
return fig
def bstep_contour_anim(t_start, t_end, t_delta, dt, ani_save_name, q, qlevels,
qname, x, y, colormap=cm.coolwarm):
'''
Purpose: plot and save animation of backward-facing step 2D contour plot
'''
with imageio.get_writer(os.path.join(save_path,ani_save_name), mode='I') as writer:
# loop over snapshots
for ti in range(t_start,t_end,t_delta):
plt.figure(figsize=(6,4))
bstep_contour(q[ti,:], qlevels, qname, x, y)
plt.text(-1.7,-0.35,'t = %.4f s'%(ti*dt), fontsize=14)
# convert Matplotib figure to png file
buf = io.BytesIO()
plt.savefig(buf, format='png', dpi=100)
buf.seek(0)
# read png in and plot gif
image = imageio.imread(buf)
writer.append_data(image)
# release RAM
plt.close()
return
# -------------------------------------------------------------------------
### 4.1 Energy spectrum
fig = spod.plot_spectrum(f,L,hl_idx=5)
# figure format
figure_format(xtitle='Frequency (Hz)', ytitle='SPOD mode energy',
zoom=[10**0, 2*10**2, 10**0, 10**8], legend='best')
if save_fig:
plt.savefig(os.path.join(save_path,'Spectrum.png'), dpi=300, bbox_inches='tight')
plt.close()
print('Plot spectrum finished')
# -------------------------------------------------------------------------
### 4.2 Mode shape
plot_modes = [[0,5],
[3,5]] # [[M1,f1],[M2,f2],...] to be plotted
# plot mode shape contour
for i in range(len(plot_modes)):
Mi = plot_modes[i][0]
fi = plot_modes[i][1]
fig = plt.figure(figsize=(6,4))
fig = bstep_contour(np.real(P[fi,:,Mi]), np.arange(-0.05,0.055,0.005), r'$\phi_p$',
grid[:,0], grid[:,1])
plt.text(-1.7,-0.35,'Mode '+str(Mi+1)+', f = %.2f Hz'%(f[fi]), fontsize=14)
if save_fig:
plt.savefig(os.path.join(save_path,'M'+str(Mi)+'f'+str(fi)+
'_p_mode_shape.png'), dpi=300,
bbox_inches='tight')
plt.close()
print('Plot mode shape finished')
# -------------------------------------------------------------------------
### 4.3 Original flow field
data_mean = np.mean(data,axis=0) # time-averaged data
# plot snapshot flow field
plot_snapshot = [0,10] # [t1,t2,...] to be plotted
for i in range(len(plot_snapshot)):
ti = plot_snapshot[i]
fig = plt.figure(figsize=(6,4))
fig = bstep_contour(data[ti,:]-data_mean, np.arange(-300,330,30),
r'$p-\bar{p} $ (Pa)', grid[:,0], grid[:,1])
plt.text(-1.7,-0.35,'t = %.4f s'%(ti*dt), fontsize=14)
if save_fig:
plt.savefig(os.path.join(save_path,'t'+str(ti)+'_p.png'), dpi=300, bbox_inches='tight')
plt.close()
# plot animation of flow field
t_start = 0
t_end = np.int(nt/10)
t_delta = 1
if save_fig:
bstep_contour_anim(t_start, t_end, t_delta, dt, ani_save_name='ori_p_anim.gif',
q=data-data_mean, qlevels=np.arange(-300,330,30),
qname=r'$p-\bar{p} $ (Pa)', x=grid[:,0], y=grid[:,1],
colormap=cm.coolwarm)
print('Plot original flow field finished')
# -------------------------------------------------------------------------
### 4.4 Reconstructed flow field
# time series to be reconstructed
t_start = 0
t_end = np.int(nt/10)
t_delta = 1
# modes and frequencies used for reconstruction
Ms = np.arange(0,L.shape[1])
fs = np.arange(0,f.shape[0])
# plot animation of reconstructed flow field
if save_fig:
# reconstruction function
data_rec = spod.reconstruct_time_method(data-data_mean,dt,f,P,Ms,fs,weight=weight)
bstep_contour_anim(t_start, t_end, t_delta, dt,
ani_save_name='rec_p_anim.gif',
q=data_rec, qlevels=np.arange(-300,330,30),
qname=r'$p-\bar{p} $ (Pa)', x=grid[:,0], y=grid[:,1],
colormap=cm.coolwarm)
print('Plot reconstructed flow field finished')
# Sec. 4 end time
end_sec4 = time.time()
print('--------------------------------------' )
print('SPOD results postprocessed!' )
print('Figs saved to the directory:' )
print( save_path )
print('Time lapsed: %.2f s'%(end_sec4-start_sec4) )
print('--------------------------------------' )
# -------------------------------------------------------------------------
# -1. print memory usage
# -------------------------------------------------------------------------
process = psutil.Process(os.getpid())
RAM_usage = np.around(process.memory_info().rss/1024**3, decimals=2) # unit in GBs
print('Total memory usage is: %.2f GB'%RAM_usage)
# End | [
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.tricontourf",
"matplotlib.pyplot.fill_between",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.dirname",
"spod.plot_spectrum",
"numpy.int",
"numpy.real",
"numpy.linspace",
"io.BytesIO",
"pylab... | [((691, 702), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (700, 702), False, 'import os\n'), ((718, 747), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (733, 747), False, 'import os\n'), ((792, 821), 'sys.path.insert', 'sys.path.insert', (['(0)', 'SPOD_path'], {}), '(0, SPOD_path)\n', (807, 821), False, 'import sys\n'), ((1352, 1363), 'time.time', 'time.time', ([], {}), '()\n', (1361, 1363), False, 'import time\n'), ((1389, 1429), 'os.path.join', 'os.path.join', (['current_path', '"""bstep_data"""'], {}), "(current_path, 'bstep_data')\n", (1401, 1429), False, 'import os\n'), ((1706, 1727), 'numpy.int', 'np.int', (['grid.shape[0]'], {}), '(grid.shape[0])\n', (1712, 1727), True, 'import numpy as np\n'), ((1937, 1952), 'numpy.int', 'np.int', (['(nx / ng)'], {}), '(nx / ng)\n', (1943, 1952), True, 'import numpy as np\n'), ((2196, 2207), 'numpy.ones', 'np.ones', (['ng'], {}), '(ng)\n', (2203, 2207), True, 'import numpy as np\n'), ((2380, 2391), 'time.time', 'time.time', ([], {}), '()\n', (2389, 2391), False, 'import time\n'), ((3158, 3169), 'time.time', 'time.time', ([], {}), '()\n', (3167, 3169), False, 'import time\n'), ((3187, 3240), 'spod.spod', 'spod.spod', (['data', 'dt', 'save_path', 'weight'], {'method': '"""fast"""'}), "(data, dt, save_path, weight, method='fast')\n", (3196, 3240), False, 'import spod\n'), ((3271, 3282), 'time.time', 'time.time', ([], {}), '()\n', (3280, 3282), False, 'import time\n'), ((3702, 3713), 'time.time', 'time.time', ([], {}), '()\n', (3711, 3713), False, 'import time\n'), ((3980, 3991), 'time.time', 'time.time', ([], {}), '()\n', (3989, 3991), False, 'import time\n'), ((4587, 4598), 'time.time', 'time.time', ([], {}), '()\n', (4596, 4598), False, 'import time\n'), ((4885, 4914), 'pylab.rcParams.update', 'pylab.rcParams.update', (['params'], {}), '(params)\n', (4906, 4914), False, 'import pylab\n'), ((6740, 6774), 'spod.plot_spectrum', 'spod.plot_spectrum', (['f', 'L'], {'hl_idx': '(5)'}), '(f, L, hl_idx=5)\n', (6758, 6774), False, 'import spod\n'), ((7977, 7998), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7984, 7998), True, 'import numpy as np\n'), ((8583, 8598), 'numpy.int', 'np.int', (['(nt / 10)'], {}), '(nt / 10)\n', (8589, 8598), True, 'import numpy as np\n'), ((9112, 9127), 'numpy.int', 'np.int', (['(nt / 10)'], {}), '(nt / 10)\n', (9118, 9127), True, 'import numpy as np\n'), ((9192, 9216), 'numpy.arange', 'np.arange', (['(0)', 'L.shape[1]'], {}), '(0, L.shape[1])\n', (9201, 9216), True, 'import numpy as np\n'), ((9221, 9245), 'numpy.arange', 'np.arange', (['(0)', 'f.shape[0]'], {}), '(0, f.shape[0])\n', (9230, 9245), True, 'import numpy as np\n'), ((9788, 9799), 'time.time', 'time.time', ([], {}), '()\n', (9797, 9799), False, 'import time\n'), ((1565, 1604), 'os.path.join', 'os.path.join', (['data_path', '"""bstepDDES.h5"""'], {}), "(data_path, 'bstepDDES.h5')\n", (1577, 1604), False, 'import os\n'), ((2113, 2133), 'numpy.mean', 'np.mean', (['weight_grid'], {}), '(weight_grid)\n', (2120, 2133), True, 'import numpy as np\n'), ((3764, 3802), 'os.path.join', 'os.path.join', (['save_path', '"""SPOD_LPf.h5"""'], {}), "(save_path, 'SPOD_LPf.h5')\n", (3776, 3802), False, 'import os\n'), ((4969, 4987), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xtitle'], {}), '(xtitle)\n', (4979, 4987), True, 'import matplotlib.pyplot as plt\n'), ((4992, 5010), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ytitle'], {}), '(ytitle)\n', (5002, 5010), True, 'import matplotlib.pyplot as plt\n'), ((5015, 5029), 'matplotlib.pyplot.axis', 'plt.axis', (['zoom'], {}), '(zoom)\n', (5023, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5307), 'matplotlib.pyplot.tricontourf', 'plt.tricontourf', (['x', 'y', 'q', 'qlevels'], {'cmap': 'colormap', 'extend': '"""both"""'}), "(x, y, q, qlevels, cmap=colormap, extend='both')\n", (5259, 5307), True, 'import matplotlib.pyplot as plt\n'), ((5508, 5582), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[-4, 0, 0, 30]', '[1, 1, 0, 0]', '(-1)'], {'facecolor': '"""whitesmoke"""'}), "([-4, 0, 0, 30], [1, 1, 0, 0], -1, facecolor='whitesmoke')\n", (5524, 5582), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5647), 'matplotlib.pyplot.plot', 'plt.plot', (['[-4, 0, 0, 30]', '[1, 1, 0, 0]'], {'color': '"""black"""', 'linewidth': '(1)'}), "([-4, 0, 0, 30], [1, 1, 0, 0], color='black', linewidth=1)\n", (5589, 5647), True, 'import matplotlib.pyplot as plt\n'), ((7026, 7037), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7035, 7037), True, 'import matplotlib.pyplot as plt\n'), ((7370, 7396), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (7380, 7396), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (8182, 8198), True, 'import matplotlib.pyplot as plt\n'), ((8340, 8400), 'matplotlib.pyplot.text', 'plt.text', (['(-1.7)', '(-0.35)', "('t = %.4f s' % (ti * dt))"], {'fontsize': '(14)'}), "(-1.7, -0.35, 't = %.4f s' % (ti * dt), fontsize=14)\n", (8348, 8400), True, 'import matplotlib.pyplot as plt\n'), ((9356, 9435), 'spod.reconstruct_time_method', 'spod.reconstruct_time_method', (['(data - data_mean)', 'dt', 'f', 'P', 'Ms', 'fs'], {'weight': 'weight'}), '(data - data_mean, dt, f, P, Ms, fs, weight=weight)\n', (9384, 9435), False, 'import spod\n'), ((10319, 10330), 'os.getpid', 'os.getpid', ([], {}), '()\n', (10328, 10330), False, 'import os\n'), ((5063, 5085), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend'}), '(loc=legend)\n', (5073, 5085), True, 'import matplotlib.pyplot as plt\n'), ((6952, 6991), 'os.path.join', 'os.path.join', (['save_path', '"""Spectrum.png"""'], {}), "(save_path, 'Spectrum.png')\n", (6964, 6991), False, 'import os\n'), ((7420, 7441), 'numpy.real', 'np.real', (['P[fi, :, Mi]'], {}), '(P[fi, :, Mi])\n', (7427, 7441), True, 'import numpy as np\n'), ((7441, 7471), 'numpy.arange', 'np.arange', (['(-0.05)', '(0.055)', '(0.005)'], {}), '(-0.05, 0.055, 0.005)\n', (7450, 7471), True, 'import numpy as np\n'), ((7809, 7820), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7818, 7820), True, 'import matplotlib.pyplot as plt\n'), ((8244, 8268), 'numpy.arange', 'np.arange', (['(-300)', '(330)', '(30)'], {}), '(-300, 330, 30)\n', (8253, 8268), True, 'import numpy as np\n'), ((8517, 8528), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8526, 8528), True, 'import matplotlib.pyplot as plt\n'), ((5348, 5387), 'numpy.linspace', 'np.linspace', (['qlevels[0]', 'qlevels[-1]', '(3)'], {}), '(qlevels[0], qlevels[-1], 3)\n', (5359, 5387), True, 'import numpy as np\n'), ((5990, 6028), 'os.path.join', 'os.path.join', (['save_path', 'ani_save_name'], {}), '(save_path, ani_save_name)\n', (6002, 6028), False, 'import os\n'), ((6140, 6166), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (6150, 6166), True, 'import matplotlib.pyplot as plt\n'), ((6235, 6295), 'matplotlib.pyplot.text', 'plt.text', (['(-1.7)', '(-0.35)', "('t = %.4f s' % (ti * dt))"], {'fontsize': '(14)'}), "(-1.7, -0.35, 't = %.4f s' % (ti * dt), fontsize=14)\n", (6243, 6295), True, 'import matplotlib.pyplot as plt\n'), ((6360, 6372), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6370, 6372), False, 'import io\n'), ((6385, 6424), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""', 'dpi': '(100)'}), "(buf, format='png', dpi=100)\n", (6396, 6424), True, 'import matplotlib.pyplot as plt\n'), ((6509, 6528), 'imageio.imread', 'imageio.imread', (['buf'], {}), '(buf)\n', (6523, 6528), False, 'import imageio\n'), ((6606, 6617), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6615, 6617), True, 'import matplotlib.pyplot as plt\n'), ((8756, 8780), 'numpy.arange', 'np.arange', (['(-300)', '(330)', '(30)'], {}), '(-300, 330, 30)\n', (8765, 8780), True, 'import numpy as np\n'), ((9570, 9594), 'numpy.arange', 'np.arange', (['(-300)', '(330)', '(30)'], {}), '(-300, 330, 30)\n', (9579, 9594), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 21 15:55:41 2017
@author: weif
"""
import pafy
import pandas as pd
import numpy as np
from pydub import AudioSegment
import soundfile as sf
#%%
#csv_header = ['YTID', 'start_sec', 'end_sec','l1','l2','l3','l4','l5','l6','l7','l8','l9','l10','l11']
#csv_header = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]
#videolist = pd.read_csv('../balanced_train_segments.csv',skiprows=2, header=None,names = ['id',"start", "end", "0", "1","2","3","4","5","6","7","8","9","10","11","12","13" ]) #skip_blank_lines=5
videolist = pd.read_csv('../eval_segments.csv',skiprows=2, header=None,names = ['id',"start", "end", "0", "1","2","3","4","5","6","7","8","9","10","11","12","13" ]) #skip_blank_lines=5
(row,col)=videolist.shape
Is_wanted= np.zeros((row,1),dtype=np.bool)
num=0
for i in range(0,row):
for j in range (3,col):
if type(videolist.iat[i,j])== str:
if '/m/07pbtc8' in videolist.iat[i,j]:
Is_wanted[i]=True
label='Footsteps'
print(np.sum(Is_wanted))
data_df=pd.DataFrame([], columns=['name','format','start_sec','end_sec','length','bitrate','address','label'])
num=0
#%%
for i in np.arange(0,row):
print(i)
print(Is_wanted[i])
if Is_wanted[i]==True:
url = "https://www.youtube.com/watch?v="+videolist.iat[i,0]
try:
video=pafy.new(url)
streams=video.audiostreams
k=streams[0]
except (IOError,OSError,ValueError):
continue
try:
k.download(filepath= ( str(num) ) )
except (IOError,OSError,ValueError):
continue
df1=pd.DataFrame([[str(num), k.extension, videolist.iat[i,1], videolist.iat[i,2], video.length, k.bitrate, videolist.iat[i,0],label]], columns=['name','format','start_sec','end_sec','length','bitrate','address','label'])
data_df=data_df.append(df1)
num=num+1
# i_now=i+1 # for restarting from error
#data_df.to_csv(label+'_balanced_train.csv')
data_df.to_csv(label+'_eval.csv')
#%%
AudioSegment.ffmpeg="/Users/weif/Documents/ffmpeg"
AudioSegment.converter = r"/Users/weif/Documents/ffmpeg"
(row,col)=data_df.shape
for i in range(0,row):
# filename=data_df.iat[i,0]+'.'+data_df.iat[i,1]
filename=str(data_df.iat[i,0])
start_sec = float(data_df.iat[i,2])
stop_sec =float(data_df.iat[i,3])
duration = int(stop_sec-start_sec)
filename1=label+'_2_'+str(duration)+'s_'+str(data_df.iat[i,0])+'.ogg'
AudioSegment.from_file(filename)[int(1000* start_sec):int(1000*stop_sec)].export(filename1, format ='ogg' )
| [
"pandas.DataFrame",
"numpy.sum",
"pandas.read_csv",
"pafy.new",
"numpy.zeros",
"numpy.arange",
"pydub.AudioSegment.from_file"
] | [((581, 753), 'pandas.read_csv', 'pd.read_csv', (['"""../eval_segments.csv"""'], {'skiprows': '(2)', 'header': 'None', 'names': "['id', 'start', 'end', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',\n '10', '11', '12', '13']"}), "('../eval_segments.csv', skiprows=2, header=None, names=['id',\n 'start', 'end', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10',\n '11', '12', '13'])\n", (592, 753), True, 'import pandas as pd\n'), ((795, 828), 'numpy.zeros', 'np.zeros', (['(row, 1)'], {'dtype': 'np.bool'}), '((row, 1), dtype=np.bool)\n', (803, 828), True, 'import numpy as np\n'), ((1157, 1270), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['name', 'format', 'start_sec', 'end_sec', 'length', 'bitrate', 'address',\n 'label']"}), "([], columns=['name', 'format', 'start_sec', 'end_sec',\n 'length', 'bitrate', 'address', 'label'])\n", (1169, 1270), True, 'import pandas as pd\n'), ((1280, 1297), 'numpy.arange', 'np.arange', (['(0)', 'row'], {}), '(0, row)\n', (1289, 1297), True, 'import numpy as np\n'), ((1101, 1118), 'numpy.sum', 'np.sum', (['Is_wanted'], {}), '(Is_wanted)\n', (1107, 1118), True, 'import numpy as np\n'), ((1461, 1474), 'pafy.new', 'pafy.new', (['url'], {}), '(url)\n', (1469, 1474), False, 'import pafy\n'), ((2616, 2648), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['filename'], {}), '(filename)\n', (2638, 2648), False, 'from pydub import AudioSegment\n')] |
from typing import Tuple
import numpy as np
from docknet.data_generator.data_generator import DataGenerator
from docknet.util.geometry import random_to_polar, polar_to_cartesian
class ClusterDataGenerator(DataGenerator):
"""
The cluster data generator generates two classes (0 and 1) of 2D vectors distributed as follows:
0XX
XXX
XX1
"""
def unitary_cluster(self, x: np.array):
polar = random_to_polar(x)
polar[0] = 11.**polar[0] / 10. - 0.6
f = polar_to_cartesian(polar)
return f
def func0(self, x: np.array):
"""
Generator function of 2D vectors of class 0 (the upper-left cluster)
:param x: a 2D random generated vector
:return: the corresponding individual of class 0
"""
f = self.unitary_cluster(x) * self.cluster_diameter + self.cluster0_origin
return f
def func1(self, x: np.array):
"""
Generator function of 2D vectors of class 1 (the bottom-right cluster)
:param x: a 2D random generated vector
:return: the corresponding individual of class 1
"""
f = self.unitary_cluster(x) * self.cluster_diameter + self.cluster1_origin
return f
def __init__(self, x0_range: Tuple[float, float], x1_range: Tuple[float, float]):
"""
Initializes the cluster data generator
:param x0_range: tuple of minimum and maximum x values
:param x1_range: tuple of minimum and maximum y values
"""
super().__init__((self.func0, self.func1))
x_length = x0_range[1] - x0_range[0]
y_length = x1_range[1] - x1_range[0]
x_center = (x0_range[0] + x0_range[1]) / 2
y_center = (x1_range[0] + x1_range[1]) / 2
self.cluster0_origin = np.array([x_center - x_length / 3, y_center + y_length / 3])
self.cluster1_origin = np.array([x_center + x_length / 3, y_center - y_length / 3])
self.cluster_diameter = np.array([x_length / 3, y_length / 3])
| [
"docknet.util.geometry.polar_to_cartesian",
"numpy.array",
"docknet.util.geometry.random_to_polar"
] | [((440, 458), 'docknet.util.geometry.random_to_polar', 'random_to_polar', (['x'], {}), '(x)\n', (455, 458), False, 'from docknet.util.geometry import random_to_polar, polar_to_cartesian\n'), ((516, 541), 'docknet.util.geometry.polar_to_cartesian', 'polar_to_cartesian', (['polar'], {}), '(polar)\n', (534, 541), False, 'from docknet.util.geometry import random_to_polar, polar_to_cartesian\n'), ((1799, 1859), 'numpy.array', 'np.array', (['[x_center - x_length / 3, y_center + y_length / 3]'], {}), '([x_center - x_length / 3, y_center + y_length / 3])\n', (1807, 1859), True, 'import numpy as np\n'), ((1891, 1951), 'numpy.array', 'np.array', (['[x_center + x_length / 3, y_center - y_length / 3]'], {}), '([x_center + x_length / 3, y_center - y_length / 3])\n', (1899, 1951), True, 'import numpy as np\n'), ((1984, 2022), 'numpy.array', 'np.array', (['[x_length / 3, y_length / 3]'], {}), '([x_length / 3, y_length / 3])\n', (1992, 2022), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
MU = 1.05
SIG = np.sqrt(0.105)
def lognormal(x, mu=MU, sig=SIG):
coeff = 1. / (x * sig * np.sqrt(2*np.pi))
expon = - (np.log(x)-mu)**2 / (2*sig**2)
return coeff * np.exp(expon)
xs = np.linspace(1e-2, 10, 1000)
ys = lognormal(xs)/np.max(lognormal(xs))
plt.clf()
exponents = [1., 0.5, 0.1]
if __name__ == '__main__':
for exp in exponents:
plt.plot(xs, ys**exp, label=str(exp))
plt.legend(loc='best')
plt.savefig("virial-plot.png")
| [
"numpy.log",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"matplotlib.use",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((92, 106), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (99, 106), True, 'import matplotlib as mpl\n'), ((156, 170), 'numpy.sqrt', 'np.sqrt', (['(0.105)'], {}), '(0.105)\n', (163, 170), True, 'import numpy as np\n'), ((336, 363), 'numpy.linspace', 'np.linspace', (['(0.01)', '(10)', '(1000)'], {}), '(0.01, 10, 1000)\n', (347, 363), True, 'import numpy as np\n'), ((405, 414), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (412, 414), True, 'import matplotlib.pyplot as plt\n'), ((545, 567), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (555, 567), True, 'import matplotlib.pyplot as plt\n'), ((572, 602), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""virial-plot.png"""'], {}), "('virial-plot.png')\n", (583, 602), True, 'import matplotlib.pyplot as plt\n'), ((316, 329), 'numpy.exp', 'np.exp', (['expon'], {}), '(expon)\n', (322, 329), True, 'import numpy as np\n'), ((234, 252), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (241, 252), True, 'import numpy as np\n'), ((267, 276), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (273, 276), True, 'import numpy as np\n')] |
# numpy_wrapper
#
# A wrapper around the NumPy library that makes it easier to use for simple
# array mathematics.
import numpy
def new(num_rows, num_cols):
return numpy.zeros((num_rows, num_cols), dtype=numpy.int32)
def average(arrays_to_average):
return numpy.mean(numpy.array(arrays_to_average), axis=0)
def get_indices(array):
return numpy.transpose(array.nonzero())
| [
"numpy.array",
"numpy.zeros"
] | [((170, 222), 'numpy.zeros', 'numpy.zeros', (['(num_rows, num_cols)'], {'dtype': 'numpy.int32'}), '((num_rows, num_cols), dtype=numpy.int32)\n', (181, 222), False, 'import numpy\n'), ((278, 308), 'numpy.array', 'numpy.array', (['arrays_to_average'], {}), '(arrays_to_average)\n', (289, 308), False, 'import numpy\n')] |
import os
import sys
import numpy as np
import cv2
import logging
from wavedata.tools.obj_detection import obj_utils
import perspective_utils as p_utils
import matching_utils
import trust_utils
import config as cfg
import std_utils
import constants as const
from tools.visualization import vis_matches
# Compute and save message evals for each vehicle
# Files get saved to the base directory under message_evaluations
# The format is:
# Message ID, Confidence, Certainty, Evaluator ID
def compute_message_evals():
std_utils.delete_all_subdirs(cfg.MSG_EVALS_SUBDIR)
# First for the ego vehicle
compute_perspect_eval(cfg.DATASET_DIR, const.ego_id())
# Then for all the alternate perspectives
for entity_str in const.valid_perspectives():
perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
compute_perspect_eval(perspect_dir, int(entity_str))
print("Finished computing message evals.")
def aggregate_message_evals():
std_utils.delete_all_subdirs(cfg.AGG_MSG_EVALS_SUBDIR)
# First for the ego vehicle
aggregate_persp_msg_evals(cfg.DATASET_DIR, const.ego_id())
# Then for all the alternate perspectives
for entity_str in const.valid_perspectives():
perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
aggregate_persp_msg_evals(perspect_dir, int(entity_str))
print("Finished aggregating message evals.")
def compute_perspect_eval(perspect_dir, persp_id):
logging.info("**********************************************************************")
logging.info("Computing evaluations for perspective: %d", persp_id)
velo_dir = perspect_dir + '/velodyne'
matching_dir = perspect_dir + '/matching_test'
# Do this for every sample index
velo_files = os.listdir(velo_dir)
for file in velo_files:
filepath = velo_dir + '/' + file
idx = int(os.path.splitext(file)[0])
if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
continue
logging.debug("**********************************Index: %d", idx)
# Load predictions from own and nearby vehicles
# First object in list will correspond to the ego_entity_id
# We want to area filter here because vehicles shouldn't be evaluating detections they can't perceive
perspect_trust_objs = p_utils.get_all_detections(idx, persp_id, results=cfg.USE_RESULTS, filter_area=True)
# Add fake detections to perspect_preds
# Find matching pairs
# Returns a list of lists of objects which have been matched
matching_objs = matching_utils.match_iou3ds(perspect_trust_objs, only_ego_matches=False)
if cfg.VISUALIZE_MATCHES:
alt_persp = persp_id != const.ego_id()
vis_matches.visualize_matches(matching_objs, idx, \
cfg.USE_RESULTS, alt_persp, persp_id)
# Print matching objects to test with visualization
# out_file = matching_dir + '/{:06d}.txt'.format(idx)
# if os.path.exists(out_file):
# os.remove(out_file)
# else:
# logging.debug("Cannot delete the file as it doesn't exists")
# for match_list in matching_objs:
# if len(match_list) > 1:
# objs = trust_utils.strip_objs(match_list)
# save_filtered_objs(objs, idx, matching_dir)
# Calculate trust from received detections
trust_utils.get_message_trust_values(matching_objs, perspect_dir, persp_id, idx)
# Visualize evaluations by setting config option to True
if cfg.VISUALIZE_MSG_EVALS:
alt_persp = persp_id != const.ego_id()
vis_matches.visualize_matches(matching_objs, idx, \
cfg.USE_RESULTS, alt_persp, persp_id, \
vis_eval_scores=True)
save_msg_evals(matching_objs, idx, persp_id)
def save_msg_evals(msg_trusts, idx, persp_id):
logging.debug("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Save msg evals in trust")
if msg_trusts is None:
logging.debug("Msg trusts is none")
return
for matched_msgs in msg_trusts:
logging.debug("Outputting list of matched objects")
for trust_obj in matched_msgs:
if not cfg.INCLUDE_OWN_DETECTION_IN_EVAL and trust_obj.detector_id == persp_id:
#Do not evaluate own detections
logging.debug("Not saving eval for own detection.")
continue
# Fill the array to write
msg_trust_output = np.zeros([1, 6])
msg_trust_output[0,0] = trust_obj.det_idx
msg_trust_output[0,1] = trust_obj.obj.score
msg_trust_output[0,2] = trust_obj.detector_certainty
msg_trust_output[0,3] = trust_obj.evaluator_id
msg_trust_output[0,4] = trust_obj.evaluator_certainty
msg_trust_output[0,5] = trust_obj.evaluator_score
logging.debug("********************Saving trust val to id: %d at idx: %d", trust_obj.detector_id, idx)
# Save to text file
file_path = p_utils.get_folder(trust_obj.detector_id) + '/{}/{:06d}.txt'.format(cfg.MSG_EVALS_SUBDIR,idx)
logging.debug("Writing msg evals to file: %s", file_path)
std_utils.make_dir(file_path)
with open(file_path, 'a+') as f:
np.savetxt(f, msg_trust_output,
newline='\r\n', fmt='%i %f %f %i %f %f')
def load_msg_evals(persp_dir, idx):
# Define the list
msg_evals = []
if idx < 0:
return []
filepath = persp_dir + '/' + cfg.MSG_EVALS_SUBDIR + '/{:06d}.txt'.format(idx)
if not os.path.isfile(filepath):
return []
# Extract the list
if os.stat(filepath).st_size == 0:
return []
p = np.loadtxt(filepath, delimiter=' ',
dtype=str,
usecols=np.arange(start=0, step=1, stop=6))
# Check if the output is single dimensional or multi dimensional
if len(p.shape) > 1:
label_num = p.shape[0]
else:
label_num = 1
for idx in np.arange(label_num):
trust_obj = trust_utils.MessageEvaluation()
if label_num > 1:
trust_obj.det_idx = int(p[idx,0])
trust_obj.det_score = float(p[idx,1])
trust_obj.det_certainty = float(p[idx,2])
trust_obj.evaluator_id = int(p[idx,3])
trust_obj.evaluator_certainty = float(p[idx,4])
trust_obj.evaluator_score = float(p[idx,5])
else:
trust_obj.det_idx = int(p[0])
trust_obj.det_score = float(p[1])
trust_obj.det_certainty = float(p[2])
trust_obj.evaluator_id = int(p[3])
trust_obj.evaluator_certainty = float(p[4])
trust_obj.evaluator_score = float(p[5])
msg_evals.append(trust_obj)
return msg_evals
# Computes total msg_evals
def aggregate_persp_msg_evals(persp_dir, persp_id):
logging.info("**********************************************************************")
logging.info("Aggregating msg evaluations for perspective: %d", persp_id)
velo_dir = persp_dir + '/velodyne'
# Do this for every sample index
velo_files = os.listdir(velo_dir)
for file in velo_files:
filepath = velo_dir + '/' + file
idx = int(os.path.splitext(file)[0])
if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
continue
logging.debug("**********************************Index: %d", idx)
msg_evals = load_msg_evals(persp_dir, idx)
eval_lists = {}
for msg_eval in msg_evals:
if msg_eval.det_idx in eval_lists:
eval_lists[msg_eval.det_idx].append(msg_eval)
else:
eval_lists[msg_eval.det_idx] = [msg_eval]
#print("Perspective and index: ", persp_id, idx)
#print(eval_lists)
eval_count = 0
trust_sum = 0
logging.debug(eval_lists)
for det_idx, eval_list in eval_lists.items():
msg_trust = 0
if cfg.AGG_AVG:
num = 0
den = 0
logging.debug("det_idx: %d", det_idx)
logging.debug("Eval list: {}".format(eval_list))
logging.debug("Eval list len: %d", len(eval_list))
for eval_item in eval_list:
num += eval_item.evaluator_certainty * eval_item.evaluator_score
den += eval_item.evaluator_certainty
eval_count += 1
if den != 0:
msg_trust = num / den
else:
for eval_item in eval_list:
# Aggregate additively
msg_trust += eval_item.evaluator_certainty * eval_item.evaluator_score
#TODO add option for +/- message aggregation
save_agg_msg_eval(persp_id, idx, det_idx, msg_trust)
def save_agg_msg_eval(persp_id, idx, det_idx, msg_trust):
# Fill the array to write
msg_trust_output = np.zeros([1, 3])
msg_trust_output[0,0] = persp_id
msg_trust_output[0,1] = det_idx
msg_trust_output[0,2] = msg_trust
logging.debug("Saving msg trust agg val to id: %d at det_idx: %d for idx: %d with trust: %d", persp_id, det_idx, idx, msg_trust)
# Save to text file
file_path = os.path.join(os.path.join(p_utils.get_folder(persp_id), cfg.AGG_MSG_EVALS_SUBDIR), '{:06d}.txt'.format(idx))
logging.debug("Writing msg evals to file: %s", file_path)
std_utils.make_dir(file_path)
with open(file_path, 'a+') as f:
np.savetxt(f, msg_trust_output,
newline='\r\n', fmt='%i %i %f')
# Loads all aggregated msg evals into a dictionary
# Key 1: persp_id
# Key 2: det_idx
def load_agg_msg_evals(idx):
# Then for all the alternate perspectives
msg_evals_dict = {}
msg_evals_dict[const.ego_id()] = load_agg_msg_evals_from_persp(cfg.DATASET_DIR, idx)
for entity_str in const.valid_perspectives():
perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
msg_evals_dict[int(entity_str)] = load_agg_msg_evals_from_persp(perspect_dir, idx)
return msg_evals_dict
def load_agg_msg_evals_from_persp(persp_dir, idx):
# Define the list
msg_evals_dict = {}
if idx < 0:
return []
filepath = persp_dir + '/' + cfg.AGG_MSG_EVALS_SUBDIR + '/{:06d}.txt'.format(idx)
if not os.path.isfile(filepath):
return []
# Extract the list
if os.stat(filepath).st_size == 0:
return []
logging.debug("Loading agg from: %s", filepath)
p = np.loadtxt(filepath, delimiter=' ',
dtype=str,
usecols=np.arange(start=0, step=1, stop=3))
# Check if the output is single dimensional or multi dimensional
if len(p.shape) > 1:
label_num = p.shape[0]
else:
label_num = 1
for idx in np.arange(label_num):
msg_eval = trust_utils.AggregatedMessageEvaluation()
if label_num > 1:
msg_eval.persp_id = int(p[idx,0])
msg_eval.det_idx = int(p[idx,1])
msg_eval.msg_trust = float(p[idx,2])
msg_evals_dict[int(p[idx,1])] = msg_eval.msg_trust
else:
msg_eval.persp_id = int(p[0])
msg_eval.det_idx = int(p[1])
msg_eval.msg_trust = float(p[2])
msg_evals_dict[int(p[1])] = msg_eval.msg_trust
logging.debug("Inserting key: %d", msg_eval.det_idx)
return msg_evals_dict
# Function for outputting objects for visualization tests
def save_filtered_objs(gt_objs, idx, out_dir):
out_file = out_dir + '/{:06d}.txt'.format(idx)
with open(out_file, 'a+') as f:
if gt_objs is None:
return
for obj in gt_objs:
kitti_text_3d = '{} {} {} {} {:d} {:d} {:d} {:d} {} {} {} {} {} {} {}'.format(obj.type,
obj.truncation, obj.occlusion, obj.alpha, int(obj.x1), int(obj.y1), int(obj.x2),
int(obj.y2), obj.h, obj.w, obj.l, obj.t[0], obj.t[1], obj.t[2], obj.ry)
f.write('%s\n' % kitti_text_3d) | [
"os.path.isfile",
"perspective_utils.get_folder",
"numpy.arange",
"std_utils.delete_all_subdirs",
"constants.ego_id",
"os.path.join",
"perspective_utils.get_all_detections",
"trust_utils.AggregatedMessageEvaluation",
"std_utils.make_dir",
"trust_utils.get_message_trust_values",
"numpy.savetxt",
... | [((521, 571), 'std_utils.delete_all_subdirs', 'std_utils.delete_all_subdirs', (['cfg.MSG_EVALS_SUBDIR'], {}), '(cfg.MSG_EVALS_SUBDIR)\n', (549, 571), False, 'import std_utils\n'), ((733, 759), 'constants.valid_perspectives', 'const.valid_perspectives', ([], {}), '()\n', (757, 759), True, 'import constants as const\n'), ((973, 1027), 'std_utils.delete_all_subdirs', 'std_utils.delete_all_subdirs', (['cfg.AGG_MSG_EVALS_SUBDIR'], {}), '(cfg.AGG_MSG_EVALS_SUBDIR)\n', (1001, 1027), False, 'import std_utils\n'), ((1193, 1219), 'constants.valid_perspectives', 'const.valid_perspectives', ([], {}), '()\n', (1217, 1219), True, 'import constants as const\n'), ((1460, 1551), 'logging.info', 'logging.info', (['"""**********************************************************************"""'], {}), "(\n '**********************************************************************')\n", (1472, 1551), False, 'import logging\n'), ((1551, 1618), 'logging.info', 'logging.info', (['"""Computing evaluations for perspective: %d"""', 'persp_id'], {}), "('Computing evaluations for perspective: %d', persp_id)\n", (1563, 1618), False, 'import logging\n'), ((1767, 1787), 'os.listdir', 'os.listdir', (['velo_dir'], {}), '(velo_dir)\n', (1777, 1787), False, 'import os\n'), ((3979, 4058), 'logging.debug', 'logging.debug', (['"""!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Save msg evals in trust"""'], {}), "('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Save msg evals in trust')\n", (3992, 4058), False, 'import logging\n'), ((6146, 6166), 'numpy.arange', 'np.arange', (['label_num'], {}), '(label_num)\n', (6155, 6166), True, 'import numpy as np\n'), ((7015, 7106), 'logging.info', 'logging.info', (['"""**********************************************************************"""'], {}), "(\n '**********************************************************************')\n", (7027, 7106), False, 'import logging\n'), ((7106, 7179), 'logging.info', 'logging.info', (['"""Aggregating msg evaluations for perspective: %d"""', 'persp_id'], {}), "('Aggregating msg evaluations for perspective: %d', persp_id)\n", (7118, 7179), False, 'import logging\n'), ((7274, 7294), 'os.listdir', 'os.listdir', (['velo_dir'], {}), '(velo_dir)\n', (7284, 7294), False, 'import os\n'), ((9084, 9100), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {}), '([1, 3])\n', (9092, 9100), True, 'import numpy as np\n'), ((9217, 9355), 'logging.debug', 'logging.debug', (['"""Saving msg trust agg val to id: %d at det_idx: %d for idx: %d with trust: %d"""', 'persp_id', 'det_idx', 'idx', 'msg_trust'], {}), "(\n 'Saving msg trust agg val to id: %d at det_idx: %d for idx: %d with trust: %d'\n , persp_id, det_idx, idx, msg_trust)\n", (9230, 9355), False, 'import logging\n'), ((9499, 9556), 'logging.debug', 'logging.debug', (['"""Writing msg evals to file: %s"""', 'file_path'], {}), "('Writing msg evals to file: %s', file_path)\n", (9512, 9556), False, 'import logging\n'), ((9561, 9590), 'std_utils.make_dir', 'std_utils.make_dir', (['file_path'], {}), '(file_path)\n', (9579, 9590), False, 'import std_utils\n'), ((10017, 10043), 'constants.valid_perspectives', 'const.valid_perspectives', ([], {}), '()\n', (10041, 10043), True, 'import constants as const\n'), ((10591, 10638), 'logging.debug', 'logging.debug', (['"""Loading agg from: %s"""', 'filepath'], {}), "('Loading agg from: %s', filepath)\n", (10604, 10638), False, 'import logging\n'), ((10950, 10970), 'numpy.arange', 'np.arange', (['label_num'], {}), '(label_num)\n', (10959, 10970), True, 'import numpy as np\n'), ((648, 662), 'constants.ego_id', 'const.ego_id', ([], {}), '()\n', (660, 662), True, 'import constants as const\n'), ((784, 827), 'os.path.join', 'os.path.join', (['cfg.ALT_PERSP_DIR', 'entity_str'], {}), '(cfg.ALT_PERSP_DIR, entity_str)\n', (796, 827), False, 'import os\n'), ((1108, 1122), 'constants.ego_id', 'const.ego_id', ([], {}), '()\n', (1120, 1122), True, 'import constants as const\n'), ((1244, 1287), 'os.path.join', 'os.path.join', (['cfg.ALT_PERSP_DIR', 'entity_str'], {}), '(cfg.ALT_PERSP_DIR, entity_str)\n', (1256, 1287), False, 'import os\n'), ((1984, 2049), 'logging.debug', 'logging.debug', (['"""**********************************Index: %d"""', 'idx'], {}), "('**********************************Index: %d', idx)\n", (1997, 2049), False, 'import logging\n'), ((2315, 2403), 'perspective_utils.get_all_detections', 'p_utils.get_all_detections', (['idx', 'persp_id'], {'results': 'cfg.USE_RESULTS', 'filter_area': '(True)'}), '(idx, persp_id, results=cfg.USE_RESULTS,\n filter_area=True)\n', (2341, 2403), True, 'import perspective_utils as p_utils\n'), ((2573, 2645), 'matching_utils.match_iou3ds', 'matching_utils.match_iou3ds', (['perspect_trust_objs'], {'only_ego_matches': '(False)'}), '(perspect_trust_objs, only_ego_matches=False)\n', (2600, 2645), False, 'import matching_utils\n'), ((3427, 3512), 'trust_utils.get_message_trust_values', 'trust_utils.get_message_trust_values', (['matching_objs', 'perspect_dir', 'persp_id', 'idx'], {}), '(matching_objs, perspect_dir, persp_id, idx\n )\n', (3463, 3512), False, 'import trust_utils\n'), ((4094, 4129), 'logging.debug', 'logging.debug', (['"""Msg trusts is none"""'], {}), "('Msg trusts is none')\n", (4107, 4129), False, 'import logging\n'), ((4190, 4241), 'logging.debug', 'logging.debug', (['"""Outputting list of matched objects"""'], {}), "('Outputting list of matched objects')\n", (4203, 4241), False, 'import logging\n'), ((5709, 5733), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (5723, 5733), False, 'import os\n'), ((6188, 6219), 'trust_utils.MessageEvaluation', 'trust_utils.MessageEvaluation', ([], {}), '()\n', (6217, 6219), False, 'import trust_utils\n'), ((7491, 7556), 'logging.debug', 'logging.debug', (['"""**********************************Index: %d"""', 'idx'], {}), "('**********************************Index: %d', idx)\n", (7504, 7556), False, 'import logging\n'), ((7992, 8017), 'logging.debug', 'logging.debug', (['eval_lists'], {}), '(eval_lists)\n', (8005, 8017), False, 'import logging\n'), ((9636, 9699), 'numpy.savetxt', 'np.savetxt', (['f', 'msg_trust_output'], {'newline': "'\\r\\n'", 'fmt': '"""%i %i %f"""'}), "(f, msg_trust_output, newline='\\r\\n', fmt='%i %i %f')\n", (9646, 9699), True, 'import numpy as np\n'), ((9925, 9939), 'constants.ego_id', 'const.ego_id', ([], {}), '()\n', (9937, 9939), True, 'import constants as const\n'), ((10068, 10111), 'os.path.join', 'os.path.join', (['cfg.ALT_PERSP_DIR', 'entity_str'], {}), '(cfg.ALT_PERSP_DIR, entity_str)\n', (10080, 10111), False, 'import os\n'), ((10461, 10485), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (10475, 10485), False, 'import os\n'), ((10991, 11032), 'trust_utils.AggregatedMessageEvaluation', 'trust_utils.AggregatedMessageEvaluation', ([], {}), '()\n', (11030, 11032), False, 'import trust_utils\n'), ((11472, 11524), 'logging.debug', 'logging.debug', (['"""Inserting key: %d"""', 'msg_eval.det_idx'], {}), "('Inserting key: %d', msg_eval.det_idx)\n", (11485, 11524), False, 'import logging\n'), ((2744, 2835), 'tools.visualization.vis_matches.visualize_matches', 'vis_matches.visualize_matches', (['matching_objs', 'idx', 'cfg.USE_RESULTS', 'alt_persp', 'persp_id'], {}), '(matching_objs, idx, cfg.USE_RESULTS,\n alt_persp, persp_id)\n', (2773, 2835), False, 'from tools.visualization import vis_matches\n'), ((3673, 3786), 'tools.visualization.vis_matches.visualize_matches', 'vis_matches.visualize_matches', (['matching_objs', 'idx', 'cfg.USE_RESULTS', 'alt_persp', 'persp_id'], {'vis_eval_scores': '(True)'}), '(matching_objs, idx, cfg.USE_RESULTS,\n alt_persp, persp_id, vis_eval_scores=True)\n', (3702, 3786), False, 'from tools.visualization import vis_matches\n'), ((4584, 4600), 'numpy.zeros', 'np.zeros', (['[1, 6]'], {}), '([1, 6])\n', (4592, 4600), True, 'import numpy as np\n'), ((4976, 5082), 'logging.debug', 'logging.debug', (['"""********************Saving trust val to id: %d at idx: %d"""', 'trust_obj.detector_id', 'idx'], {}), "('********************Saving trust val to id: %d at idx: %d',\n trust_obj.detector_id, idx)\n", (4989, 5082), False, 'import logging\n'), ((5241, 5298), 'logging.debug', 'logging.debug', (['"""Writing msg evals to file: %s"""', 'file_path'], {}), "('Writing msg evals to file: %s', file_path)\n", (5254, 5298), False, 'import logging\n'), ((5311, 5340), 'std_utils.make_dir', 'std_utils.make_dir', (['file_path'], {}), '(file_path)\n', (5329, 5340), False, 'import std_utils\n'), ((5784, 5801), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (5791, 5801), False, 'import os\n'), ((5936, 5970), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'step': '(1)', 'stop': '(6)'}), '(start=0, step=1, stop=6)\n', (5945, 5970), True, 'import numpy as np\n'), ((9412, 9440), 'perspective_utils.get_folder', 'p_utils.get_folder', (['persp_id'], {}), '(persp_id)\n', (9430, 9440), True, 'import perspective_utils as p_utils\n'), ((10536, 10553), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (10543, 10553), False, 'import os\n'), ((10740, 10774), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'step': '(1)', 'stop': '(3)'}), '(start=0, step=1, stop=3)\n', (10749, 10774), True, 'import numpy as np\n'), ((1876, 1898), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1892, 1898), False, 'import os\n'), ((2717, 2731), 'constants.ego_id', 'const.ego_id', ([], {}), '()\n', (2729, 2731), True, 'import constants as const\n'), ((3646, 3660), 'constants.ego_id', 'const.ego_id', ([], {}), '()\n', (3658, 3660), True, 'import constants as const\n'), ((4437, 4488), 'logging.debug', 'logging.debug', (['"""Not saving eval for own detection."""'], {}), "('Not saving eval for own detection.')\n", (4450, 4488), False, 'import logging\n'), ((5135, 5176), 'perspective_utils.get_folder', 'p_utils.get_folder', (['trust_obj.detector_id'], {}), '(trust_obj.detector_id)\n', (5153, 5176), True, 'import perspective_utils as p_utils\n'), ((5402, 5474), 'numpy.savetxt', 'np.savetxt', (['f', 'msg_trust_output'], {'newline': "'\\r\\n'", 'fmt': '"""%i %f %f %i %f %f"""'}), "(f, msg_trust_output, newline='\\r\\n', fmt='%i %f %f %i %f %f')\n", (5412, 5474), True, 'import numpy as np\n'), ((7383, 7405), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (7399, 7405), False, 'import os\n'), ((8190, 8227), 'logging.debug', 'logging.debug', (['"""det_idx: %d"""', 'det_idx'], {}), "('det_idx: %d', det_idx)\n", (8203, 8227), False, 'import logging\n')] |
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import tensorflow as tf
import PIL.ImageFile
import scipy.ndimage
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import os
import re
import sys
sys.path.append(".")
sys.path.append("..")
import pretrained_networks
def Align_face_image(src_file, output_size=1024, transform_size=4096,
enable_padding=True):
print('aligning image...')
import dlib
img_ = dlib.load_rgb_image(src_file)
print("Image Shape :", img_.shape)
frontal_face = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat") # cnn model
shape_ = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") # same as ffhq dataset
dets = frontal_face(img_, 1)
for i, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}".format(i, d.rect.left(),
d.rect.top(),
d.rect.right(),
d.rect.bottom(),
d.confidence))
shape = shape_(img_, d.rect)
print("Part 0: {}, Part 1: {} ...".format(shape.part(0).x, shape.part(1)))
# Parse landmarks.
# pylint: disable=unused-variable
lm_chin = np.array([[shape.part(i).x, shape.part(i).y] for i in range(17)])
lm_eyebrow_left = np.array([[shape.part(i).x, shape.part(i).y] for i in range(17, 22)])
lm_eyebrow_right = np.array([[shape.part(i).x, shape.part(i).y] for i in range(22, 27)])
lm_nose = np.array([[shape.part(i).x, shape.part(i).y] for i in range(27, 31)])
lm_nostrils = np.array([[shape.part(i).x, shape.part(i).y] for i in range(31, 36)])
lm_eye_left = np.array([[shape.part(i).x, shape.part(i).y] for i in range(36, 42)])
lm_eye_right = np.array([[shape.part(i).x, shape.part(i).y] for i in range(42, 48)])
lm_mouth_outer = np.array([[shape.part(i).x, shape.part(i).y] for i in range(48, 60)])
lm_mouth_inner = np.array([[shape.part(i).x, shape.part(i).y] for i in range(60, 68)])
# Calculate auxiliary vectors.
eye_left = np.mean(lm_eye_left, axis=0)
eye_right = np.mean(lm_eye_right, axis=0)
eye_avg = (eye_left + eye_right) * 0.5
eye_to_eye = eye_right - eye_left
mouth_left = lm_mouth_outer[0]
mouth_right = lm_mouth_outer[6]
mouth_avg = (mouth_left + mouth_right) * 0.5
eye_to_mouth = mouth_avg - eye_avg
# Choose oriented crop rectangle.
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
x /= np.hypot(*x)
x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
y = np.flipud(x) * [-1, 1]
c = eye_avg + eye_to_mouth * 0.1
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
qsize = np.hypot(*x) * 2
# Load in-the-wild image.
if not os.path.isfile(src_file):
print('\nCannot find source image. Please run "--wilds" before "--align".')
return
img = PIL.Image.open(src_file)
# Shrink.
shrink = int(np.floor(qsize / output_size * 0.5))
if shrink > 1:
rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
img = img.resize(rsize, PIL.Image.ANTIALIAS)
quad /= shrink
qsize /= shrink
# Crop.
border = max(int(np.rint(qsize * 0.1)), 3)
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
min(crop[3] + border, img.size[1]))
if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
img = img.crop(crop)
quad -= crop[0:2]
# Pad.
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
int(np.ceil(max(quad[:, 1]))))
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
max(pad[3] - img.size[1] + border, 0))
if enable_padding and max(pad) > border - 4:
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
h, w, _ = img.shape
y, x, _ = np.ogrid[:h, :w, :1]
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
blur = qsize * 0.02
img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
quad += pad[:2]
# Transform.
img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(),
PIL.Image.BILINEAR)
if output_size < transform_size:
img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
img.save(src_file)
def gram_matrix(input_tensor):
# We make the image channels first
channels = int(input_tensor.shape[-1])
a = tf.reshape(input_tensor, [-1, channels])
n = tf.shape(a)[0]
gram = tf.matmul(a, a, transpose_a=True)
return gram / tf.cast(n, tf.float32)
def get_style_loss(base_style, gram_target):
"""Expects two images of dimension h, w, c"""
# height, width, num filters of each laye
base_style = tf.reshape(base_style, [base_style.shape[1], base_style.shape[2], base_style.shape[3]])
height, width, channels = base_style.get_shape().as_list()
gram_style = gram_matrix(base_style)
return tf.reduce_mean(tf.square(gram_style - gram_target))
#----------------------------------------------------------------------------
def generate_im_official(network_pkl='gdrive:networks/stylegan2-ffhq-config-f.pkl', seeds=[22], truncation_psi=0.5):
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if truncation_psi is not None:
Gs_kwargs.truncation_psi = truncation_psi
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
def generate_im_from_random_seed(Gs, seed=22, truncation_psi=0.5):
seeds = [seed]
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if truncation_psi is not None:
Gs_kwargs.truncation_psi = truncation_psi
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
# PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
return images
class Build_model:
def __init__(self, network_pkl):
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
print('loaded')
self.Gs = Gs
self.Gs_syn_kwargs = dnnlib.EasyDict()
self.Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
self.Gs_syn_kwargs.randomize_noise = False
self.Gs_syn_kwargs.minibatch_size = 4
self.noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
rnd = np.random.RandomState(0)
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in self.noise_vars})
def generate_im_from_random_seed(self, seed=22, truncation_psi=0.5):
Gs = self.Gs
seeds = [seed]
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if truncation_psi is not None:
Gs_kwargs.truncation_psi = truncation_psi
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
# PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
return images
def generate_im_from_z_space(self, z, truncation_psi=0.5):
Gs = self.Gs
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
Gs_kwargs.randomize_noise = False
if truncation_psi is not None:
Gs_kwargs.truncation_psi = truncation_psi # [height, width]
images = Gs.run(z, None, **Gs_kwargs)
# PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('test_from_z.png'))
return images
def generate_im_from_w_space(self, w):
images = self.Gs.components.synthesis.run(w, **self.Gs_syn_kwargs)
# PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('test_from_w.png'))
return images
# def load_network(random_weights=False):
# URL_FFHQ = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'
# tflib.init_tf()
#
# with dnnlib.util.open_url(URL_FFHQ, cache_dir=config.cache_dir) as f:
# G, D, Gs = pickle.load(f)
# if random_weights:
# Gs.reset_vars()
# return Gs
if __name__ == "__main__":
Our_model = Build_model()
# Our_model.generate_im_from_random_seed(10)
# Our_model.generate_im_from_random_seed(50)
rnd = np.random.RandomState(10)
# z = rnd.randn(1, *Our_model.Gs.input_shape[1:])
z = rnd.randn(2, 512)
w = Our_model.Gs.components.mapping.run(z, None)
w_avg = Our_model.Gs.get_var('dlatent_avg')
w = w_avg + (w - w_avg) * 0.5
Our_model.generate_im_from_w_space(w)
| [
"dlib.load_rgb_image",
"tensorflow.reshape",
"dnnlib.EasyDict",
"numpy.floor",
"numpy.clip",
"pretrained_networks.load_networks",
"tensorflow.matmul",
"os.path.isfile",
"numpy.mean",
"dlib.cnn_face_detection_model_v1",
"dlib.shape_predictor",
"sys.path.append",
"numpy.random.RandomState",
... | [((241, 261), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (256, 261), False, 'import sys\n'), ((262, 283), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (277, 283), False, 'import sys\n'), ((484, 513), 'dlib.load_rgb_image', 'dlib.load_rgb_image', (['src_file'], {}), '(src_file)\n', (503, 513), False, 'import dlib\n'), ((573, 637), 'dlib.cnn_face_detection_model_v1', 'dlib.cnn_face_detection_model_v1', (['"""mmod_human_face_detector.dat"""'], {}), "('mmod_human_face_detector.dat')\n", (605, 637), False, 'import dlib\n'), ((664, 725), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (684, 725), False, 'import dlib\n'), ((5834, 5874), 'tensorflow.reshape', 'tf.reshape', (['input_tensor', '[-1, channels]'], {}), '(input_tensor, [-1, channels])\n', (5844, 5874), True, 'import tensorflow as tf\n'), ((5909, 5942), 'tensorflow.matmul', 'tf.matmul', (['a', 'a'], {'transpose_a': '(True)'}), '(a, a, transpose_a=True)\n', (5918, 5942), True, 'import tensorflow as tf\n'), ((6143, 6234), 'tensorflow.reshape', 'tf.reshape', (['base_style', '[base_style.shape[1], base_style.shape[2], base_style.shape[3]]'], {}), '(base_style, [base_style.shape[1], base_style.shape[2],\n base_style.shape[3]])\n', (6153, 6234), True, 'import tensorflow as tf\n'), ((6673, 6719), 'pretrained_networks.load_networks', 'pretrained_networks.load_networks', (['network_pkl'], {}), '(network_pkl)\n', (6706, 6719), False, 'import pretrained_networks\n'), ((6842, 6859), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {}), '()\n', (6857, 6859), False, 'import dnnlib\n'), ((7824, 7841), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {}), '()\n', (7839, 7841), False, 'import dnnlib\n'), ((11728, 11753), 'numpy.random.RandomState', 'np.random.RandomState', (['(10)'], {}), '(10)\n', (11749, 11753), True, 'import numpy as np\n'), ((2428, 2456), 'numpy.mean', 'np.mean', (['lm_eye_left'], {'axis': '(0)'}), '(lm_eye_left, axis=0)\n', (2435, 2456), True, 'import numpy as np\n'), ((2477, 2506), 'numpy.mean', 'np.mean', (['lm_eye_right'], {'axis': '(0)'}), '(lm_eye_right, axis=0)\n', (2484, 2506), True, 'import numpy as np\n'), ((2886, 2898), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (2894, 2898), True, 'import numpy as np\n'), ((3067, 3121), 'numpy.stack', 'np.stack', (['[c - x - y, c - x + y, c + x + y, c + x - y]'], {}), '([c - x - y, c - x + y, c + x + y, c + x - y])\n', (3075, 3121), True, 'import numpy as np\n'), ((5883, 5894), 'tensorflow.shape', 'tf.shape', (['a'], {}), '(a)\n', (5891, 5894), True, 'import tensorflow as tf\n'), ((5961, 5983), 'tensorflow.cast', 'tf.cast', (['n', 'tf.float32'], {}), '(n, tf.float32)\n', (5968, 5983), True, 'import tensorflow as tf\n'), ((6362, 6397), 'tensorflow.square', 'tf.square', (['(gram_style - gram_target)'], {}), '(gram_style - gram_target)\n', (6371, 6397), True, 'import tensorflow as tf\n'), ((7224, 7251), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (7245, 7251), True, 'import numpy as np\n'), ((8206, 8233), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (8227, 8233), True, 'import numpy as np\n'), ((8756, 8802), 'pretrained_networks.load_networks', 'pretrained_networks.load_networks', (['network_pkl'], {}), '(network_pkl)\n', (8789, 8802), False, 'import pretrained_networks\n'), ((8877, 8894), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {}), '()\n', (8892, 8894), False, 'import dnnlib\n'), ((9226, 9250), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (9247, 9250), True, 'import numpy as np\n'), ((9591, 9608), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {}), '()\n', (9606, 9608), False, 'import dnnlib\n'), ((10537, 10554), 'dnnlib.EasyDict', 'dnnlib.EasyDict', ([], {}), '()\n', (10552, 10554), False, 'import dnnlib\n'), ((2988, 3000), 'numpy.flipud', 'np.flipud', (['x'], {}), '(x)\n', (2997, 3000), True, 'import numpy as np\n'), ((3138, 3150), 'numpy.hypot', 'np.hypot', (['*x'], {}), '(*x)\n', (3146, 3150), True, 'import numpy as np\n'), ((3207, 3231), 'os.path.isfile', 'os.path.isfile', (['src_file'], {}), '(src_file)\n', (3221, 3231), False, 'import os\n'), ((3419, 3454), 'numpy.floor', 'np.floor', (['(qsize / output_size * 0.5)'], {}), '(qsize / output_size * 0.5)\n', (3427, 3454), True, 'import numpy as np\n'), ((7561, 7608), 'dnnlib.make_run_dir_path', 'dnnlib.make_run_dir_path', (["('seed%04d.png' % seed)"], {}), "('seed%04d.png' % seed)\n", (7585, 7608), False, 'import dnnlib\n'), ((10001, 10028), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (10022, 10028), True, 'import numpy as np\n'), ((2839, 2862), 'numpy.flipud', 'np.flipud', (['eye_to_mouth'], {}), '(eye_to_mouth)\n', (2848, 2862), True, 'import numpy as np\n'), ((2916, 2937), 'numpy.hypot', 'np.hypot', (['*eye_to_eye'], {}), '(*eye_to_eye)\n', (2924, 2937), True, 'import numpy as np\n'), ((2945, 2968), 'numpy.hypot', 'np.hypot', (['*eye_to_mouth'], {}), '(*eye_to_mouth)\n', (2953, 2968), True, 'import numpy as np\n'), ((3740, 3760), 'numpy.rint', 'np.rint', (['(qsize * 0.1)'], {}), '(qsize * 0.1)\n', (3747, 3760), True, 'import numpy as np\n'), ((4693, 4708), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (4703, 4708), True, 'import numpy as np\n'), ((5163, 5198), 'numpy.clip', 'np.clip', (['(mask * 3.0 + 1.0)', '(0.0)', '(1.0)'], {}), '(mask * 3.0 + 1.0, 0.0, 1.0)\n', (5170, 5198), True, 'import numpy as np\n'), ((5256, 5279), 'numpy.clip', 'np.clip', (['mask', '(0.0)', '(1.0)'], {}), '(mask, 0.0, 1.0)\n', (5263, 5279), True, 'import numpy as np\n'), ((4645, 4665), 'numpy.rint', 'np.rint', (['(qsize * 0.3)'], {}), '(qsize * 0.3)\n', (4652, 4665), True, 'import numpy as np\n'), ((5219, 5246), 'numpy.median', 'np.median', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (5228, 5246), True, 'import numpy as np\n'), ((5335, 5347), 'numpy.rint', 'np.rint', (['img'], {}), '(img)\n', (5342, 5347), True, 'import numpy as np\n'), ((4889, 4902), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (4899, 4902), True, 'import numpy as np\n'), ((4913, 4934), 'numpy.float32', 'np.float32', (['(w - 1 - x)'], {}), '(w - 1 - x)\n', (4923, 4934), True, 'import numpy as np\n'), ((4993, 5006), 'numpy.float32', 'np.float32', (['y'], {}), '(y)\n', (5003, 5006), True, 'import numpy as np\n'), ((5017, 5038), 'numpy.float32', 'np.float32', (['(h - 1 - y)'], {}), '(h - 1 - y)\n', (5027, 5038), True, 'import numpy as np\n')] |
'''Numba tools
This is a colection of functions used for numba functions
that work for targets cpu as well as cuda
'''
from __future__ import print_function
__all__ = ['myjit',
'conjugate_transpose',
'conjugate',
'matrix_dot_matrix',
'matrix_dot_vector',
'clear_matrix',
'copy_matrix',
'cuda',
'ctype',
'ftype',
'WHERE',
]
__version__ = '0.1'
__author__ = '<NAME> (<EMAIL>)'
import numpy as np
import inspect
from numba import jit, float64, complex64, int32, float32, complex128, guvectorize
import math, cmath
from pisa import FTYPE, TARGET
if TARGET is None:
raise NotImplementedError(
'Numba not supported.'
)
# the `WHERE` variable is for usage with smart arrays
if TARGET == 'cuda':
from numba import cuda
if FTYPE == np.float64:
ctype = complex128
ftype = float64
elif FTYPE == np.float32:
ctype = complex64
ftype = float32
WHERE='gpu'
else:
if FTYPE == np.float64:
ctype = np.complex128
ftype = np.float64
elif FTYPE == np.float32:
ctype = np.complex64
ftype = np.float32
cuda = lambda: None
cuda.jit = lambda x: x
WHERE='host'
def myjit(f):
'''
f : function
Decorator to assign the right jit for different targets
In case of non-cuda targets, all instances of `cuda.local.array`
are replaced by `np.empty`. This is a dirty fix, hopefully in the
near future numba will support numpy array allocation and this will
not be necessary anymore
'''
if TARGET == 'cuda':
return cuda.jit(f, device=True)
else:
source = inspect.getsource(f).splitlines()
assert '@myjit' in source[0]
source = '\n'.join(source[1:]) + '\n'
source = source.replace('cuda.local.array', 'np.empty')
exec(source)
fun = eval(f.__name__)
newfun = jit(fun, nopython=True)
# needs to be exported to globals
globals()[f.__name__] = newfun
return newfun
@myjit
def conjugate_transpose(A, B):
'''
A : 2d array
B : 2d array
B is the conjugate transpose of A
'''
for i in range(A.shape[0]):
for j in range(A.shape[1]):
B[i,j] = A[j,i].conjugate()
@myjit
def conjugate(A, B):
'''
A : 2d array
B : 2d array
B is the conjugate of A
'''
for i in range(A.shape[0]):
for j in range(A.shape[1]):
B[i,j] = A[i,j].conjugate()
@myjit
def matrix_dot_matrix(A, B, C):
'''
dot-product of two 2d arrays
C = A * B
'''
for j in range(B.shape[1]):
for i in range(A.shape[0]):
C[i,j] = 0.
for n in range(C.shape[0]):
C[i,j] += A[i,n] * B[n,j]
def test_matrix_dot_matrix():
A = np.linspace(1., 8., 9).reshape(3,3)
B = np.linspace(1., 8., 9).reshape(3,3)
C = np.zeros((3,3))
matrix_dot_matrix(A, B, C)
assert np.array_equal(C, np.dot(A, B))
@myjit
def matrix_dot_vector(A, v, w):
'''
dot-product of a 2d array and a vector
w = A * v
'''
for i in range(A.shape[0]):
w[i] = 0.
for j in range(A.shape[1]):
w[i] += A[i,j] * v[j]
def test_matrix_dot_vector():
A = np.linspace(1., 8., 9).reshape(3,3)
v = np.linspace(1., 3., 3)
w = np.zeros((3))
matrix_dot_vector(A, v, w)
assert np.array_equal(w, np.dot(A, v))
@myjit
def clear_matrix(A):
'''
clear out 2d array
'''
for i in range(A.shape[0]):
for j in range(A.shape[1]):
A[i,j] = 0.
def test_clear_matrix():
A = np.ones((3,3))
clear_matrix(A)
assert np.array_equal(A, np.zeros((3,3)))
@myjit
def copy_matrix(A, B):
'''
copy elemnts of 2d array A to array B
'''
for i in range(A.shape[0]):
for j in range(A.shape[1]):
B[i,j] = A[i,j]
def test_copy_matrix():
A = np.ones((3,3))
B = np.zeros((3,3))
copy_matrix(A, B)
assert np.array_equal(A, B)
if __name__=='__main__':
assert TARGET == 'cpu', "Cannot test functions on GPU, set PISA_TARGET to 'cpu'"
test_matrix_dot_matrix()
test_matrix_dot_vector()
test_clear_matrix()
test_copy_matrix()
| [
"numpy.zeros",
"numpy.ones",
"numba.jit",
"numba.cuda.jit",
"numpy.linspace",
"numpy.array_equal",
"numpy.dot",
"inspect.getsource"
] | [((2953, 2969), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2961, 2969), True, 'import numpy as np\n'), ((3359, 3383), 'numpy.linspace', 'np.linspace', (['(1.0)', '(3.0)', '(3)'], {}), '(1.0, 3.0, 3)\n', (3370, 3383), True, 'import numpy as np\n'), ((3390, 3401), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3398, 3401), True, 'import numpy as np\n'), ((3672, 3687), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3679, 3687), True, 'import numpy as np\n'), ((3971, 3986), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3978, 3986), True, 'import numpy as np\n'), ((3994, 4010), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (4002, 4010), True, 'import numpy as np\n'), ((4043, 4063), 'numpy.array_equal', 'np.array_equal', (['A', 'B'], {}), '(A, B)\n', (4057, 4063), True, 'import numpy as np\n'), ((1668, 1692), 'numba.cuda.jit', 'cuda.jit', (['f'], {'device': '(True)'}), '(f, device=True)\n', (1676, 1692), False, 'from numba import cuda\n'), ((1970, 1993), 'numba.jit', 'jit', (['fun'], {'nopython': '(True)'}), '(fun, nopython=True)\n', (1973, 1993), False, 'from numba import jit, float64, complex64, int32, float32, complex128, guvectorize\n'), ((3029, 3041), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (3035, 3041), True, 'import numpy as np\n'), ((3464, 3476), 'numpy.dot', 'np.dot', (['A', 'v'], {}), '(A, v)\n', (3470, 3476), True, 'import numpy as np\n'), ((3736, 3752), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3744, 3752), True, 'import numpy as np\n'), ((2865, 2889), 'numpy.linspace', 'np.linspace', (['(1.0)', '(8.0)', '(9)'], {}), '(1.0, 8.0, 9)\n', (2876, 2889), True, 'import numpy as np\n'), ((2909, 2933), 'numpy.linspace', 'np.linspace', (['(1.0)', '(8.0)', '(9)'], {}), '(1.0, 8.0, 9)\n', (2920, 2933), True, 'import numpy as np\n'), ((3315, 3339), 'numpy.linspace', 'np.linspace', (['(1.0)', '(8.0)', '(9)'], {}), '(1.0, 8.0, 9)\n', (3326, 3339), True, 'import numpy as np\n'), ((1720, 1740), 'inspect.getsource', 'inspect.getsource', (['f'], {}), '(f)\n', (1737, 1740), False, 'import inspect\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["integrated_intensity",
"intensity_weighted_velocity",
"intensity_weighted_dispersion",
"peak_pixel",
"quadratic"]
import numpy as np
try:
from scipy.ndimage.filters import gaussian_filter1d, _gaussian_kernel1d
except ImportError:
gaussian_filter1d = None
def _read_mask_path(mask_path, data):
"""Read in the mask and make sure it is the same shape as the data."""
if mask_path is not None:
from astropy.io import fits
extension = mask_path.split('.')[-1].lower()
if extension == 'fits':
mask = np.squeeze(fits.getdata(mask_path))
elif extension == 'npy':
mask = np.load(mask_path)
else:
raise ValueError("Mask must be a `.fits` or `.npy` file.")
if mask.shape != data.shape:
raise ValueError("Mismatch in mask and data shape.")
mask = np.where(np.isfinite(mask), mask, 0.0)
else:
mask = np.ones(data.shape)
return mask.astype('bool')
def _threshold_mask(data, mask, rms=None, threshold=0.0):
"""
Combines the provided mask with a sigma mask.
Args:
data (ndarray): The data cube of intensities or flux densities.
mask (ndarray): User provided boolean mask from ``_read_mask_path``.
rms (Optional[ndarray/float]): Either an array or a single value of the
uncertainty in the image. We assume that the uncertainty is
constant along the spectrum.
threshold (Optional[float]): The level of sigma clipping to apply to
the data (based on the provided uncertainty).
Returns:
mask (ndarray): Boolean mask of which pixels to include.
"""
if rms is None or threshold <= 0.0:
return mask.astype('bool')
rms = np.atleast_1d(rms)
if rms.ndim == 2:
sigma_mask = abs(data) >= (threshold * rms)[None, :, :]
else:
sigma_mask = abs(data) >= threshold * rms
return np.logical_and(mask, sigma_mask).astype('bool')
def get_intensity_weights(data, mask):
"""
Returns the weights used for intensity weighted averages. Includes a small
level of noise so that the weights do not add up to zero along the spectral
axis.
Args:
data (ndarray): The data cube of intensities or flux densities.
mask (ndarray): The boolean mask of pixels to include.
Returns:
weights (ndarray): Array of same shape as data with the non-normalized
intensity weights with masked regions containing values ~1e-10.
"""
noise = 1e-10 * np.random.rand(data.size).reshape(data.shape)
return np.where(mask, abs(data), noise)
def integrated_intensity(data, dx=1.0, rms=None, threshold=0.0, mask_path=None,
axis=0):
"""
Returns the integrated intensity (commonly known as the zeroth moment).
Args:
data (ndarray): The data cube as an array with at least one dimension.
dx (Optional[float]): The pixel scale of the ``axis'' dimension.
rms (Optional[float]): The uncertainty on the intensities given by
``data``. All uncertainties are assumed to be the same. If not
provided, the uncertainty on the centroid will not be estimated.
threshold (Optional[float]): All pixel values below this value will not
be included in the calculation of the intensity weighted average.
mask_path (Optional[ndarray]): A path to a boolean mask to apply to the
data. Must be in either ``.fits`` or ``.npy`` format and must be in
the same shape as ``data``.
axis (Optional[int]): The axis along which the centroid should be
estimated. By default this will be the zeroth axis.
Returns:
m0 (ndarray): The integrated intensity along the ``axis'' dimension in
each pixel. The units will be [data] * [dx], so typically
Jy/beam m/s (or equivalently mJy/beam km/s).
dm0 (ndarray): The uncertainty on ``m0'' if an rms is given, otherwise
None.
"""
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
m0 = np.trapz(data * mask, dx=dx, axis=0)
if rms is None:
return np.where(npix > 1, m0, np.nan)
dm0 = dx * rms * npix**0.5 * np.ones(m0.shape)
return np.where(npix > 1, m0, np.nan), np.where(npix > 1, dm0, np.nan)
def intensity_weighted_velocity(data, x0=0.0, dx=1.0, rms=None, threshold=None,
mask_path=None, axis=0):
"""
Returns the intensity weighted average velocity (commonly known as the
first moment)
Args:
data (ndarray): The data cube as an array with at least one dimension.
x0 (Optional[float]): The wavelength/frequency/velocity/etc. value for
the zeroth pixel in the ``axis'' dimension.
dx (Optional[float]): The pixel scale of the ``axis'' dimension.
rms (Optional[float]): The uncertainty on the intensities given by
``data``, assumed to be constant along the spectral axis. Can be
either a 2D map or a single value.
threshold (Optional[float]): All pixel values below this value will not
be included in the calculation of the intensity weighted average.
mask_path (Optional[ndarray]): A path to a boolean mask to apply to the
data. Must be in either ``.fits`` or ``.npy`` format and must be in
the same shape as ``data``.
axis (Optional[int]): The axis along which the centroid should be
estimated. By default this will be the zeroth axis.
Returns:
x_max (ndarray): The centroid of the brightest line along the ``axis''
dimension in each pixel.
x_max_sig (ndarray): The uncertainty on ``x_max'' if an uncertainty is
given, otherwise None.
"""
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
weights = get_intensity_weights(data, mask)
vpix = dx * np.arange(data.shape[0]) + x0
vpix = vpix[:, None, None] * np.ones(data.shape)
# Intensity weighted velocity.
m1 = np.average(vpix, weights=weights, axis=0)
if rms is None:
return np.where(npix > 1, m1, np.nan), None
# Calculate uncertainty if rms provided.
dm1 = (vpix - m1[None, :, :]) * rms / np.sum(weights, axis=0)
dm1 = np.sqrt(np.sum(dm1**2, axis=0))
return np.where(npix > 1, m1, np.nan), np.where(npix > 1, dm1, np.nan)
def intensity_weighted_dispersion(data, x0=0.0, dx=1.0, rms=None,
threshold=None, mask_path=None, axis=0):
"""
Returns the intensity weighted velocity dispersion (second moment).
"""
# Calculate the intensity weighted velocity first.
m1 = intensity_weighted_velocity(data=data, x0=x0, dx=dx, rms=rms,
threshold=threshold, mask_path=mask_path,
axis=axis)[0]
# Rearrange the data to what we need.
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
weights = get_intensity_weights(data, mask)
npix_mask = np.where(npix > 1, 1, np.nan)
vpix = dx * np.arange(data.shape[0]) + x0
vpix = vpix[:, None, None] * np.ones(data.shape)
# Intensity weighted dispersion.
m1 = m1[None, :, :] * np.ones(data.shape)
m2 = np.sum(weights * (vpix - m1)**2, axis=0) / np.sum(weights, axis=0)
m2 = np.sqrt(m2)
if rms is None:
return m2 * npix_mask, None
# Calculate the uncertainties.
dm2 = ((vpix - m1)**2 - m2**2) * rms / np.sum(weights, axis=0)
dm2 = np.sqrt(np.sum(dm2**2, axis=0)) / 2. / m2
return m2 * npix_mask, dm2 * npix_mask
def peak_pixel(data, x0=0.0, dx=1.0, axis=0):
"""
Returns the velocity of the peak channel for each pixel, and the pixel
value.
Args:
data (ndarray): The data cube as an array with at least one dimension.
x0 (Optional[float]): The wavelength/frequency/velocity/etc. value for
the zeroth pixel in the ``axis'' dimension.
dx (Optional[float]): The pixel scale of the ``axis'' dimension.
axis (Optional[int]): The axis along which the centroid should be
estimated. By default this will be the zeroth axis.
Returns:
x_max (ndarray): The centroid of the brightest line along the ``axis''
dimension in each pixel.
x_max_sig (ndarray): The uncertainty on ``x_max''.
y_max (ndarray): The predicted value of the intensity at maximum.
"""
x_max = np.argmax(data, axis=axis)
y_max = np.max(data, axis=axis)
return x0 + dx * x_max, 0.5 * dx, y_max
def quadratic(data, uncertainty=None, axis=0, x0=0.0, dx=1.0, linewidth=None):
"""
Compute the quadratic estimate of the centroid of a line in a data cube.
The use case that we expect is a data cube with spatiotemporal coordinates
in all but one dimension. The other dimension (given by the ``axis``
parameter) will generally be wavelength, frequency, or velocity. This
function estimates the centroid of the *brightest* line along the ``axis''
dimension, in each spatiotemporal pixel.
Following Vakili & Hogg we allow for the option for the data to be smoothed
prior to the parabolic fitting. The recommended kernel is a Gaussian of
comparable width to the line. However, for low noise data, this is not
always necessary.
Args:
data (ndarray): The data cube as an array with at least one dimension.
uncertainty (Optional[ndarray or float]): The uncertainty on the
intensities given by ``data``. If this is a scalar, all
uncertainties are assumed to be the same. If this is an array, it
must have the same shape as ``data'' and give the uncertainty on
each intensity. If not provided, the uncertainty on the centroid
will not be estimated.
axis (Optional[int]): The axis along which the centroid should be
estimated. By default this will be the zeroth axis.
x0 (Optional[float]): The wavelength/frequency/velocity/etc. value for
the zeroth pixel in the ``axis'' dimension.
dx (Optional[float]): The pixel scale of the ``axis'' dimension.
linewidth (Optional [float]): Estimated standard deviation of the line
in units of pixels.
Returns:
x_max (ndarray): The centroid of the brightest line along the ``axis''
dimension in each pixel.
x_max_sig (ndarray or None): The uncertainty on ``x_max''. If
``uncertainty'' was not provided, this will be ``None''.
y_max (ndarray): The predicted value of the intensity at maximum.
y_max_sig (ndarray or None): The uncertainty on ``y_max''. If
``uncertainty'' was not provided, this will be ``None''.
"""
# Cast the data to a numpy array
data = np.moveaxis(np.atleast_1d(data), axis, 0)
shape = data.shape[1:]
data = np.reshape(data, (len(data), -1))
# Find the maximum velocity pixel in each spatial pixel
idx = np.argmax(data, axis=0)
# Smooth the data if asked
truncate = 4.0
if linewidth is not None:
if gaussian_filter1d is None:
raise ImportError("scipy is required for smoothing")
data = gaussian_filter1d(data, linewidth, axis=0, truncate=truncate)
# Deal with edge effects by keeping track of which pixels are right on the
# edge of the range
idx_bottom = idx == 0
idx_top = idx == len(data) - 1
idx = np.clip(idx, 1, len(data)-2)
# Extract the maximum and neighboring pixels
f_minus = data[(idx-1, range(data.shape[1]))]
f_max = data[(idx, range(data.shape[1]))]
f_plus = data[(idx+1, range(data.shape[1]))]
# Work out the polynomial coefficients
a0 = 13. * f_max / 12. - (f_plus + f_minus) / 24.
a1 = 0.5 * (f_plus - f_minus)
a2 = 0.5 * (f_plus + f_minus - 2*f_max)
# Compute the maximum of the quadratic
x_max = idx - 0.5 * a1 / a2
y_max = a0 - 0.25 * a1**2 / a2
# Set sensible defaults for the edge cases
if len(data.shape) > 1:
x_max[idx_bottom] = 0
x_max[idx_top] = len(data) - 1
y_max[idx_bottom] = f_minus[idx_bottom]
y_max[idx_top] = f_plus[idx_top]
else:
if idx_bottom:
x_max = 0
y_max = f_minus
elif idx_top:
x_max = len(data) - 1
y_max = f_plus
# If no uncertainty was provided, end now
if uncertainty is None:
return (
np.reshape(x0 + dx * x_max, shape), None,
np.reshape(y_max, shape), None,
np.reshape(2. * a2, shape), None)
# Compute the uncertainty
try:
uncertainty = float(uncertainty) + np.zeros_like(data)
except TypeError:
# An array of errors was provided
uncertainty = np.moveaxis(np.atleast_1d(uncertainty), axis, 0)
if uncertainty.shape[0] != data.shape[0] or \
shape != uncertainty.shape[1:]:
raise ValueError("the data and uncertainty must have the same "
"shape")
uncertainty = np.reshape(uncertainty, (len(uncertainty), -1))
# Update the uncertainties for the smoothed data:
# sigma_smooth = sqrt(norm * k**2 x sigma_n**2)
if linewidth is not None:
# The updated uncertainties need to be updated by convolving with the
# square of the kernel with which the data were smoothed. Then, this
# needs to be properly normalized. See the scipy source for the
# details of this normalization:
# https://github.com/scipy/scipy/blob/master/scipy/ndimage/filters.py
sigma = linewidth / np.sqrt(2)
lw = int(truncate * linewidth + 0.5)
norm = np.sum(_gaussian_kernel1d(linewidth, 0, lw)**2)
norm /= np.sum(_gaussian_kernel1d(sigma, 0, lw))
uncertainty = np.sqrt(norm * gaussian_filter1d(
uncertainty**2, sigma, axis=0))
df_minus = uncertainty[(idx-1, range(uncertainty.shape[1]))]**2
df_max = uncertainty[(idx, range(uncertainty.shape[1]))]**2
df_plus = uncertainty[(idx+1, range(uncertainty.shape[1]))]**2
x_max_var = 0.0625*(a1**2*(df_minus + df_plus) +
a1*a2*(df_minus - df_plus) +
a2**2*(4.0*df_max + df_minus + df_plus))/a2**4
y_max_var = 0.015625*(a1**4*(df_minus + df_plus) +
2.0*a1**3*a2*(df_minus - df_plus) +
4.0*a1**2*a2**2*(df_minus + df_plus) +
64.0*a2**4*df_max)/a2**4
return (
np.reshape(x0 + dx * x_max, shape),
np.reshape(dx * np.sqrt(x_max_var), shape),
np.reshape(y_max, shape),
np.reshape(np.sqrt(y_max_var), shape))
| [
"numpy.moveaxis",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.sum",
"numpy.load",
"numpy.argmax",
"scipy.ndimage.filters._gaussian_kernel1d",
"numpy.ones",
"numpy.arange",
"numpy.zeros_like",
"astropy.io.fits.getdata",
"numpy.isfinite",
"numpy.max",
"numpy.reshape",
"numpy.trapz",
... | [((1882, 1900), 'numpy.atleast_1d', 'np.atleast_1d', (['rms'], {}), '(rms)\n', (1895, 1900), True, 'import numpy as np\n'), ((4238, 4264), 'numpy.moveaxis', 'np.moveaxis', (['data', 'axis', '(0)'], {}), '(data, axis, 0)\n', (4249, 4264), True, 'import numpy as np\n'), ((4276, 4302), 'numpy.moveaxis', 'np.moveaxis', (['mask', 'axis', '(0)'], {}), '(mask, axis, 0)\n', (4287, 4302), True, 'import numpy as np\n'), ((4393, 4413), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (4399, 4413), True, 'import numpy as np\n'), ((4423, 4459), 'numpy.trapz', 'np.trapz', (['(data * mask)'], {'dx': 'dx', 'axis': '(0)'}), '(data * mask, dx=dx, axis=0)\n', (4431, 4459), True, 'import numpy as np\n'), ((6207, 6233), 'numpy.moveaxis', 'np.moveaxis', (['data', 'axis', '(0)'], {}), '(data, axis, 0)\n', (6218, 6233), True, 'import numpy as np\n'), ((6245, 6271), 'numpy.moveaxis', 'np.moveaxis', (['mask', 'axis', '(0)'], {}), '(mask, axis, 0)\n', (6256, 6271), True, 'import numpy as np\n'), ((6362, 6382), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (6368, 6382), True, 'import numpy as np\n'), ((6575, 6616), 'numpy.average', 'np.average', (['vpix'], {'weights': 'weights', 'axis': '(0)'}), '(vpix, weights=weights, axis=0)\n', (6585, 6616), True, 'import numpy as np\n'), ((7519, 7545), 'numpy.moveaxis', 'np.moveaxis', (['data', 'axis', '(0)'], {}), '(data, axis, 0)\n', (7530, 7545), True, 'import numpy as np\n'), ((7557, 7583), 'numpy.moveaxis', 'np.moveaxis', (['mask', 'axis', '(0)'], {}), '(mask, axis, 0)\n', (7568, 7583), True, 'import numpy as np\n'), ((7674, 7694), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (7680, 7694), True, 'import numpy as np\n'), ((7759, 7788), 'numpy.where', 'np.where', (['(npix > 1)', '(1)', 'np.nan'], {}), '(npix > 1, 1, np.nan)\n', (7767, 7788), True, 'import numpy as np\n'), ((8057, 8068), 'numpy.sqrt', 'np.sqrt', (['m2'], {}), '(m2)\n', (8064, 8068), True, 'import numpy as np\n'), ((9184, 9210), 'numpy.argmax', 'np.argmax', (['data'], {'axis': 'axis'}), '(data, axis=axis)\n', (9193, 9210), True, 'import numpy as np\n'), ((9223, 9246), 'numpy.max', 'np.max', (['data'], {'axis': 'axis'}), '(data, axis=axis)\n', (9229, 9246), True, 'import numpy as np\n'), ((11741, 11764), 'numpy.argmax', 'np.argmax', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (11750, 11764), True, 'import numpy as np\n'), ((1053, 1072), 'numpy.ones', 'np.ones', (['data.shape'], {}), '(data.shape)\n', (1060, 1072), True, 'import numpy as np\n'), ((4495, 4525), 'numpy.where', 'np.where', (['(npix > 1)', 'm0', 'np.nan'], {}), '(npix > 1, m0, np.nan)\n', (4503, 4525), True, 'import numpy as np\n'), ((4559, 4576), 'numpy.ones', 'np.ones', (['m0.shape'], {}), '(m0.shape)\n', (4566, 4576), True, 'import numpy as np\n'), ((4588, 4618), 'numpy.where', 'np.where', (['(npix > 1)', 'm0', 'np.nan'], {}), '(npix > 1, m0, np.nan)\n', (4596, 4618), True, 'import numpy as np\n'), ((4620, 4651), 'numpy.where', 'np.where', (['(npix > 1)', 'dm0', 'np.nan'], {}), '(npix > 1, dm0, np.nan)\n', (4628, 4651), True, 'import numpy as np\n'), ((6510, 6529), 'numpy.ones', 'np.ones', (['data.shape'], {}), '(data.shape)\n', (6517, 6529), True, 'import numpy as np\n'), ((6777, 6800), 'numpy.sum', 'np.sum', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (6783, 6800), True, 'import numpy as np\n'), ((6819, 6843), 'numpy.sum', 'np.sum', (['(dm1 ** 2)'], {'axis': '(0)'}), '(dm1 ** 2, axis=0)\n', (6825, 6843), True, 'import numpy as np\n'), ((6854, 6884), 'numpy.where', 'np.where', (['(npix > 1)', 'm1', 'np.nan'], {}), '(npix > 1, m1, np.nan)\n', (6862, 6884), True, 'import numpy as np\n'), ((6886, 6917), 'numpy.where', 'np.where', (['(npix > 1)', 'dm1', 'np.nan'], {}), '(npix > 1, dm1, np.nan)\n', (6894, 6917), True, 'import numpy as np\n'), ((7868, 7887), 'numpy.ones', 'np.ones', (['data.shape'], {}), '(data.shape)\n', (7875, 7887), True, 'import numpy as np\n'), ((7952, 7971), 'numpy.ones', 'np.ones', (['data.shape'], {}), '(data.shape)\n', (7959, 7971), True, 'import numpy as np\n'), ((7981, 8023), 'numpy.sum', 'np.sum', (['(weights * (vpix - m1) ** 2)'], {'axis': '(0)'}), '(weights * (vpix - m1) ** 2, axis=0)\n', (7987, 8023), True, 'import numpy as np\n'), ((8024, 8047), 'numpy.sum', 'np.sum', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (8030, 8047), True, 'import numpy as np\n'), ((8204, 8227), 'numpy.sum', 'np.sum', (['weights'], {'axis': '(0)'}), '(weights, axis=0)\n', (8210, 8227), True, 'import numpy as np\n'), ((11568, 11587), 'numpy.atleast_1d', 'np.atleast_1d', (['data'], {}), '(data)\n', (11581, 11587), True, 'import numpy as np\n'), ((11964, 12025), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['data', 'linewidth'], {'axis': '(0)', 'truncate': 'truncate'}), '(data, linewidth, axis=0, truncate=truncate)\n', (11981, 12025), False, 'from scipy.ndimage.filters import gaussian_filter1d, _gaussian_kernel1d\n'), ((15296, 15330), 'numpy.reshape', 'np.reshape', (['(x0 + dx * x_max)', 'shape'], {}), '(x0 + dx * x_max, shape)\n', (15306, 15330), True, 'import numpy as np\n'), ((15392, 15416), 'numpy.reshape', 'np.reshape', (['y_max', 'shape'], {}), '(y_max, shape)\n', (15402, 15416), True, 'import numpy as np\n'), ((998, 1015), 'numpy.isfinite', 'np.isfinite', (['mask'], {}), '(mask)\n', (1009, 1015), True, 'import numpy as np\n'), ((2058, 2090), 'numpy.logical_and', 'np.logical_and', (['mask', 'sigma_mask'], {}), '(mask, sigma_mask)\n', (2072, 2090), True, 'import numpy as np\n'), ((6447, 6471), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (6456, 6471), True, 'import numpy as np\n'), ((6652, 6682), 'numpy.where', 'np.where', (['(npix > 1)', 'm1', 'np.nan'], {}), '(npix > 1, m1, np.nan)\n', (6660, 6682), True, 'import numpy as np\n'), ((7805, 7829), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (7814, 7829), True, 'import numpy as np\n'), ((13216, 13250), 'numpy.reshape', 'np.reshape', (['(x0 + dx * x_max)', 'shape'], {}), '(x0 + dx * x_max, shape)\n', (13226, 13250), True, 'import numpy as np\n'), ((13270, 13294), 'numpy.reshape', 'np.reshape', (['y_max', 'shape'], {}), '(y_max, shape)\n', (13280, 13294), True, 'import numpy as np\n'), ((13314, 13341), 'numpy.reshape', 'np.reshape', (['(2.0 * a2)', 'shape'], {}), '(2.0 * a2, shape)\n', (13324, 13341), True, 'import numpy as np\n'), ((13431, 13450), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (13444, 13450), True, 'import numpy as np\n'), ((14386, 14396), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14393, 14396), True, 'import numpy as np\n'), ((14528, 14560), 'scipy.ndimage.filters._gaussian_kernel1d', '_gaussian_kernel1d', (['sigma', '(0)', 'lw'], {}), '(sigma, 0, lw)\n', (14546, 14560), False, 'from scipy.ndimage.filters import gaussian_filter1d, _gaussian_kernel1d\n'), ((15437, 15455), 'numpy.sqrt', 'np.sqrt', (['y_max_var'], {}), '(y_max_var)\n', (15444, 15455), True, 'import numpy as np\n'), ((691, 714), 'astropy.io.fits.getdata', 'fits.getdata', (['mask_path'], {}), '(mask_path)\n', (703, 714), False, 'from astropy.io import fits\n'), ((768, 786), 'numpy.load', 'np.load', (['mask_path'], {}), '(mask_path)\n', (775, 786), True, 'import numpy as np\n'), ((2667, 2692), 'numpy.random.rand', 'np.random.rand', (['data.size'], {}), '(data.size)\n', (2681, 2692), True, 'import numpy as np\n'), ((8246, 8270), 'numpy.sum', 'np.sum', (['(dm2 ** 2)'], {'axis': '(0)'}), '(dm2 ** 2, axis=0)\n', (8252, 8270), True, 'import numpy as np\n'), ((13551, 13577), 'numpy.atleast_1d', 'np.atleast_1d', (['uncertainty'], {}), '(uncertainty)\n', (13564, 13577), True, 'import numpy as np\n'), ((14464, 14500), 'scipy.ndimage.filters._gaussian_kernel1d', '_gaussian_kernel1d', (['linewidth', '(0)', 'lw'], {}), '(linewidth, 0, lw)\n', (14482, 14500), False, 'from scipy.ndimage.filters import gaussian_filter1d, _gaussian_kernel1d\n'), ((14599, 14649), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['(uncertainty ** 2)', 'sigma'], {'axis': '(0)'}), '(uncertainty ** 2, sigma, axis=0)\n', (14616, 14649), False, 'from scipy.ndimage.filters import gaussian_filter1d, _gaussian_kernel1d\n'), ((15356, 15374), 'numpy.sqrt', 'np.sqrt', (['x_max_var'], {}), '(x_max_var)\n', (15363, 15374), True, 'import numpy as np\n')] |
"""
Created on 2019-12-20
@author: <NAME> - github.com/rpadrino - IMDEA Networks
"""
#imports
from __future__ import division
import math
#from mod_read_crop_files import *
#from mod_tracker import *
#from mod_bin_classifier import *
from mod_multiclass_classifier import *
import os
from os import listdir
from os.path import isfile, join
import shutil #for moving files
import numpy as np
import pandas as pd
import argparse
import sys
###### USE ########
# import:
# from mod_central_classification import *
#
# Main function (examples):
# centralClassification()
# centralClassification('guess')
# centralClassification('guess', './classification/', True)
# centralClassification(input_path='./classification/')
#
# optional parameters
# cameras_element: to provide the camera element to be processed: top, bottom. Default: guess
# input_path: to provide where the local results (cameras) are. Default: ./classification/
# debug: use for debugging [False by default].
# config. and vars.
input_classification_folder = "./classification/" # the folder will contain the local results and other files from the species classification per each camera in subfolders
###output_classification_folder = "./classification/output/" # the folder will contain the final results and other files for the species classification
output_classification_folder = "output/" # the folder will contain the final results and other files for the species classification
input_classification_csv_file = "camera_classification.csv"
## CSV content: ......
input_classification_path_to_crops_file = "path_crops_folder.txt"
output_classification_best_file = "best_classification.txt"
output_detection_best_file = "best_detection.txt"
#output_classification_boundary_file = "boundary_targets.txt"
#recalculate values - boundary areas
boundary_area_left = int( ( 2048 * 10.5 ) / 70.5 ) #305pixels == 10.5degrees of overlapping
boundary_area_right = 2048 - boundary_area_left
working_in_folder_path = None
working_out_folder_path = None
###### functions ########
def getTimestampFromFilename(strr):
strr_sub = strr.split('frame')[0]
if strr_sub.startswith('IMG_'): #just in case
strr_sub = strr_sub[4:]
if strr_sub.endswith('-'): #just in case
strr_sub = strr_sub[:-1]
if strr_sub.endswith('_'): #just in case
strr_sub = strr_sub[:-1]
return strr_sub
def checkCamerasElementParameter(element): #change name
cam_elements = {
"top": True,
"bottom": True,
"guess": True,
"all": False
}
return cam_elements.get(element, False)
def getIPsubnetByCamerasElement(element):
cam_elements = {
"top": 20,
"bottom": 40
}
return cam_elements.get(element, -1)
def getSpeciesNameByNumber(element):
cam_elements = {
0: "albacore",
1: 'amberjack',
2: 'atl_mackerel',
3: 'dorado',
4: 'med_mackerel',
5: 'swordfish',
6: 'others'
}
return cam_elements.get(element, -1)
#check
def getCameraFolderName(element, ncam):
subnet_name = getIPsubnetByCamerasElement(element)
camera_folder = ''
if subnet_name is not -1:
#if subnet_name and (ncam > 0 and ncam < 7):
if subnet_name and (ncam > 0 and ncam < 256):
#./cameraXX.Y/
camera_folder = "camera%d.%d/" % ( subnet_name, ncam )
return camera_folder
def getCameraNamesFolderList(cameras_element):
folder_list = []
if checkCamerasElementParameter(cameras_element):
for ii in range(1,7): #for the number of cameras in each level (cameras_element)
camera_folder = getCameraFolderName( cameras_element, ii )
if camera_folder is not '':
folder_list.append( camera_folder )
return folder_list
def checkCamerasElement(cameras_element): #change name
cameras_element_present = False
for camera_folder in getCameraNamesFolderList(cameras_element):
camera_folder_wpath = join( working_in_folder_path, camera_folder)
if os.path.isdir( camera_folder_wpath ):
if isfile( join( camera_folder_wpath, input_classification_csv_file) ):
cameras_element_present = True
break
return cameras_element_present
def getPathToCropsPerCameraFromFile(camera_name):
path_crops_folder_file = join(working_in_folder_path, camera_name, input_classification_path_to_crops_file)
path_crops_folder = open( path_crops_folder_file ).read().replace('\n','').replace('\r','')
return path_crops_folder
def getFilenameWithPath(strr, prefix): #call: df['acb'].apply( getFilenameWithPath, prefix='/path/to' )
return join(prefix, strr)
def getNumberOfDifferentSpecies(df): #count different fishes
#species_list_ids = df['artifact-multi-decision'].unique()
df_speciesid_count = pd.DataFrame( columns=('species', 'count') )
if len(df):
#for species_index in( species_list_ids ):
for ii, species_index in enumerate( df['artifact-multi-decision'].unique() ): #sorted() <-- without enumeration
df_specie = df[ df['artifact-multi-decision'] == species_index ]
number_fishes_per_specie = df_specie['artifact-id'].nunique()
df_speciesid_count.loc[ 0 if pd.isnull( df_speciesid_count.index.max() ) else df_speciesid_count.index.max() + 1 ] = [int(species_index)] + [int(number_fishes_per_specie)]
return df_speciesid_count #df: 'species', 'count'. ex.: 2->24, 3->11,...
def saveResults(df, folder_path, output_file):
return df.to_csv( join( folder_path, output_file) , index=False, header=False) #,encoding='utf-8'
def saveNumberOfFishesPerSpecies(df_list, folder_path):
for index, row in df_list.iterrows(): ## the column 'species' in not accesible, but it is as indexes
##species_index = row['species']
species_index = index
species_name = getSpeciesNameByNumber( species_index )
number_fishes = row['count']
output_file = None
status = None
try:
#output_file = open( '%s/%s.txt' % (working_out_folder_path, species_name), 'w') ##join()
output_file = open( '%s/%s.txt' % (folder_path, species_name), 'w') ##join()
output_file.write('%d\r\n' % (int(number_fishes) ) )
output_file.close()
if status is None and not status:
status = True
except IOError:
status = False
finally:
if output_file is not None:
output_file.close() #just in case
return status
def getArtifactInBoundaryBasic(df):
artifacts_in_boundary = df[ ( df['x1'] <= boundary_area_left ) | ( df['x2'] >= boundary_area_right ) ]
return artifact_in_boundary.reset_index(level=0, drop=True).copy()
def getArtifactInBoundary(df):
list_columns = np.array(['filename','x1','y1','x2','y2','artifact-multi-decision','ncam'])
##list_columns_final_order = np.array(['ncam','timestamp','x1','y1','x2','y2','species_name','artifact-multi-decision'])
list_columns_final_order = np.array(['ncam','filename','x1','y1','x2','y2','species_name','artifact-multi-decision'])
artifact_in_boundary = df[ ( df['x1'] <= boundary_area_left ) | ( df['x2'] >= boundary_area_right ) ]
artifact_in_boundary = artifact_in_boundary[list_columns].reset_index(level=0, drop=True).copy()
# add column with species names
artifact_in_boundary['species_name'] = artifact_in_boundary['artifact-multi-decision'].apply( getSpeciesNameByNumber )
# add column with timestamp from filenames
#artifact_in_boundary['timestamp'] = artifact_in_boundary['filename'].apply( getTimestampFromFilename )
#delete column "filename" - not needed
#artifact_in_boundary.drop("filename", axis=1, inplace=True)
return artifact_in_boundary[ list_columns_final_order ]
def processCamerasElement(cameras_element):
if checkCamerasElementParameter(cameras_element):
df = pd.DataFrame() # columns=('filename', 'frame', 'x1', 'y1', 'x2', 'y2', 'detection-acc',)
#filename, frame, x1, y1, x2, y2, detection-acc, bin-prob-neg, bin-prob-pos, multi-prob-1, multi-prob-2, multi-prob-3, multi-prob-4, multi-prob-5, multi-prob-6, artifact-bin-prob-pos, artifact-bin-prob-neg, artifact-bin-decision, artifact-multi-prob-1, artifact-multi-prob-2, artifact-multi-prob-3, artifact-multi-prob-4, artifact-multi-prob-5, artifact-multi-prob-6, artifact-multi-decision
df_wFishesPerSpecie = pd.DataFrame()
folder_counter = 0
for camera_folder in getCameraNamesFolderList(cameras_element):
camera_folder_wpath = join( working_in_folder_path, camera_folder)
ncam = camera_folder.split('.')[-1]
ncam = int( ncam[:-1] ) #remove last '/'
if os.path.isdir( camera_folder_wpath ):
# ncam = camera_folder.split('.')[-1]
# ncam = int( ncam[:-1] ) #remove last '/'
if isfile( join( camera_folder_wpath, input_classification_csv_file) ):
df_cam = pd.read_csv( join( camera_folder_wpath, input_classification_csv_file), header='infer' )
#df_cam['ncam'] = np.array([ ncam ] * len(df) )
df_cam['ncam'] = ncam
##df_cam.reset_index(level=0, drop=True, inplace=True) #not here, after concat
##
df_wFishesPerSpecieAndCam = getNumberOfDifferentSpecies( df_cam ) #df: 'species', 'count'. ex.: 2->24, 3->11,..
if len(df_wFishesPerSpecieAndCam):
df_wFishesPerSpecieAndCam['ncam'] = ncam
df_wFishesPerSpecie = pd.concat([df_wFishesPerSpecie, df_wFishesPerSpecieAndCam], axis = 0)
df_wFishesPerSpecie.reset_index(level=0, drop=True, inplace=True) #df: 'species', 'count'. ex.: 2->24, 3->11,..2->3, 3->....
if len(df):
df = pd.concat([df, df_cam], axis = 0)
df.reset_index(level=0, drop=True, inplace=True)
else:
df = df_cam.copy()
del df_cam
folder_counter += 1
else:
print('CSV from camera %d not Found [%s].' % (ncam, cameras_element) )
##ifp-end-csv-camera-exists
else:
print('CSV from camera %d not Found [%s].' % (ncam, cameras_element) )
## if-end-isdir
## for-end-cameras-read-csv
if len(df): #or ## len(df) ## folder_counter > 0
#NUMBER OF FISHES FROM ALL CAMERAS
# group per species
if len(df_wFishesPerSpecie):
df_wFishesPerSpecieNoCam = df_wFishesPerSpecie.copy()
df_wFishesPerSpecieNoCam.drop("ncam", axis=1, inplace=True)
number_fishes_per_specie = df_wFishesPerSpecieNoCam.groupby(['species']).sum()[['count']] ## , as_index=False
#problem: groupby('species') is removing 'count' column.
#number_fishes_per_specie = df_wFishesPerSpecie.groupby('species').sum()[['count']] ## , as_index=False
#df: 'species', 'count'. ex.: 2->24, 3->11,..2->3, 3->....
#save one file per species with the number.
saving_result_per_species = saveNumberOfFishesPerSpecies( number_fishes_per_specie, working_out_folder_path )
## df: 'species', 'count' (totals)
else:
print('Dataframe with number of fishes per species in empty. Nothing to save.')
print('')
# saveResults( pd.DataFrame() , working_out_folder_path, 'nodata_species.txt')
#BEST DETECTION
#function define in 'mod_multiclass_classifier.py'
#df_bestDetection = getBestArtifact(df) #filename, acc, species
df_bestDetection = getBestArtifactFromSegmentation(df) #filename, acc, species
df_bestDetection['species-name'] = df_bestDetection['species'].apply( getSpeciesNameByNumber )
list_columns_final_order = np.array(['filename','acc','species-name','species'])
#saving_result_best = saveResults( df_bestDetection[ list_columns_final_order ], working_out_folder_path, output_classification_best_file)
saving_result_best = saveResults( df_bestDetection[ list_columns_final_order ], working_out_folder_path, output_detection_best_file)
#copy best file
filename_best_result = df_bestDetection.head(1)['filename'].to_numpy()[0]
camera_best_result = df[ df['filename'] == filename_best_result ].head(1)['ncam']
camera_folder_best_result = getCameraFolderName( cameras_element, camera_best_result.to_numpy()[0] )
filename_best_result_wPath = join(working_in_folder_path ,camera_folder_best_result, filename_best_result)
if isfile( filename_best_result_wPath):
shutil.copy( filename_best_result_wPath , working_out_folder_path)
else:
print("It was not possible to find file for best results: %s" % (filename_best_result_wPath) )
print("")
#BOUNDARY AREAS
artifacts_in_boundary = getArtifactInBoundary( df ) #camera, timestamp, coords, species_name, species_index
#save boundary results
saving_boundary_result = "" ##VS None and +=
for ii, cam_index in enumerate( df['ncam'].unique() ):
camera_name = getCameraFolderName(cameras_element, cam_index)
#saving_boundary_result +=
artifacts_in_boundary_per_cam = artifacts_in_boundary[ artifacts_in_boundary['ncam'] == cam_index ].reset_index(level=0, drop=True).copy()
path_for_filename = getPathToCropsPerCameraFromFile(camera_name)
artifacts_in_boundary_per_cam['filename'] = artifacts_in_boundary_per_cam['filename'].apply( getFilenameWithPath, prefix=path_for_filename )
saveResults( artifacts_in_boundary_per_cam, working_out_folder_path , "%s_boundary.txt" % camera_name[:-1] )
#saveResults( artifacts_in_boundary[ artifacts_in_boundary['ncam'] == cam_index ], working_out_folder_path , "%s_boundary.txt" % camera_name[:-1] )
#FILE: cameraXX.Y_boundary.txt
#FORMAT: camera, timestamp, coords, species, species_index (each detection one line)
## for-end
statusCameraElement = True
else:
print('Input CSVs are empty. Nothing to process.')
print('')
saveResults( pd.DataFrame() , working_out_folder_path, 'nodata.txt')
statusCameraElement = False
## if-end-len-df
else: ## checkCamerasElementParameter-check-allowed-elements
print('Cameras element unknown.')
print("'--cameras' parameter was not properly provided.")
print('')
statusCameraElement = False
return statusCameraElement
def cleanFilesInFolder(folder_path):
for file_name in listdir( folder_path ):
file_name_wPath = join( folder_path, file_name )
if isfile( file_name_wPath ):
os.remove( file_name_wPath )
###### functions-end ########
###### main-function ########
def centralClassification(cameras_element='guess', input_path='./classification/', debug=False):
# call:
# centralClassification(), centralClassification('guess'), centralClassification('guess', './classification/', True), centralClassification(input_path='./classification/')
#optional parameters
#cameras_element: Parameter to provide the camera element to be processed: top, bottom. Default: guess
#input_path: Parameter to provide where the local results (cameras) are. Default: ./classification/
#debug: Use for debugging [False by default].
global output_classification_folder
global working_in_folder_path
global working_out_folder_path
if input_path != '':
input_classification_folder = input_path
if not input_classification_folder.endswith('/'): #just in case
input_classification_folder += '/'
output_classification_folder = input_classification_folder + output_classification_folder
# cameras subfolder name format:
# ./classification/cameraXX.Y
# ./classification/camera20.6
# ./classification/camera40.2
# paths
working_in_folder_path = os.path.abspath( input_classification_folder )
working_out_folder_path = os.path.abspath( output_classification_folder )
# main
print( "Starting classification..." )
t_start_new = datetime.datetime.now()
t_start_str_new = "[ %s ]" % t_start_new.strftime("%Y/%m/%d - %H:%M")
print( t_start_str_new )
print( "Input folder: %s\n" % (working_in_folder_path) )
#checking output folder
if not os.path.exists( working_out_folder_path ):
print("Creating output folder: " + str( working_out_folder_path ) )
print("")
os.makedirs( working_out_folder_path )
if checkCamerasElementParameter(cameras_element):
#clean previous results
cleanFilesInFolder( working_out_folder_path )
if cameras_element == 'top' or cameras_element == 'bottom':
if checkCamerasElement( cameras_element ):
print('Processing %s camera elements' % (cameras_element) )
if processCamerasElement( cameras_element ):
print('Successfull')
return 0 ## successful exit
else:
print('Someting went wrong')
return 1 ## exit with errors or warnings
else:
print("Input data not found [%s]." % (cameras_element) )
print("")
return 1 ## exit with errors or warnings
else:
if checkCamerasElement( 'top' ):
print('Processing %s camera elements' % ('top') )
if processCamerasElement( 'top' ):
print('Successfull')
return 0 ## successful exit
else:
print('Someting went wrong')
return 1 ## exit with errors or warnings
elif checkCamerasElement( 'bottom' ):
print('Processing %s camera elements' % ('bottom'))
if processCamerasElement( 'bottom' ):
print('Successfull')
return 0 ## successful exit
else:
print('Someting went wrong')
return 1 ## exit with errors or warnings
else:
print('There is no data to process.')
return 1 ## exit with errors or warnings
else:
#print("")
print('Cameras element unknown.')
print("'cameras_element' parameter was not properly provided.")
print('')
return 1 ## exit with errors or warnings
###### main-function-end ########
##sys.exit(0)
| [
"pandas.DataFrame",
"os.path.abspath",
"os.remove",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"os.path.isfile",
"numpy.array",
"pandas.concat",
"os.path.join",
"os.listdir",
"shutil.copy"
] | [((4414, 4500), 'os.path.join', 'join', (['working_in_folder_path', 'camera_name', 'input_classification_path_to_crops_file'], {}), '(working_in_folder_path, camera_name,\n input_classification_path_to_crops_file)\n', (4418, 4500), False, 'from os.path import isfile, join\n'), ((4743, 4761), 'os.path.join', 'join', (['prefix', 'strr'], {}), '(prefix, strr)\n', (4747, 4761), False, 'from os.path import isfile, join\n'), ((4916, 4958), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "('species', 'count')"}), "(columns=('species', 'count'))\n", (4928, 4958), True, 'import pandas as pd\n'), ((6933, 7018), 'numpy.array', 'np.array', (["['filename', 'x1', 'y1', 'x2', 'y2', 'artifact-multi-decision', 'ncam']"], {}), "(['filename', 'x1', 'y1', 'x2', 'y2', 'artifact-multi-decision',\n 'ncam'])\n", (6941, 7018), True, 'import numpy as np\n'), ((7165, 7266), 'numpy.array', 'np.array', (["['ncam', 'filename', 'x1', 'y1', 'x2', 'y2', 'species_name',\n 'artifact-multi-decision']"], {}), "(['ncam', 'filename', 'x1', 'y1', 'x2', 'y2', 'species_name',\n 'artifact-multi-decision'])\n", (7173, 7266), True, 'import numpy as np\n'), ((15252, 15272), 'os.listdir', 'listdir', (['folder_path'], {}), '(folder_path)\n', (15259, 15272), False, 'from os import listdir\n'), ((16636, 16680), 'os.path.abspath', 'os.path.abspath', (['input_classification_folder'], {}), '(input_classification_folder)\n', (16651, 16680), False, 'import os\n'), ((16713, 16758), 'os.path.abspath', 'os.path.abspath', (['output_classification_folder'], {}), '(output_classification_folder)\n', (16728, 16758), False, 'import os\n'), ((4048, 4091), 'os.path.join', 'join', (['working_in_folder_path', 'camera_folder'], {}), '(working_in_folder_path, camera_folder)\n', (4052, 4091), False, 'from os.path import isfile, join\n'), ((4105, 4139), 'os.path.isdir', 'os.path.isdir', (['camera_folder_wpath'], {}), '(camera_folder_wpath)\n', (4118, 4139), False, 'import os\n'), ((5635, 5665), 'os.path.join', 'join', (['folder_path', 'output_file'], {}), '(folder_path, output_file)\n', (5639, 5665), False, 'from os.path import isfile, join\n'), ((8070, 8084), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8082, 8084), True, 'import pandas as pd\n'), ((8589, 8603), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8601, 8603), True, 'import pandas as pd\n'), ((15302, 15330), 'os.path.join', 'join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (15306, 15330), False, 'from os.path import isfile, join\n'), ((15344, 15367), 'os.path.isfile', 'isfile', (['file_name_wPath'], {}), '(file_name_wPath)\n', (15350, 15367), False, 'from os.path import isfile, join\n'), ((17065, 17104), 'os.path.exists', 'os.path.exists', (['working_out_folder_path'], {}), '(working_out_folder_path)\n', (17079, 17104), False, 'import os\n'), ((17202, 17238), 'os.makedirs', 'os.makedirs', (['working_out_folder_path'], {}), '(working_out_folder_path)\n', (17213, 17238), False, 'import os\n'), ((8740, 8783), 'os.path.join', 'join', (['working_in_folder_path', 'camera_folder'], {}), '(working_in_folder_path, camera_folder)\n', (8744, 8783), False, 'from os.path import isfile, join\n'), ((8903, 8937), 'os.path.isdir', 'os.path.isdir', (['camera_folder_wpath'], {}), '(camera_folder_wpath)\n', (8916, 8937), False, 'import os\n'), ((12266, 12322), 'numpy.array', 'np.array', (["['filename', 'acc', 'species-name', 'species']"], {}), "(['filename', 'acc', 'species-name', 'species'])\n", (12274, 12322), True, 'import numpy as np\n'), ((12983, 13060), 'os.path.join', 'join', (['working_in_folder_path', 'camera_folder_best_result', 'filename_best_result'], {}), '(working_in_folder_path, camera_folder_best_result, filename_best_result)\n', (12987, 13060), False, 'from os.path import isfile, join\n'), ((13077, 13111), 'os.path.isfile', 'isfile', (['filename_best_result_wPath'], {}), '(filename_best_result_wPath)\n', (13083, 13111), False, 'from os.path import isfile, join\n'), ((15383, 15409), 'os.remove', 'os.remove', (['file_name_wPath'], {}), '(file_name_wPath)\n', (15392, 15409), False, 'import os\n'), ((4166, 4222), 'os.path.join', 'join', (['camera_folder_wpath', 'input_classification_csv_file'], {}), '(camera_folder_wpath, input_classification_csv_file)\n', (4170, 4222), False, 'from os.path import isfile, join\n'), ((13130, 13194), 'shutil.copy', 'shutil.copy', (['filename_best_result_wPath', 'working_out_folder_path'], {}), '(filename_best_result_wPath, working_out_folder_path)\n', (13141, 13194), False, 'import shutil\n'), ((14809, 14823), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14821, 14823), True, 'import pandas as pd\n'), ((9082, 9138), 'os.path.join', 'join', (['camera_folder_wpath', 'input_classification_csv_file'], {}), '(camera_folder_wpath, input_classification_csv_file)\n', (9086, 9138), False, 'from os.path import isfile, join\n'), ((9186, 9242), 'os.path.join', 'join', (['camera_folder_wpath', 'input_classification_csv_file'], {}), '(camera_folder_wpath, input_classification_csv_file)\n', (9190, 9242), False, 'from os.path import isfile, join\n'), ((9795, 9862), 'pandas.concat', 'pd.concat', (['[df_wFishesPerSpecie, df_wFishesPerSpecieAndCam]'], {'axis': '(0)'}), '([df_wFishesPerSpecie, df_wFishesPerSpecieAndCam], axis=0)\n', (9804, 9862), True, 'import pandas as pd\n'), ((10079, 10110), 'pandas.concat', 'pd.concat', (['[df, df_cam]'], {'axis': '(0)'}), '([df, df_cam], axis=0)\n', (10088, 10110), True, 'import pandas as pd\n')] |
"""
Helper function to load part affinity fields
and confidence maps on systems with no GPU
where realtime processing is not possible
"""
import sys
sys.path.insert(0,'../../easy_multi_person_pose_estimation')
from poseestimation import model
from time import time
import numpy as np
from os.path import isfile, join
class Loader:
"""
Use this loader instead of the function below if you need to load
many frames to avoid memory leaks!
"""
def __init__(self, with_gpu=False):
self.pe = model.PoseEstimator()
self.with_gpu = with_gpu
def load_confidence_map_and_paf(self, name, Im, frame, dir='/tmp'):
"""
loads the confidence map and paf
:param name: to store the data
:param Im: np.array: n x h x w x 3
:param frame: {int}
:param with_gpu:
:param dir
:return:
"""
return load_confidence_map_and_paf(
name, Im, frame, with_gpu=self.with_gpu, dir=dir, pe=self.pe)
def load_confidence_map_and_paf(name, Im, frame, with_gpu=False, dir='/tmp', pe=None):
"""
loads the confidence map and paf
:param name: to store the data
:param Im: np.array: n x h x w x 3
:param frame: {int}
:param with_gpu:
:param dir
:return:
"""
if pe is None:
pe = model.PoseEstimator()
if with_gpu:
heatmaps, pafs = pe.predict_pafs_and_heatmaps(Im)
else:
hm_file = join(dir, name + 'heatmaps' + str(frame) + '.npy')
paf_file = join(dir, name + 'pafs' + str(frame) + '.npy')
if isfile(hm_file) and isfile(paf_file):
heatmaps = np.load(hm_file)
pafs = np.load(paf_file)
else:
heatmaps = []
pafs = []
for im in Im:
_start = time()
hm, paf = pe.predict_pafs_and_heatmaps(im)
heatmaps.append(np.squeeze(hm))
pafs.append(np.squeeze(paf))
_end = time()
print('elapsed:', _end - _start)
heatmaps = np.array(heatmaps)
pafs = np.array(pafs)
np.save(hm_file, heatmaps)
np.save(paf_file, pafs)
return heatmaps[:,:,:,0:-1], pafs | [
"numpy.load",
"numpy.save",
"sys.path.insert",
"time.time",
"os.path.isfile",
"numpy.array",
"numpy.squeeze",
"poseestimation.model.PoseEstimator"
] | [((161, 222), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../easy_multi_person_pose_estimation"""'], {}), "(0, '../../easy_multi_person_pose_estimation')\n", (176, 222), False, 'import sys\n'), ((529, 550), 'poseestimation.model.PoseEstimator', 'model.PoseEstimator', ([], {}), '()\n', (548, 550), False, 'from poseestimation import model\n'), ((1340, 1361), 'poseestimation.model.PoseEstimator', 'model.PoseEstimator', ([], {}), '()\n', (1359, 1361), False, 'from poseestimation import model\n'), ((1594, 1609), 'os.path.isfile', 'isfile', (['hm_file'], {}), '(hm_file)\n', (1600, 1609), False, 'from os.path import isfile, join\n'), ((1614, 1630), 'os.path.isfile', 'isfile', (['paf_file'], {}), '(paf_file)\n', (1620, 1630), False, 'from os.path import isfile, join\n'), ((1655, 1671), 'numpy.load', 'np.load', (['hm_file'], {}), '(hm_file)\n', (1662, 1671), True, 'import numpy as np\n'), ((1691, 1708), 'numpy.load', 'np.load', (['paf_file'], {}), '(paf_file)\n', (1698, 1708), True, 'import numpy as np\n'), ((2083, 2101), 'numpy.array', 'np.array', (['heatmaps'], {}), '(heatmaps)\n', (2091, 2101), True, 'import numpy as np\n'), ((2121, 2135), 'numpy.array', 'np.array', (['pafs'], {}), '(pafs)\n', (2129, 2135), True, 'import numpy as np\n'), ((2148, 2174), 'numpy.save', 'np.save', (['hm_file', 'heatmaps'], {}), '(hm_file, heatmaps)\n', (2155, 2174), True, 'import numpy as np\n'), ((2187, 2210), 'numpy.save', 'np.save', (['paf_file', 'pafs'], {}), '(paf_file, pafs)\n', (2194, 2210), True, 'import numpy as np\n'), ((1822, 1828), 'time.time', 'time', ([], {}), '()\n', (1826, 1828), False, 'from time import time\n'), ((2004, 2010), 'time.time', 'time', ([], {}), '()\n', (2008, 2010), False, 'from time import time\n'), ((1920, 1934), 'numpy.squeeze', 'np.squeeze', (['hm'], {}), '(hm)\n', (1930, 1934), True, 'import numpy as np\n'), ((1964, 1979), 'numpy.squeeze', 'np.squeeze', (['paf'], {}), '(paf)\n', (1974, 1979), True, 'import numpy as np\n')] |
# Calculate KZIT (index of first interior grid cell)
# interior grid cell = cell with no adjacent sidewall boundaries.
#
# Minimum of the 8 adjacent T-cells and the current T cell, -1 level.
# Author: <NAME>
# E-mail: <EMAIL>
# Date: May/2019
import numpy as np
from netCDF4 import Dataset
from xarray import DataArray
from xarray import Dataset as Datasetx
##
continent_flag = -1
fname = "/lustre/atlas1/cli115/proj-shared/ia_top_tx0.1_v2_60yrs/ia_top_tx0.1_v2_yel_patc_1948_intel_def_year_2009/ocn/hist/ia_top_tx0.1_v2_yel_patc_1948_intel.pop.h.2009-01.nc"
nc = Dataset(fname)
lont = nc.variables['TLONG'][:]
latt = nc.variables['TLAT'][:]
z_t = nc.variables['z_t'][:]
h_t = nc.variables['HT'][:]
kmt = nc.variables['KMT'][:] # k index of deepest T-cell.
ny, nx = h_t.shape
nym = ny - 1
nxm = nx - 1
nz = z_t.size
kmt = kmt - 1 # Python indexing starts at 0, FORTRAN indexing starts at 1.
# Get index of deepest interior grid cell.
print("Calculating KZIT.")
kzit = np.zeros((ny,nx))
for j in range(ny):
print(j+1," / ",ny)
for i in range(nx):
if kmt[j,i]==-1: # Continent mask.
kzit[j,i] = -1
continue
else:
if np.logical_and(j==0, i==0):
kzit[j,i] = np.min([kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,i+1], kmt[j,i+1], kmt[j,i]]) - 1
elif np.logical_and(j==nym, i==0):
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j,i+1], kmt[j-1,i+1], kmt[j-1,i], kmt[j,i]]) - 1
elif np.logical_and(j==nym, i==nxm):
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j,0], kmt[j-1,0], kmt[j-1,i], kmt[j,i]]) - 1
elif np.logical_and(j==0, i==nxm):
kzit[j,i] = np.min([kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,0], kmt[j,0], kmt[j,i]]) - 1
elif j==0:
kzit[j,i] = np.min([kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,i+1], kmt[j,i+1], kmt[j,i]]) - 1
elif j==nym:
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j,i+1], kmt[j-1,i+1], kmt[j-1,i], kmt[j,i]]) - 1
elif i==0:
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,i+1], kmt[j,i+1], kmt[j-1,i+1], kmt[j-1,i], kmt[j,i]]) - 1
elif i==nxm:
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,0], kmt[j,0], kmt[j-1,0], kmt[j-1,i], kmt[j,i]]) - 1
else:
kzit[j,i] = np.min([kmt[j-1,i-1], kmt[j,i-1], kmt[j+1,i-1], kmt[j+1,i], kmt[j+1,i+1], kmt[j,i+1], kmt[j-1,i+1], kmt[j-1,i], kmt[j,i]]) - 1
# Change back to fortran indexing. Continent mask is now 0.
kzit = np.int32(kzit + 1)
kmt = np.int32(kmt + 1)
kzit[kzit==-1] = 0
dims = ['x', 'y']
coords = dict(lont=(dims, lont), latt=(dims, latt))
kmt = DataArray(kmt, coords=coords, dims=dims)
kzit = DataArray(kzit, coords=coords, dims=dims)
fout = '/ccs/home/apaloczy/analysis/data/kzit.nc'
Datasetx(data_vars=dict(kmt=kmt, kzit=kzit), coords=coords).to_netcdf(fout)
| [
"netCDF4.Dataset",
"numpy.logical_and",
"numpy.zeros",
"numpy.min",
"xarray.DataArray",
"numpy.int32"
] | [((584, 598), 'netCDF4.Dataset', 'Dataset', (['fname'], {}), '(fname)\n', (591, 598), False, 'from netCDF4 import Dataset\n'), ((991, 1009), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (999, 1009), True, 'import numpy as np\n'), ((2693, 2711), 'numpy.int32', 'np.int32', (['(kzit + 1)'], {}), '(kzit + 1)\n', (2701, 2711), True, 'import numpy as np\n'), ((2718, 2735), 'numpy.int32', 'np.int32', (['(kmt + 1)'], {}), '(kmt + 1)\n', (2726, 2735), True, 'import numpy as np\n'), ((2833, 2873), 'xarray.DataArray', 'DataArray', (['kmt'], {'coords': 'coords', 'dims': 'dims'}), '(kmt, coords=coords, dims=dims)\n', (2842, 2873), False, 'from xarray import DataArray\n'), ((2881, 2922), 'xarray.DataArray', 'DataArray', (['kzit'], {'coords': 'coords', 'dims': 'dims'}), '(kzit, coords=coords, dims=dims)\n', (2890, 2922), False, 'from xarray import DataArray\n'), ((1198, 1228), 'numpy.logical_and', 'np.logical_and', (['(j == 0)', '(i == 0)'], {}), '(j == 0, i == 0)\n', (1212, 1228), True, 'import numpy as np\n'), ((1358, 1390), 'numpy.logical_and', 'np.logical_and', (['(j == nym)', '(i == 0)'], {}), '(j == nym, i == 0)\n', (1372, 1390), True, 'import numpy as np\n'), ((1254, 1360), 'numpy.min', 'np.min', (['[kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, i + 1], kmt[j,\n i + 1], kmt[j, i]]'], {}), '([kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, i + 1],\n kmt[j, i + 1], kmt[j, i]])\n', (1260, 1360), True, 'import numpy as np\n'), ((1520, 1554), 'numpy.logical_and', 'np.logical_and', (['(j == nym)', '(i == nxm)'], {}), '(j == nym, i == nxm)\n', (1534, 1554), True, 'import numpy as np\n'), ((1416, 1522), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j -\n 1, i], kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, i + 1], kmt[j - 1, i + 1],\n kmt[j - 1, i], kmt[j, i]])\n', (1422, 1522), True, 'import numpy as np\n'), ((1680, 1712), 'numpy.logical_and', 'np.logical_and', (['(j == 0)', '(i == nxm)'], {}), '(j == 0, i == nxm)\n', (1694, 1712), True, 'import numpy as np\n'), ((1580, 1678), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, 0], kmt[j - 1, 0], kmt[j - 1, i],\n kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, 0], kmt[j - 1, 0], kmt[j -\n 1, i], kmt[j, i]])\n', (1586, 1678), True, 'import numpy as np\n'), ((1738, 1837), 'numpy.min', 'np.min', (['[kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, 0], kmt[j, 0],\n kmt[j, i]]'], {}), '([kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, 0], kmt\n [j, 0], kmt[j, i]])\n', (1744, 1837), True, 'import numpy as np\n'), ((1872, 1978), 'numpy.min', 'np.min', (['[kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, i + 1], kmt[j,\n i + 1], kmt[j, i]]'], {}), '([kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j + 1, i + 1],\n kmt[j, i + 1], kmt[j, i]])\n', (1878, 1978), True, 'import numpy as np\n'), ((2012, 2118), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j -\n 1, i], kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j, i + 1], kmt[j - 1, i + 1],\n kmt[j - 1, i], kmt[j, i]])\n', (2018, 2118), True, 'import numpy as np\n'), ((2150, 2314), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j +\n 1, i + 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j - 1, i], kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i],\n kmt[j + 1, i + 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j - 1, i], kmt\n [j, i]])\n', (2156, 2314), True, 'import numpy as np\n'), ((2330, 2477), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j +\n 1, 0], kmt[j, 0], kmt[j - 1, 0], kmt[j - 1, i], kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i],\n kmt[j + 1, 0], kmt[j, 0], kmt[j - 1, 0], kmt[j - 1, i], kmt[j, i]])\n', (2336, 2477), True, 'import numpy as np\n'), ((2497, 2661), 'numpy.min', 'np.min', (['[kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i], kmt[j +\n 1, i + 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j - 1, i], kmt[j, i]]'], {}), '([kmt[j - 1, i - 1], kmt[j, i - 1], kmt[j + 1, i - 1], kmt[j + 1, i],\n kmt[j + 1, i + 1], kmt[j, i + 1], kmt[j - 1, i + 1], kmt[j - 1, i], kmt\n [j, i]])\n', (2503, 2661), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from mendeleev import element
from scipy.constants import codata
from read_database import load_levels_lines
from RF_mathematica import cross_section
from cumsum_diff import cumsum_diff
from iter_calc import *
import itertools
import matplotlib.pyplot as plt, mpld3
import scipy as sp
import sympy as sympy
from sys import *
import os
import ast
import scipy.stats
import matplotlib.cm as cm
sim_list='sim_list_all.csv'
atomionspins_file = open("datatables/nistatomionspins.csv",'rb')
atomionspins = np.genfromtxt(atomionspins_file,delimiter = '\t',dtype=str,skip_header=1,autostrip=1)
def popsim(ele_sym_A,ele_sym_B,T_B,dist,sidepeaks_transition,sidepeaks_collinear, time_steps, database, charge_state, double_ce):
print(sidepeaks_transition)
sidepeaks_sim=bool(sidepeaks_transition)
print(sidepeaks_sim)
try:
ele_num_B=int(ele_sym_B)
ele_sym_B=element(ele_num_B).symbol
except:
pass
ele_A=element(ele_sym_A) #e.g. Na, K, Li
ele_B=element(ele_sym_B)
if not os.path.exists("results/Z"+str(ele_B.atomic_number)):
os.makedirs("results/Z"+str(ele_B.atomic_number))
row = np.where(atomionspins[:,1] == ele_sym_A)
react_L_A=float(atomionspins[:,2][row][0]) #I
react_S_A=float(atomionspins[:,3][row][0])
prod_L_A=float(atomionspins[:,4][row][0]) # II
prod_S_A=float(atomionspins[:,5][row][0])
row = np.where(atomionspins[:,1] == ele_sym_B)
react_L_B=float(atomionspins[:,4][row][0]) # II
react_S_B=float(atomionspins[:,5][row][0])
show_notrans=0
m_B=ele_B.mass
I_A=ele_A.ionenergies[1]
B_string=ele_sym_B+'I'*charge_state
if dist !=0:
I_B, term, J, L, S, level, skipped_level, E_k_probs = load_levels_lines(B_string, 0, dist, database, ele_B, charge_state, I_A)
else:
I_B, term, J, L, S, level, skipped_level = load_levels_lines(B_string, 0, dist, database, ele_B, charge_state, I_A)
print(B_string+ " levels and lines loaded")
df_double_ce=pd.DataFrame()
tot_cs, levels_pops_detunes, levels_cs, df_double_ce = iter_calc([0.0], react_S_B, react_L_B, I_B, term, J, L, S, level, skipped_level, 0, df_double_ce, ele_B, ele_A, prod_S_A, react_S_A, react_L_A, charge_state, T_B, dist)
levels_pops_detunes_initial = levels_pops_detunes
I_B2, term2, J2, L2, S2, level2, skipped_level2 = I_B, term, J, L, S, level, skipped_level #save charge state 2 for iteration
second_ce_levels, second_ce_cs=[], []
if double_ce:
if dist == 0:
for i, l2 in enumerate(level2):
I_B, term, J, L, S, level, skipped_level = load_levels_lines(B_string, 1, dist, database, ele_B, charge_state, I_A)
print("##################################################################################")
print("##################################################################################")
print(str(l2), str(term2[i]), str(S2[i]), str(L2[i]), str(I_B-float(l2)*hc))
tot_cs, levels_pops_detunes, levels_cs, df_double_ce = iter_calc(l2, S2[i], L2[i], I_B-float(l2)*hc, term, J, L, S, level, skipped_level, 1, df_double_ce, ele_B, ele_A, prod_S_A, react_S_A, react_L_A, charge_state, T_B, dist)
print("Total CS:", tot_cs)
second_ce_levels.append(l2)
second_ce_cs.append(tot_cs)
file_string='results/Z'+str(ele_B.atomic_number)+'/second_ce/levels_pops_detunes'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist)
df_double_ce.to_csv(file_string)
second_ce_levels_css=np.array(list(zip(second_ce_levels, second_ce_cs)), dtype=float)
np.savetxt('results/Z'+str(ele_B.atomic_number)+'/second_ce_levels_css'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist), second_ce_levels_css, delimiter=';')
else:
print("can't do double cec with dist !=0 ")
##################evolve population###################
if not sidepeaks_sim: levels_pops_detunes=levels_pops_detunes[:,[0,1]] # remove energy differences column
evolved_level=[]
unevolved_level=[]
amu=codata.value('atomic mass constant')#kg
velocity=np.sqrt((2*T_B*1.6E-19)/(m_B*amu))*10**2
c=codata.value('speed of light in vacuum')*10**2 #cm
flight_time=dist/velocity
print("flight time:", flight_time)
print(ele_sym_A,":" ,ele_A.description)
print(ele_sym_B,":", ele_B.description)
E_k_probs.sort()
E_k_probs=list(E_k_probs for E_k_probs,_ in itertools.groupby(E_k_probs)) # remove possible duplicates
if flight_time != 0:
dt=(flight_time/time_steps)
for step in range(0,time_steps):
print("time step:",step)
#times previous step by time increment
# cs_norm_evol_previous=list(cs_norm_evol)
levels_pops_detunes_previous=levels_pops_detunes
#first decrease population of the upper level energies here
for index1,level_pop_detune1 in enumerate(levels_pops_detunes): #go through uppers
level1=level_pop_detune1[0]#upper level
pop1=level_pop_detune1[1]#upper pop
if sidepeaks_sim: eloss1=level_pop_detune1[2]
for E_k_prob in E_k_probs:
upper_energy=E_k_prob[0]
lower_energy=E_k_prob[1]
this_A=E_k_prob[2]
if round(upper_energy,2) == round(level1, 2):#find decays from this upper, round incase diff databases
if round(lower_energy,2) in np.around(levels_pops_detunes[:,0], decimals=2): # maintains precision of NIST database for lower energy
index3 = np.where(np.around(levels_pops_detunes[:,0], decimals=2)==round(lower_energy,2))
lower_energy=float(levels_pops_detunes[:,0][index3][0])
if sidepeaks_sim:
index2=np.array(np.where(np.all(levels_pops_detunes[:,[0,2]] == np.array([lower_energy,level1-eloss1-lower_energy]),axis=1)))
if index2.size == 1:
levels_pops_detunes[:,1][index2]=levels_pops_detunes[:,1][index2]+pop1*(1-np.exp(-dt*this_A)) #add pop into lower level
elif index2.size == 0:
newrow=np.array([lower_energy, pop1*(1-np.exp(-dt*this_A)), (level1-eloss1-lower_energy)], dtype=float)
levels_pops_detunes=np.vstack((levels_pops_detunes,newrow))
else: print("index 2 size error", index2)
pop1=pop1*np.exp(-dt*this_A) #decay upper level pop
levels_pops_detunes[:,1][index1]=pop1*np.exp(-dt*this_A)
else:
if lower_energy in levels_pops_detunes[:,0]:#adds pop to lower level if exiting level
index2=np.where(levels_pops_detunes[:,0]==lower_energy)
levels_pops_detunes[:,1][index2]=levels_pops_detunes[:,1][index2]+pop1*(1-np.exp(-dt*this_A))#adds decayed pop to lower level
else:
newrow=np.array([lower_energy, pop1*(1-np.exp(-dt*this_A))], dtype=float)#creates new pop if new level
levels_pops_detunes=np.vstack((levels_pops_detunes,newrow))
# if upper_energy==39625.506:
# print("upper_energy, lower_energy, pop1, pop1*np.exp(-dt*this_A), this_A")
# print(upper_energy, lower_energy, pop1, pop1*np.exp(-dt*this_A), this_A)
# print(step, step*dt)
pop1=pop1*np.exp(-dt*this_A) #decay upper level pop
levels_pops_detunes[:,1][index1]=pop1
print(levels_pops_detunes)
print("sum of pops:",sum(levels_pops_detunes[:,1]))
print("final states populated")
else:
print("no time of flight population change")
if sidepeaks_sim:
levels_pops_detunes_sorted=cumsum_diff(levels_pops_detunes[:,[0,1]]) #get rid of seperate entries for displaying populations
levels_pops_detunes_sorted=levels_pops_detunes_sorted[levels_pops_detunes_sorted[:,1].argsort()[::-1]]
else:
levels_pops_detunes_sorted=levels_pops_detunes
levels_pops_detunes_sorted=levels_pops_detunes_sorted[levels_pops_detunes_sorted[:,1].argsort()[::-1]]
levels_pops_detunes_sorted=levels_pops_detunes_sorted.astype(float)
print("top 10 populations:",levels_pops_detunes_sorted[:10])
np.savetxt('results/Z'+str(ele_B.atomic_number)+'/levels_pops_detunes_sorted'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist), levels_pops_detunes_sorted, delimiter=';', fmt="%.5f")
fig, ax = plt.subplots(figsize=(6, 10))
# if show_notrans:
# unevolved_level=list(set(energy_levels_evol)-set(evolved_level))
# if len(unevolved_level)>0:
# plt.axvline(x=unevolved_level[1], color='m', linestyle='-', label="No transitions from level (no info)")
# for ulevel in unevolved_level[2:]: plt.axvline(x=ulevel, color='m', linestyle='-')
if len(skipped_level)>0:
ax.plot( [0,1] ,[skipped_level[0],skipped_level[0]], color='g', linestyle='--', label="Skipped initial population (unknown spin)")
for slevel in skipped_level[1:]:
ax.plot( [0,1] ,[slevel,slevel], color='g', linestyle='--')
ax.plot(levels_pops_detunes_initial[:,1],levels_pops_detunes_initial[:,0],'ro',label="Initial population")
for pt in levels_pops_detunes_initial[:,[0,1]]:
ax.plot( [0,pt[1]],[pt[0],pt[0]], color='r')
if flight_time != 0:
ax.plot(levels_pops_detunes_sorted[:,1],levels_pops_detunes_sorted[:,0],'bs',label="Final population")
#
for pt in levels_pops_detunes_sorted[:,[0,1]]:
ax.plot([0,pt[1]] ,[pt[0],pt[0]], color='b')
ax.set_ylabel("Energy level (cm-1)", fontsize=14)
ax.set_xlabel("Normalised population", fontsize=14)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.plot([0,max(levels_pops_detunes_sorted[:,1])],[(I_B-I_A)/hc,(I_B-I_A)/hc], color='k', linestyle='--', label="CE entry energy")
##resonance plot##
if 1: #plot bare cross_section
try:
energy_levels, bare_cross_sections=levels_cs[:,0],levels_cs[:,1]
bare_cs_norm_before = [float(i)/sum(bare_cross_sections) for i in bare_cross_sections] # this norm is without f factor..
bare_cs=[float(c) for c in bare_cross_sections]
bare_el=[float(e) for e in energy_levels]
extra_els=np.linspace(min(bare_el),max(bare_el),30)
for el in extra_els:
I_B_ex=I_B-float(el)*hc
try:
cs=cross_section(I_A, I_B_ex, m_B, T_B)
bare_cs.append(float(cs))
bare_el.append(float(el))
print("el","cs",el,cs)
except:
print("problem with", el, "cm-1")
# if cs < 2.0E-10: #to reduce the amount of time wasted on calculation for the plot
# break
print("bare_cs_norm_before", bare_cs_norm_before)
print("bare_cs", bare_cs)
#values.index(min(values))
bare_cs_norm_after = [float(i)/sum(bare_cs) for i in bare_cs]
# scale_ref=bare_cs_norm_before.index(max(bare_cs_norm_before))
# scale_ratio=bare_cs_norm_after[scale_ref]/(bare_cs_norm_before[scale_ref]*levels_pops_detunes_initial[:,1][scale_ref]) #levels_pops_detunes_initial[:,1][scale_ref] for f factor norm
# max_pop=max(levels_pops_detunes_sorted[:,1])
max_cs_i=bare_cs_norm_before.index(max(bare_cs_norm_before))
# cs_i=bare_cs_norm_before[0]
scale_ratio=max(levels_pops_detunes_sorted[:,1])/bare_cs_norm_after[max_cs_i]#)*levels_pops_detunes_initial[:,1][0]
print("scale_ratio", scale_ratio)
# scale_ratio=float(bare_cs_norm_before[-1])/float(bare_cross_sections[-1])
# bare_cs_norm=[float(i)*scale_ratio for i in bare_cs]
bare_cs_norm=[scale_ratio*float(i)/sum(bare_cs) for i in bare_cs]
bare_el, bare_cs_norm = (list(t) for t in zip(*sorted(zip(bare_el, bare_cs_norm))))
ax.plot(bare_cs_norm,bare_el, '--', label="Bare cross-section")
except Exception as exception:
print(exception)
ax.legend(numpoints=1, loc="upper right")
mpld3.save_html(fig=fig, fileobj='results/Z'+str(ele_B.atomic_number)+'/fig_'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist)+'.html')
fig.savefig('results/Z'+str(ele_B.atomic_number)+'/fig_'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist)+'.pdf')
plt.show()
if sidepeaks_sim:
plt.show()
print("finished popsim")
# change energy of beam by the energy loss of previous transitions (make sure includes chains)
# and then dopplershift transition along with a scan range.. make last column beam energy instead?
if sidepeaks_sim:
try:
E_line_rest=sidepeaks_transition[1]-sidepeaks_transition[0]
transition_detuning_pops_f = open('results/Z'+str(ele_B.atomic_number)+'/transition_detuning_pops'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist)+"_"+str(E_line_rest),'rb')
transition_detuning_pops=np.loadtxt(transition_detuning_pops_f, delimiter=';',dtype=float)
# print(transition_detuning_pops)
except Exception as exception:
print(exception)
#work out doppler shift of all transitions from lower levels,
#keep/plot those with range matching near what we want and keep track of seperate transitions
cm_to_MHz=29.9792458*10**3 #1 cm^-1=29.9792458 GHz in vacuum
E_line_rest=sidepeaks_transition[1]-sidepeaks_transition[0] #transition cm-^1
velocity=np.sqrt((2*(T_B)*1.6E-19)/(m_B*amu)) #shifts energy of beam in eV then converts to velocity
beta=velocity/(c*10**(-2))
if sidepeaks_collinear: E_line=E_line_rest*np.sqrt((1+beta)/(1-beta))
else: E_line=E_line_rest*np.sqrt((1-beta)/(1+beta))
only_same_transition=True
transition_detuning_pops=[]
for index1, level_pop_detune1 in enumerate(levels_pops_detunes): #where equals lower level..
level1=level_pop_detune1[0]
pop1=level_pop_detune1[1]
eloss1=level_pop_detune1[2]
if only_same_transition and (level1 == sidepeaks_transition[0]):
velocity=np.sqrt((2*(T_B-eloss1*hc)*1.6E-19)/(m_B*amu)) #shifts energy of beam in eV then converts to velocity #BEAM ENERGY IN JOULES NOT eV!!
beta=velocity/(c*10**(-2))
if sidepeaks_collinear:
E_line_detune=E_line_rest*np.sqrt((1+beta)/(1-beta))
else:
E_line_detune=E_line_rest*np.sqrt((1-beta)/(1+beta))
detuning = (float(E_line_detune)-float(E_line))*cm_to_MHz #detuning from transition in MHz
if abs(detuning) < 10000: #10 GHz scan range
print([E_line,detuning,pop1])
transition_detuning_pops.append([E_line,detuning,pop1])
else:
# print("detuning",abs(detuning), eloss1*hc ,"eV, too big?")
pass
else:
for E_k_prob in (E_k_prob for E_k_prob in E_k_probs if round(E_k_prob[1],2) == round(level1,2)):#where lower energy is transition within 2 d.p. Finds upper levels that transition into this lower.
this_transition=E_k_prob[0]-E_k_prob[1] #upper-lower
velocity=np.sqrt((2*(T_B-eloss1*hc)*1.6E-19)/(m_B*amu)) #shifts energy of beam in eV then converts to velocity #BEAM ENERGY IN JOULES NOT eV!!
beta=velocity/(c*10**(-2))
if sidepeaks_collinear:
this_transition_detune=this_transition*np.sqrt((1+beta)/(1-beta))
else:
this_transition_detune=this_transition*np.sqrt((1-beta)/(1+beta))
detuning = (float(this_transition_detune)-float(E_line))*cm_to_MHz #detuning from transition in MHz
if abs(detuning) < 10000: #10 GHz scan range
print([this_transition,detuning,pop1])
transition_detuning_pops.append([this_transition,detuning,pop1])
else:
# print("detuning",abs(detuning), eloss1*hc ,"eV, too big?")
pass
transition_detuning_pops=np.array(transition_detuning_pops)
transition_detuning_pops[:,2]=transition_detuning_pops[:,2] / transition_detuning_pops[:,2].max(axis=0)
np.savetxt('results/Z'+str(ele_B.atomic_number)+'/transition_detuning_pops'+ele_sym_B+"I"*charge_state+"_"+ele_sym_A+"_"+str(T_B)+"_"+str(dist)+"_"+str(E_line_rest), transition_detuning_pops, delimiter=';', fmt='%1.3f')
print(transition_detuning_pops)
transition_detuning_pops=transition_detuning_pops[transition_detuning_pops[:,0].argsort()] #sort into transitions
transitions_legend={}
for transition_detuning_pop in transition_detuning_pops:
key=transition_detuning_pop[0]
if key in transitions_legend:
transitions_legend[key]=np.vstack((transitions_legend[key],np.array([transition_detuning_pop[1],transition_detuning_pop[2]]))) # updates if exists, else adds
else:
transitions_legend[key]=np.array([[transition_detuning_pop[1],transition_detuning_pop[2]]])
print(transitions_legend)
colors = cm.rainbow(np.linspace(0, 1, len(transitions_legend)))
fig, ax = plt.subplots(figsize=(7, 5))
axes = [ax, ax.twiny()]#, ax.twinx()
for col_i, key in enumerate(transitions_legend):
axes[0].plot(transitions_legend[key][:,0], transitions_legend[key][:,1], 'ro', label=str(round(key,2))+"transition", c=colors[col_i])
for pt in transitions_legend[key][:,[0,1]]:
# plot (x,y) pairs.
# vertical line: 2 x,y pairs: (a,0) and (a,b)
axes[0].plot([pt[0],pt[0]], [0,pt[1]] , color=colors[col_i])
binning_MHz=10
#binning sidepeak data
no_bins=int(transition_detuning_pops[:,1].max()-transition_detuning_pops[:,1].min())/binning_MHz
binned_detunes=scipy.stats.binned_statistic(transition_detuning_pops[:,1],transition_detuning_pops[:,2], bins=no_bins, statistic="sum")
binned_detunes_bins=[(a + b) / 2 for a, b in zip(binned_detunes.bin_edges[0:], binned_detunes.bin_edges[1:])]
detune_counts=[]
for detune_i, detune in enumerate(transition_detuning_pops[:,1]):
detune_counts.append([detune]*int(transition_detuning_pops[:,2][detune_i]*1000))
# detune_counts=list(np.array(detune_counts).flat)
# print("detune_counts", detune_counts)
detune_counts=[item for sublist in detune_counts for item in sublist]
#binning sidepeak data
axes[0].bar(binned_detunes_bins, binned_detunes.statistic, label=str(binning_MHz)+" MHz binning", align='center', alpha=0.4, width=(binned_detunes_bins[1]-binned_detunes_bins[0])) # A bar chart
# X_plot = np.linspace(min(transition_detuning_pops[:,1])-50, max(transition_detuning_pops[:,1])+50, 1000)
# kde = KernelDensity(kernel='gaussian', bandwidth=4).fit(np.array(detune_counts).reshape((-1, 1)))
# log_dens = kde.score_samples(np.array(X_plot).reshape((-1, 1)))
# plt.fill(X_plot, np.exp(log_dens)*1000.0/(35*1.75), fc='#AAAAFF')
# sidepeak_model=ConstantModel()
# for detune_i, detune in enumerate(transition_detuning_pops[:,1]):
# sidepeak_model=sidepeak_model + LorentzianModel() # amplitude=np.array([transition_detuning_pops[:,2][detune_i]], sigma=[10.0], center=[detune]
#
# print(sidepeak_model.param_names)
#
# print("X_plot, sidepeak_model(X_plot)", X_plot, sidepeak_model(X_plot))
# plt.plot(X_plot, sidepeak_model(X_plot))
axes[0].set_xlabel("Detuning (MHz)", fontsize=20)
axes[1].set_xlabel('Detuning (eV)', fontsize=20)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.ylabel("Norm. Population", fontsize=20)
plt.legend(numpoints=1, loc=2)
plt.xlim((-200, +200))
# ax1Ticks = axes[0].get_xticks()
# ax2Ticks = ax1Ticks
#
# def tick_function(X):
# V = X + 49
# return [str(z) for z in V]
#
# axes[1].set_xticks(ax2Ticks)
# axes[1].set_xbound(axes[0].get_xbound())
# axes[1].set_xticklabels(tick_function(ax2Ticks))
ax = plt.gca()
ax.get_xaxis().get_major_formatter().set_scientific(False)
plt.show()
with open(sim_list,'rb') as csv_file:
iter_list = np.genfromtxt(csv_file,delimiter = ' ',dtype=str,skip_header=1,autostrip=1)
sim_no=[int(x) for x in iter_list.T[0]]
ele_sym_Bs=[x for x in iter_list.T[1]]
ele_sym_As=[ast.literal_eval(x) for x in iter_list.T[2]]
sidepeaks_transitions=[ast.literal_eval(x) for x in iter_list.T[3]]
sidepeaks_collinears=[ast.literal_eval(x) for x in iter_list.T[4]]
dists=[float(x) for x in iter_list.T[5]]
T_Bs=[float(x) for x in iter_list.T[6]]
time_stepss=[int(x) for x in iter_list.T[7]]
skips=[bool(int(x)) for x in iter_list.T[8]]
databases=[str(x) for x in iter_list.T[9]]
charge_states=[int(x) for x in iter_list.T[10]]
double_ces=[bool(int(x)) for x in iter_list.T[11]]
csv_file.close()
for sym_B_i, ele_sym_B in enumerate(ele_sym_Bs):
for sym_A_i, ele_sym_A in enumerate(ele_sym_As[sym_B_i]):
skip=skips[sym_B_i]
database=databases[sym_B_i]
sidepeaks_transition=sidepeaks_transitions[sym_B_i]
sidepeaks_collinear=sidepeaks_collinears[sym_B_i]
dist=dists[sym_B_i]
T_B=T_Bs[sym_B_i]
time_steps=time_stepss[sym_B_i]
charge_state=charge_states[sym_B_i]
double_ce=double_ces[sym_B_i]
print("tion:",ele_sym_A,ele_sym_B,T_B,dist,sidepeaks_transition,sidepeaks_collinear,time_steps, charge_state)
if not skip:
try:
popsim(ele_sym_A,ele_sym_B,T_B,dist,sidepeaks_transition,sidepeaks_collinear,time_steps,database, charge_state, double_ce)
except Exception as e:
print(e)
print("PROBLEM with element", ele_sym_B)
else:
print("simulation skipped")
| [
"scipy.constants.codata.value",
"RF_mathematica.cross_section",
"numpy.around",
"read_database.load_levels_lines",
"numpy.exp",
"matplotlib.pyplot.gca",
"pandas.DataFrame",
"numpy.genfromtxt",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"cumsum_diff.cumsum_diff",
... | [((544, 635), 'numpy.genfromtxt', 'np.genfromtxt', (['atomionspins_file'], {'delimiter': '"""\t"""', 'dtype': 'str', 'skip_header': '(1)', 'autostrip': '(1)'}), "(atomionspins_file, delimiter='\\t', dtype=str, skip_header=1,\n autostrip=1)\n", (557, 635), True, 'import numpy as np\n'), ((953, 971), 'mendeleev.element', 'element', (['ele_sym_A'], {}), '(ele_sym_A)\n', (960, 971), False, 'from mendeleev import element\n'), ((995, 1013), 'mendeleev.element', 'element', (['ele_sym_B'], {}), '(ele_sym_B)\n', (1002, 1013), False, 'from mendeleev import element\n'), ((1137, 1178), 'numpy.where', 'np.where', (['(atomionspins[:, 1] == ele_sym_A)'], {}), '(atomionspins[:, 1] == ele_sym_A)\n', (1145, 1178), True, 'import numpy as np\n'), ((1369, 1410), 'numpy.where', 'np.where', (['(atomionspins[:, 1] == ele_sym_B)'], {}), '(atomionspins[:, 1] == ele_sym_B)\n', (1377, 1410), True, 'import numpy as np\n'), ((1934, 1948), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1946, 1948), True, 'import pandas as pd\n'), ((3905, 3941), 'scipy.constants.codata.value', 'codata.value', (['"""atomic mass constant"""'], {}), "('atomic mass constant')\n", (3917, 3941), False, 'from scipy.constants import codata\n'), ((7860, 7889), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (7872, 7889), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((11448, 11458), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11456, 11458), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18710, 18788), 'numpy.genfromtxt', 'np.genfromtxt', (['csv_file'], {'delimiter': '"""\t"""', 'dtype': 'str', 'skip_header': '(1)', 'autostrip': '(1)'}), "(csv_file, delimiter='\\t', dtype=str, skip_header=1, autostrip=1)\n", (18723, 18788), True, 'import numpy as np\n'), ((1674, 1746), 'read_database.load_levels_lines', 'load_levels_lines', (['B_string', '(0)', 'dist', 'database', 'ele_B', 'charge_state', 'I_A'], {}), '(B_string, 0, dist, database, ele_B, charge_state, I_A)\n', (1691, 1746), False, 'from read_database import load_levels_lines\n'), ((1799, 1871), 'read_database.load_levels_lines', 'load_levels_lines', (['B_string', '(0)', 'dist', 'database', 'ele_B', 'charge_state', 'I_A'], {}), '(B_string, 0, dist, database, ele_B, charge_state, I_A)\n', (1816, 1871), False, 'from read_database import load_levels_lines\n'), ((3955, 3995), 'numpy.sqrt', 'np.sqrt', (['(2 * T_B * 1.6e-19 / (m_B * amu))'], {}), '(2 * T_B * 1.6e-19 / (m_B * amu))\n', (3962, 3995), True, 'import numpy as np\n'), ((3999, 4039), 'scipy.constants.codata.value', 'codata.value', (['"""speed of light in vacuum"""'], {}), "('speed of light in vacuum')\n", (4011, 4039), False, 'from scipy.constants import codata\n'), ((7147, 7190), 'cumsum_diff.cumsum_diff', 'cumsum_diff', (['levels_pops_detunes[:, [0, 1]]'], {}), '(levels_pops_detunes[:, [0, 1]])\n', (7158, 7190), False, 'from cumsum_diff import cumsum_diff\n'), ((11481, 11491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11489, 11491), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((15847, 15875), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5)'}), '(figsize=(7, 5))\n', (15859, 15875), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18125, 18186), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (18145, 18186), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18188, 18231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Norm. Population"""'], {'fontsize': '(20)'}), "('Norm. Population', fontsize=20)\n", (18198, 18231), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18234, 18264), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'numpoints': '(1)', 'loc': '(2)'}), '(numpoints=1, loc=2)\n', (18244, 18264), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18267, 18289), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-200, +200)'], {}), '((-200, +200))\n', (18275, 18289), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18572, 18581), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (18579, 18581), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18646, 18656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18654, 18656), True, 'import matplotlib.pyplot as plt, mpld3\n'), ((18880, 18899), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (18896, 18899), False, 'import ast\n'), ((18949, 18968), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (18965, 18968), False, 'import ast\n'), ((19017, 19036), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (19033, 19036), False, 'import ast\n'), ((902, 920), 'mendeleev.element', 'element', (['ele_num_B'], {}), '(ele_num_B)\n', (909, 920), False, 'from mendeleev import element\n'), ((12031, 12097), 'numpy.loadtxt', 'np.loadtxt', (['transition_detuning_pops_f'], {'delimiter': '""";"""', 'dtype': 'float'}), "(transition_detuning_pops_f, delimiter=';', dtype=float)\n", (12041, 12097), True, 'import numpy as np\n'), ((2508, 2580), 'read_database.load_levels_lines', 'load_levels_lines', (['B_string', '(1)', 'dist', 'database', 'ele_B', 'charge_state', 'I_A'], {}), '(B_string, 1, dist, database, ele_B, charge_state, I_A)\n', (2525, 2580), False, 'from read_database import load_levels_lines\n'), ((4260, 4288), 'itertools.groupby', 'itertools.groupby', (['E_k_probs'], {}), '(E_k_probs)\n', (4277, 4288), False, 'import itertools\n'), ((12509, 12549), 'numpy.sqrt', 'np.sqrt', (['(2 * T_B * 1.6e-19 / (m_B * amu))'], {}), '(2 * T_B * 1.6e-19 / (m_B * amu))\n', (12516, 12549), True, 'import numpy as np\n'), ((14800, 14834), 'numpy.array', 'np.array', (['transition_detuning_pops'], {}), '(transition_detuning_pops)\n', (14808, 14834), True, 'import numpy as np\n'), ((15670, 15738), 'numpy.array', 'np.array', (['[[transition_detuning_pop[1], transition_detuning_pop[2]]]'], {}), '([[transition_detuning_pop[1], transition_detuning_pop[2]]])\n', (15678, 15738), True, 'import numpy as np\n'), ((9673, 9709), 'RF_mathematica.cross_section', 'cross_section', (['I_A', 'I_B_ex', 'm_B', 'T_B'], {}), '(I_A, I_B_ex, m_B, T_B)\n', (9686, 9709), False, 'from RF_mathematica import cross_section\n'), ((12677, 12709), 'numpy.sqrt', 'np.sqrt', (['((1 + beta) / (1 - beta))'], {}), '((1 + beta) / (1 - beta))\n', (12684, 12709), True, 'import numpy as np\n'), ((12732, 12764), 'numpy.sqrt', 'np.sqrt', (['((1 - beta) / (1 + beta))'], {}), '((1 - beta) / (1 + beta))\n', (12739, 12764), True, 'import numpy as np\n'), ((13097, 13153), 'numpy.sqrt', 'np.sqrt', (['(2 * (T_B - eloss1 * hc) * 1.6e-19 / (m_B * amu))'], {}), '(2 * (T_B - eloss1 * hc) * 1.6e-19 / (m_B * amu))\n', (13104, 13153), True, 'import numpy as np\n'), ((15534, 15600), 'numpy.array', 'np.array', (['[transition_detuning_pop[1], transition_detuning_pop[2]]'], {}), '([transition_detuning_pop[1], transition_detuning_pop[2]])\n', (15542, 15600), True, 'import numpy as np\n'), ((5122, 5170), 'numpy.around', 'np.around', (['levels_pops_detunes[:, 0]'], {'decimals': '(2)'}), '(levels_pops_detunes[:, 0], decimals=2)\n', (5131, 5170), True, 'import numpy as np\n'), ((14045, 14101), 'numpy.sqrt', 'np.sqrt', (['(2 * (T_B - eloss1 * hc) * 1.6e-19 / (m_B * amu))'], {}), '(2 * (T_B - eloss1 * hc) * 1.6e-19 / (m_B * amu))\n', (14052, 14101), True, 'import numpy as np\n'), ((5980, 6000), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (5986, 6000), True, 'import numpy as np\n'), ((6067, 6087), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (6073, 6087), True, 'import numpy as np\n'), ((6207, 6258), 'numpy.where', 'np.where', (['(levels_pops_detunes[:, 0] == lower_energy)'], {}), '(levels_pops_detunes[:, 0] == lower_energy)\n', (6215, 6258), True, 'import numpy as np\n'), ((6542, 6582), 'numpy.vstack', 'np.vstack', (['(levels_pops_detunes, newrow)'], {}), '((levels_pops_detunes, newrow))\n', (6551, 6582), True, 'import numpy as np\n'), ((6837, 6857), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (6843, 6857), True, 'import numpy as np\n'), ((13325, 13357), 'numpy.sqrt', 'np.sqrt', (['((1 + beta) / (1 - beta))'], {}), '((1 + beta) / (1 - beta))\n', (13332, 13357), True, 'import numpy as np\n'), ((13395, 13427), 'numpy.sqrt', 'np.sqrt', (['((1 - beta) / (1 + beta))'], {}), '((1 - beta) / (1 + beta))\n', (13402, 13427), True, 'import numpy as np\n'), ((5252, 5300), 'numpy.around', 'np.around', (['levels_pops_detunes[:, 0]'], {'decimals': '(2)'}), '(levels_pops_detunes[:, 0], decimals=2)\n', (5261, 5300), True, 'import numpy as np\n'), ((5872, 5912), 'numpy.vstack', 'np.vstack', (['(levels_pops_detunes, newrow)'], {}), '((levels_pops_detunes, newrow))\n', (5881, 5912), True, 'import numpy as np\n'), ((14289, 14321), 'numpy.sqrt', 'np.sqrt', (['((1 + beta) / (1 - beta))'], {}), '((1 + beta) / (1 - beta))\n', (14296, 14321), True, 'import numpy as np\n'), ((14374, 14406), 'numpy.sqrt', 'np.sqrt', (['((1 - beta) / (1 + beta))'], {}), '((1 - beta) / (1 + beta))\n', (14381, 14406), True, 'import numpy as np\n'), ((5483, 5539), 'numpy.array', 'np.array', (['[lower_energy, level1 - eloss1 - lower_energy]'], {}), '([lower_energy, level1 - eloss1 - lower_energy])\n', (5491, 5539), True, 'import numpy as np\n'), ((5656, 5676), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (5662, 5676), True, 'import numpy as np\n'), ((6338, 6358), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (6344, 6358), True, 'import numpy as np\n'), ((6450, 6470), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (6456, 6470), True, 'import numpy as np\n'), ((5779, 5799), 'numpy.exp', 'np.exp', (['(-dt * this_A)'], {}), '(-dt * this_A)\n', (5785, 5799), True, 'import numpy as np\n')] |
'''
This code is an implementation of policy iteration in the Grid World.
'''
__author__ = '<NAME>'
__copyright__ = 'Copyright 2019, Simple Reinforcement Learning project'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>, gmail.com}'
__status__ = 'Development'
import numpy as np
grid_width = 4
grid_height = 4
discount = 1.
reward = -1.
action = [[-1,0], [1,0], [0, -1], [0, 1]]# up, down, left, right
action_string = ['U', 'D', 'L', 'R']
policy = np.full((grid_height*grid_width, len(action)), [0.25, 0.25, 0.25, 0.25])# initial policy
TOTAL_ITERATION = 10
GRID_RENDER = True
class GridWorldMDP():
def __init__(self, grid_width, grid_height, immediate_reward, discount_factor):
self.states = np.zeros((grid_height, grid_width))
self.r = immediate_reward
self.dis_f = discount_factor
class PolicyIteration():
def __init__(self, MDP, action, init_policy):
self.action = action
self.policy = init_policy
self.MDP = MDP
self.policy_evaluation_iteration = 1000
def policy_evaluation(self):
for _ in range(self.policy_evaluation_iteration):
value = np.zeros((grid_height, grid_width))
count = 0
for i in range(grid_height):
for j in range(grid_width):
for idx, act in enumerate(action):
if (i == 0 and j == 0) or (i == grid_height-1 and j == grid_width-1):# Terminal State
value[i, j] = 0
continue
row = i + act[0] if (i + act[0] >= 0) and (i + act[0] < grid_height) else i
column = j + act[1] if (j + act[1] >= 0) and (j + act[1] < grid_width) else j
value[i, j] += round(self.policy[count, idx]*(reward + discount*self.MDP.states[row, column]), 3)
count += 1
self.MDP.states = value
return self.MDP
def polciy_improvement(self):
count = 0
for i in range(grid_height):
for j in range(grid_width):
values = []
for act in action:
row = i + act[0] if (i + act[0] >= 0) and (i + act[0] < grid_height) else i
column = j + act[1] if (j + act[1] >= 0) and (j + act[1] < grid_width) else j
values.append(self.MDP.states[row, column])
improved_policy = np.zeros(4)
values = np.asanyarray(values).round(2)
maximums = np.where(values == values.max())[0]
for idx in maximums:
improved_policy[idx] = 1/len(maximums)
self.policy[count] = improved_policy
count += 1
def show_policy(self):
current_s = np.chararray((4,4), unicode = True, itemsize = 4)
count = 0
for i in range(grid_height):
for j in range(grid_width):
if (i == 0 and j == 0) or (i == grid_height-1 and j == grid_width-1):# Terminal State
current_s[i, j] = 'T'
count += 1
continue
for idx, prob in enumerate(policy[count]):
current_s[i, j] += action_string[idx] if prob != 0. else ''
count += 1
print(current_s)
def policy_iteration(self, iteration):
for m in range(iteration):
print("@@@@@ {} Iteration ====================".format(m))
evaluated_GridWorld = self.policy_evaluation()
print(evaluated_GridWorld.states)
self.polciy_improvement()
def main():
print("==================== Policy Iteration ====================")
grid_world = GridWorldMDP(grid_width, grid_height, reward, discount)
agent = PolicyIteration(grid_world, action, policy)
agent.policy_iteration(TOTAL_ITERATION)
if __name__=="__main__":
main()
| [
"numpy.chararray",
"numpy.asanyarray",
"numpy.zeros"
] | [((776, 811), 'numpy.zeros', 'np.zeros', (['(grid_height, grid_width)'], {}), '((grid_height, grid_width))\n', (784, 811), True, 'import numpy as np\n'), ((2911, 2957), 'numpy.chararray', 'np.chararray', (['(4, 4)'], {'unicode': '(True)', 'itemsize': '(4)'}), '((4, 4), unicode=True, itemsize=4)\n', (2923, 2957), True, 'import numpy as np\n'), ((1226, 1261), 'numpy.zeros', 'np.zeros', (['(grid_height, grid_width)'], {}), '((grid_height, grid_width))\n', (1234, 1261), True, 'import numpy as np\n'), ((2543, 2554), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2551, 2554), True, 'import numpy as np\n'), ((2581, 2602), 'numpy.asanyarray', 'np.asanyarray', (['values'], {}), '(values)\n', (2594, 2602), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reads tidefac output and adapt given bctides file as required by the tidefac
outputs.
This script uses a dictionary to store the constituent values. The problem with
dictionary is it does not maintain the order of the dictionary key as they are
added. So consequently when we used the distionary structure on tidal constituent
information and not on the boundary information, it produced a tidefac file which
is not consistent - the listing of constituent in the tidal potential portion
is not the same as the listing of the tidal amplitude and phases at the boundary
nodes. The soltuion from this is to read also the boundaries while reading the
initial constituent and write them as a whole data structure or keep track of the
constituents as they are read from the file. Currently none is implemented and
it is better not to use this version until this problem is fixed.
It is to be noted that, the reason behing changing the data structure is to make
use of more pythonic way writing codes using dictionaries, instead of numpy array.
TODO: Read boundary information
@author: khan
@email: <EMAIL>
"""
from __future__ import print_function
import numpy as np
from datetime import datetime, timedelta
import os
from collections import OrderedDict
class Bctides(object):
def __init__(
self,
info='',
ntip=0,
tip_dp=0,
tip=OrderedDict(),
nbfr=0,
bfr=OrderedDict(),
nope=0,
boundaries=[]
):
self.info = info
self.nitp = ntip
self.tip_dp = tip_dp
self.tip = tip
self.nbfr = nbfr
self.bfr = bfr
self.nope = nope
self.boundaries = boundaries
def read(self, filepath):
with open(filepath) as f:
ds = f.readlines()
# First the dates
self.info = ds[0].split('\n')[0]
__lnproc = 0
# Then the tidal potential information
self.ntip, self.tip_dp = np.fromstring(ds[1].split('!')[0], count=2, sep=' ')
self.ntip = int(self.ntip)
__lnproc = 1
for i in np.arange(self.ntip):
__talpha = ds[__lnproc+1].split('\n')[0]
print('Reading Wave {:d} - {:s}'.format(i, __talpha))
__jspc, __tamp, __tfreq, __tnf, __tear = np.fromstring(ds[__lnproc+2].split('\n')[0], count=5, sep=' ')
self.tip[__talpha.strip().upper()] = OrderedDict(jspc=int(__jspc), tamp=__tamp, tfreq=__tfreq, tnf=__tnf, tear=__tear)
__lnproc = __lnproc + 2
# Reading the boundary frequencies
self.nbfr = np.fromstring(ds[__lnproc+1], count=1, sep=' ')
self.nbfr = int(self.nbfr)
__lnproc = __lnproc + 1
for i in np.arange(self.nbfr):
__alpha = ds[__lnproc+1].split('\n')[0]
__amig, __ff, __face = np.fromstring(ds[__lnproc+2].split('\n')[0], count=3, sep=' ')
self.bfr[__alpha.strip().upper()] = OrderedDict(amig=__amig, ff=__ff, face=__face)
__lnproc = __lnproc + 2
# Open boundary sagments
self.nope = ds[__lnproc+1].split(' ')[0]
self.nope = int(self.nope)
__lnproc = __lnproc + 1
# For each open boundary sagment
self.boundaries = ds[__lnproc+1:len(ds)]
def update(self, tidefac):
# Update time
self.info = tidefac.info
# Updating the tidal potential nodal factor and equilibrium argument
for talpha in self.tip.keys():
if talpha in tidefac.const.keys():
self.tip[talpha]['tnf'] = tidefac.const[talpha][0]
self.tip[talpha]['tear'] = tidefac.const[talpha][1]
# Updating the Boundary frequency nodal factors and equilibrium argument
for alpha in self.bfr.keys():
if alpha in tidefac.const.keys():
self.bfr[alpha]['ff'] = tidefac.const[alpha][0]
self.bfr[alpha]['face'] = tidefac.const[alpha][1]
def write(self, filepath):
with open(filepath, 'w') as f:
# Header information
f.write('{:s}\n'.format(self.info))
# Tidal potential
f.write('{:d} {:3.2f} !ntip, tip_dp\n'.format(int(self.ntip), float(self.tip_dp)))
for alpha in self.tip.keys():
f.write('{:s}\n{:d}\t{:.6f}\t{:.16f}\t{:.5f}\t{:.2f}\n'\
.format(alpha,\
int(self.tip[alpha]['jspc']),\
self.tip[alpha]['tamp'],\
self.tip[alpha]['tfreq'],\
self.tip[alpha]['tnf'],\
self.tip[alpha]['tear']))
# Boundary frequencies
f.write('{:d} !nbfr\n'.format(int(self.nbfr)))
for alpha in self.bfr.keys():
f.write('{:s}\n{:.16E}\t{:.6f}\t{:.2f}\n'\
.format(alpha,\
self.bfr[alpha]['amig'],\
self.bfr[alpha]['ff'],\
self.bfr[alpha]['face']))
# Open boundaries
f.write('{:d} !Number of Open Boundaries\n'.format(self.nope))
for __line in self.boundaries:
f.write(__line)
class Tidefacout(object):
def __init__(self, year=0, month=0, day=0, hour=0, rnday=0, const=OrderedDict()):
self.year = year
self.month = month
self.day = day
self.hour = hour
self.rnday = rnday
self.const = const
def read(self, filepath):
# Reading date information
with open(filepath, 'r') as f:
# Reading the date section
__ds = f.readline()
__date = np.fromstring(__ds, dtype=float, count=4, sep=',')
self.year = __date[0]
self.month = int(__date[1])
self.day = int(__date[2])
self.hour = int(__date[3])
# Reading the run length section
__ds = f.readline()
__rnday = np.fromstring(__ds, dtype=float, count=1, sep=',')
self.rnday = __rnday[0]
# Reading the constants, node factor and eq. argument ref. to GM in deg.
__const = np.genfromtxt(fname=filepath, dtype=None, skip_header=6, \
delimiter=None, autostrip=True)
__const = np.array([[i for i in j] for j in __const])
__const = OrderedDict({i[0].upper():[float(j) for j in i[1:3]] for i in __const})
self.const = __const
# Tidefac header information
self.info = f'{self.rnday:.2f} days - {self.year:4.0f}/{self.month:02.0f}/{self.day:02.0f} {self.hour:02.2f} UTC'
def __str__(self):
return(self.info)
if __name__=='__main__':
path = '/run/media/khan/Workbench/Projects/Surge Model/Bctides'
bctide_source = os.path.join(path, 'bctides.ini')
bctide_update = os.path.join(path, 'bctides.in')
tfacfile = os.path.join(path, 'tide_fac.out')
bctides = Bctides()
bctides.read(filepath=bctide_source)
tfac = Tidefacout()
tfac.read(filepath=tfacfile)
bctides.update(tfac)
bctides.write(filepath=bctide_update)
| [
"numpy.genfromtxt",
"numpy.arange",
"numpy.array",
"collections.OrderedDict",
"os.path.join",
"numpy.fromstring"
] | [((7074, 7107), 'os.path.join', 'os.path.join', (['path', '"""bctides.ini"""'], {}), "(path, 'bctides.ini')\n", (7086, 7107), False, 'import os\n'), ((7128, 7160), 'os.path.join', 'os.path.join', (['path', '"""bctides.in"""'], {}), "(path, 'bctides.in')\n", (7140, 7160), False, 'import os\n'), ((7176, 7210), 'os.path.join', 'os.path.join', (['path', '"""tide_fac.out"""'], {}), "(path, 'tide_fac.out')\n", (7188, 7210), False, 'import os\n'), ((1428, 1441), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1439, 1441), False, 'from collections import OrderedDict\n'), ((1473, 1486), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1484, 1486), False, 'from collections import OrderedDict\n'), ((5574, 5587), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5585, 5587), False, 'from collections import OrderedDict\n'), ((6447, 6539), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'filepath', 'dtype': 'None', 'skip_header': '(6)', 'delimiter': 'None', 'autostrip': '(True)'}), '(fname=filepath, dtype=None, skip_header=6, delimiter=None,\n autostrip=True)\n', (6460, 6539), True, 'import numpy as np\n'), ((6588, 6631), 'numpy.array', 'np.array', (['[[i for i in j] for j in __const]'], {}), '([[i for i in j] for j in __const])\n', (6596, 6631), True, 'import numpy as np\n'), ((2183, 2203), 'numpy.arange', 'np.arange', (['self.ntip'], {}), '(self.ntip)\n', (2192, 2203), True, 'import numpy as np\n'), ((2711, 2760), 'numpy.fromstring', 'np.fromstring', (['ds[__lnproc + 1]'], {'count': '(1)', 'sep': '""" """'}), "(ds[__lnproc + 1], count=1, sep=' ')\n", (2724, 2760), True, 'import numpy as np\n'), ((2868, 2888), 'numpy.arange', 'np.arange', (['self.nbfr'], {}), '(self.nbfr)\n', (2877, 2888), True, 'import numpy as np\n'), ((5945, 5995), 'numpy.fromstring', 'np.fromstring', (['__ds'], {'dtype': 'float', 'count': '(4)', 'sep': '""","""'}), "(__ds, dtype=float, count=4, sep=',')\n", (5958, 5995), True, 'import numpy as np\n'), ((6260, 6310), 'numpy.fromstring', 'np.fromstring', (['__ds'], {'dtype': 'float', 'count': '(1)', 'sep': '""","""'}), "(__ds, dtype=float, count=1, sep=',')\n", (6273, 6310), True, 'import numpy as np\n'), ((3100, 3146), 'collections.OrderedDict', 'OrderedDict', ([], {'amig': '__amig', 'ff': '__ff', 'face': '__face'}), '(amig=__amig, ff=__ff, face=__face)\n', (3111, 3146), False, 'from collections import OrderedDict\n')] |
import numpy as np
from scipy import signal
from .utils.jade import jadeR
from .base import VHRMethod
class ICA(VHRMethod):
methodName = 'ICA'
def __init__(self, **kwargs):
self.tech = kwargs['ICAmethod']
super(ICA, self).__init__(**kwargs)
def apply(self, X):
""" ICA method """
# -- JADE (ICA)
if self.tech == 'jade':
W = self.__jade(X)
elif 'fastICA':
W = self.__fastICA(X)
bvp = np.dot(W,X) # 3-dim signal!!
return bvp
def __jade(self, X):
W = np.asarray(jadeR(X, 3, False))
return W
def __fastICA(self, X):
from sklearn.decomposition import FastICA, PCA
from numpy.linalg import inv, eig
# -- PCA
pca = PCA(n_components=3)
Y = pca.fit_transform(X)
# -- ICA
ica = FastICA(n_components=3, max_iter=2000)
S = ica.fit_transform(Y)
return S.T | [
"numpy.dot",
"sklearn.decomposition.FastICA",
"sklearn.decomposition.PCA"
] | [((506, 518), 'numpy.dot', 'np.dot', (['W', 'X'], {}), '(W, X)\n', (512, 518), True, 'import numpy as np\n'), ((808, 827), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (811, 827), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((893, 931), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': '(3)', 'max_iter': '(2000)'}), '(n_components=3, max_iter=2000)\n', (900, 931), False, 'from sklearn.decomposition import FastICA, PCA\n')] |
from __future__ import division, print_function
import math
import numpy as np
import numpy.random
import scipy as sp
import scipy.stats
def ballot_comparison_pvalue(n, gamma, o1, u1, o2, u2, reported_margin, N, null_lambda=1):
"""
Compute the p-value for a ballot comparison audit using Kaplan-Markov
Parameters
----------
n : int
sample size
gamma : float
value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement
o1 : int
number of ballots that overstate any
margin by one vote but no margin by two votes
u1 : int
number of ballots that understate any margin by
exactly one vote, and every margin by at least one vote
o2 : int
number of ballots that overstate any margin by two votes
u2 : int
number of ballots that understate every margin by two votes
reported_margin : float
the smallest reported margin *in votes* between a winning
and losing candidate for the contest as a whole, including any other strata
N : int
number of votes cast in the stratum
null_lambda : float
fraction of the overall margin (in votes) to test for in the stratum. If the overall margin is reported_margin,
test that the overstatement in this stratum does not exceed null_lambda*reported_margin
Returns
-------
pvalue
"""
U_s = 2*N/reported_margin
log_pvalue = n*np.log(1 - null_lambda/(gamma*U_s)) - \
o1*np.log(1 - 1/(2*gamma)) - \
o2*np.log(1 - 1/gamma) - \
u1*np.log(1 + 1/(2*gamma)) - \
u2*np.log(1 + 1/gamma)
pvalue = np.exp(log_pvalue)
return np.min([pvalue, 1])
def findNmin_ballot_comparison(alpha, gamma, o1, u1, o2, u2,
reported_margin, N, null_lambda=1):
"""
Compute the smallest sample size for which a ballot comparison
audit, using Kaplan-Markov, with the given statistics could stop
Parameters
----------
alpha : float
risk limit
gamma : float
value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement
o1 : int
number of ballots that overstate any
margin by one vote but no margin by two votes
u1 : int
number of ballots that understate any margin by
exactly one vote, and every margin by at least one vote
o2 : int
number of ballots that overstate any margin by two votes
u2 : int
number of ballots that understate every margin by two votes
reported_margin : float
the smallest reported margin *in votes* between a winning
and losing candidate in the contest as a whole, including any other strata
N : int
number of votes cast in the stratum
null_lambda : float
fraction of the overall margin (in votes) to test for in the stratum. If the overall margin is reported_margin,
test that the overstatement in this stratum does not exceed null_lambda*reported_margin
Returns
-------
n
"""
U_s = 2*N/reported_margin
val = -gamma*U_s/null_lambda * (np.log(alpha) +
o1*np.log(1 - 1/(2*gamma)) + \
o2*np.log(1 - 1/gamma) + \
u1*np.log(1 + 1/(2*gamma)) + \
u2*np.log(1 + 1/gamma) )
val2 = o1+o2+u1+u2
return np.max([int(val)+1, val2])
def findNmin_ballot_comparison_rates(alpha, gamma, r1, s1, r2, s2,
reported_margin, N, null_lambda=1):
"""
Compute the smallest sample size for which a ballot comparison
audit, using Kaplan-Markov, with the given statistics could stop
Parameters
----------
alpha : float
risk limit
gamma : float
value > 1 to inflate the error bound, to avoid requiring full hand count for a single 2-vote overstatement
r1 : int
hypothesized rate of ballots that overstate any
margin by one vote but no margin by two votes
s1 : int
hypothesizedrate of ballots that understate any margin by
exactly one vote, and every margin by at least one vote
r2 : int
hypothesizedrate of ballots that overstate any margin by two votes
s2 : int
hypothesizedrate of ballots that understate every margin by two votes
reported_margin : float
the smallest reported margin *in votes* between a winning
and losing candidate in the contest as a whole, including any other strata
N : int
number of votes cast in the stratum
null_lambda : float
fraction of the overall margin (in votes) to test for in the stratum. If the overall margin is reported_margin,
test that the overstatement in this stratum does not exceed null_lambda*reported_margin
Returns
-------
n
"""
U_s = 2*N/reported_margin
denom = (np.log(1 - null_lambda/(U_s*gamma)) -
r1*np.log(1 - 1/(2*gamma))- \
r2*np.log(1 - 1/gamma) - \
s1*np.log(1 + 1/(2*gamma)) - \
s2*np.log(1 + 1/gamma) )
return np.ceil(np.log(alpha)/denom) if denom < 0 else np.nan
# unit tests from "A Gentle Introduction..."
def gentle_intro_tests():
np.testing.assert_array_less(ballot_comparison_pvalue(80, 1.03905, 0,1,0,0,5,100), 0.1)
np.testing.assert_array_less(ballot_comparison_pvalue(96, 1.03905, 0,0,0,0,5,100), 0.1)
np.testing.assert_equal(findNmin_ballot_comparison(0.1, 1.03905, 0,1,0,0,5,100), 80)
np.testing.assert_equal(findNmin_ballot_comparison(0.1, 1.03905, 0,0,0,0,5,100), 96)
# unit tests from pbstark/S157F17/audit.ipynb
def stat157_tests():
np.testing.assert_equal(ballot_comparison_pvalue(n=200, gamma=1.03905, o1=1, u1=0, o2=0, u2=0,
reported_margin=(354040 - 337589), N=354040+337589+33234),
0.21438135077031845)
np.testing.assert_equal(findNmin_ballot_comparison_rates(alpha=0.05, gamma=1.03905,
r1=.001, r2=0, s1=.001, s2=0,
reported_margin=5, N=100),
125)
assert math.isnan(findNmin_ballot_comparison_rates(alpha=0.05, gamma=1.03905,
r1=.05, r2=0, s1=0, s2=0,
reported_margin=5, N=100))
if __name__ == "__main__":
gentle_intro_tests()
stat157_tests() | [
"numpy.min",
"numpy.log",
"numpy.exp"
] | [((1730, 1748), 'numpy.exp', 'np.exp', (['log_pvalue'], {}), '(log_pvalue)\n', (1736, 1748), True, 'import numpy as np\n'), ((1760, 1779), 'numpy.min', 'np.min', (['[pvalue, 1]'], {}), '([pvalue, 1])\n', (1766, 1779), True, 'import numpy as np\n'), ((1697, 1718), 'numpy.log', 'np.log', (['(1 + 1 / gamma)'], {}), '(1 + 1 / gamma)\n', (1703, 1718), True, 'import numpy as np\n'), ((5201, 5222), 'numpy.log', 'np.log', (['(1 + 1 / gamma)'], {}), '(1 + 1 / gamma)\n', (5207, 5222), True, 'import numpy as np\n'), ((1646, 1673), 'numpy.log', 'np.log', (['(1 + 1 / (2 * gamma))'], {}), '(1 + 1 / (2 * gamma))\n', (1652, 1673), True, 'import numpy as np\n'), ((3427, 3448), 'numpy.log', 'np.log', (['(1 + 1 / gamma)'], {}), '(1 + 1 / gamma)\n', (3433, 3448), True, 'import numpy as np\n'), ((5154, 5181), 'numpy.log', 'np.log', (['(1 + 1 / (2 * gamma))'], {}), '(1 + 1 / (2 * gamma))\n', (5160, 5181), True, 'import numpy as np\n'), ((5242, 5255), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (5248, 5255), True, 'import numpy as np\n'), ((1599, 1620), 'numpy.log', 'np.log', (['(1 - 1 / gamma)'], {}), '(1 - 1 / gamma)\n', (1605, 1620), True, 'import numpy as np\n'), ((3380, 3407), 'numpy.log', 'np.log', (['(1 + 1 / (2 * gamma))'], {}), '(1 + 1 / (2 * gamma))\n', (3386, 3407), True, 'import numpy as np\n'), ((5008, 5047), 'numpy.log', 'np.log', (['(1 - null_lambda / (U_s * gamma))'], {}), '(1 - null_lambda / (U_s * gamma))\n', (5014, 5047), True, 'import numpy as np\n'), ((5111, 5132), 'numpy.log', 'np.log', (['(1 - 1 / gamma)'], {}), '(1 - 1 / gamma)\n', (5117, 5132), True, 'import numpy as np\n'), ((1485, 1524), 'numpy.log', 'np.log', (['(1 - null_lambda / (gamma * U_s))'], {}), '(1 - null_lambda / (gamma * U_s))\n', (1491, 1524), True, 'import numpy as np\n'), ((1548, 1575), 'numpy.log', 'np.log', (['(1 - 1 / (2 * gamma))'], {}), '(1 - 1 / (2 * gamma))\n', (1554, 1575), True, 'import numpy as np\n'), ((3255, 3268), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (3261, 3268), True, 'import numpy as np\n'), ((3337, 3358), 'numpy.log', 'np.log', (['(1 - 1 / gamma)'], {}), '(1 - 1 / gamma)\n', (3343, 3358), True, 'import numpy as np\n'), ((5065, 5092), 'numpy.log', 'np.log', (['(1 - 1 / (2 * gamma))'], {}), '(1 - 1 / (2 * gamma))\n', (5071, 5092), True, 'import numpy as np\n'), ((3290, 3317), 'numpy.log', 'np.log', (['(1 - 1 / (2 * gamma))'], {}), '(1 - 1 / (2 * gamma))\n', (3296, 3317), True, 'import numpy as np\n')] |
# 两层全链接网络
# pytorch 官方示例
import numpy as np
# N为样本大小; D_in为样本维度
# H为隐藏层维度; D_out 为输出维度(分类数)
N,D_in, H, D_out = 64,1000,100,10
#生成随机样本
x = np.random.randn(N,D_in)
y = np.random.randn(N,D_out)
#生成随机权重
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
for t in range(500):
#前向传播:计算Y的预测值
h = x.dot(w1)
h_relu = np.maximum(h,0) #ReLU 激活函数
y_pred = h_relu.dot(w2)
#计算误差并输出
loss = np.square(y_pred - y).sum()
print(t,loss)
#更新权重;
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.T.dot(grad_y_pred)
grad_h_relu = grad_y_pred.dot(w2.T)
grad_h = grad_h_relu.copy()
grad_h[h < 0] = 0
grad_w1 = x.T.dot(grad_h)
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2 | [
"numpy.square",
"numpy.maximum",
"numpy.random.randn"
] | [((140, 164), 'numpy.random.randn', 'np.random.randn', (['N', 'D_in'], {}), '(N, D_in)\n', (155, 164), True, 'import numpy as np\n'), ((168, 193), 'numpy.random.randn', 'np.random.randn', (['N', 'D_out'], {}), '(N, D_out)\n', (183, 193), True, 'import numpy as np\n'), ((207, 231), 'numpy.random.randn', 'np.random.randn', (['D_in', 'H'], {}), '(D_in, H)\n', (222, 231), True, 'import numpy as np\n'), ((237, 262), 'numpy.random.randn', 'np.random.randn', (['H', 'D_out'], {}), '(H, D_out)\n', (252, 262), True, 'import numpy as np\n'), ((355, 371), 'numpy.maximum', 'np.maximum', (['h', '(0)'], {}), '(h, 0)\n', (365, 371), True, 'import numpy as np\n'), ((435, 456), 'numpy.square', 'np.square', (['(y_pred - y)'], {}), '(y_pred - y)\n', (444, 456), True, 'import numpy as np\n')] |
"""
Neighborhood Components Analysis (NCA)
"""
from __future__ import absolute_import
import warnings
import time
import sys
import numpy as np
from scipy.optimize import minimize
from sklearn.metrics import pairwise_distances
from sklearn.exceptions import ConvergenceWarning, ChangedBehaviorWarning
from sklearn.utils.fixes import logsumexp
from sklearn.base import TransformerMixin
from ._util import _initialize_components, _check_n_components
from .base_metric import MahalanobisMixin
EPS = np.finfo(float).eps
class NCA(MahalanobisMixin, TransformerMixin):
"""Neighborhood Components Analysis (NCA)
NCA is a distance metric learning algorithm which aims to improve the
accuracy of nearest neighbors classification compared to the standard
Euclidean distance. The algorithm directly maximizes a stochastic variant
of the leave-one-out k-nearest neighbors(KNN) score on the training set.
It can also learn a low-dimensional linear transformation of data that can
be used for data visualization and fast classification.
Read more in the :ref:`User Guide <nca>`.
Parameters
----------
init : None, string or numpy array, optional (default=None)
Initialization of the linear transformation. Possible options are
'auto', 'pca', 'identity', 'random', and a numpy array of shape
(n_features_a, n_features_b). If None, will be set automatically to
'auto' (this option is to raise a warning if 'init' is not set,
and stays to its default value None, in v0.5.0).
'auto'
Depending on ``n_components``, the most reasonable initialization
will be chosen. If ``n_components <= n_classes`` we use 'lda', as
it uses labels information. If not, but
``n_components < min(n_features, n_samples)``, we use 'pca', as
it projects data in meaningful directions (those of higher
variance). Otherwise, we just use 'identity'.
'pca'
``n_components`` principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
(See `sklearn.decomposition.PCA`)
'lda'
``min(n_components, n_classes)`` most discriminative
components of the inputs passed to :meth:`fit` will be used to
initialize the transformation. (If ``n_components > n_classes``,
the rest of the components will be zero.) (See
`sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
'identity'
If ``n_components`` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first ``n_components`` rows.
'random'
The initial transformation will be a random array of shape
`(n_components, n_features)`. Each value is sampled from the
standard normal distribution.
numpy array
n_features_b must match the dimensionality of the inputs passed to
:meth:`fit` and n_features_a must be less than or equal to that.
If ``n_components`` is not None, n_features_a must match it.
n_components : int or None, optional (default=None)
Dimensionality of reduced space (if None, defaults to dimension of X).
num_dims : Not used
.. deprecated:: 0.5.0
`num_dims` was deprecated in version 0.5.0 and will
be removed in 0.6.0. Use `n_components` instead.
max_iter : int, optional (default=100)
Maximum number of iterations done by the optimization algorithm.
tol : float, optional (default=None)
Convergence tolerance for the optimization.
verbose : bool, optional (default=False)
Whether to print progress messages or not.
random_state : int or numpy.RandomState or None, optional (default=None)
A pseudo random number generator object or a seed for it if int. If
``init='random'``, ``random_state`` is used to initialize the random
transformation. If ``init='pca'``, ``random_state`` is passed as an
argument to PCA when initializing the transformation.
Examples
--------
>>> import numpy as np
>>> from metric_learn import NCA
>>> from sklearn.datasets import load_iris
>>> iris_data = load_iris()
>>> X = iris_data['data']
>>> Y = iris_data['target']
>>> nca = NCA(max_iter=1000)
>>> nca.fit(X, Y)
Attributes
----------
n_iter_ : `int`
The number of iterations the solver has run.
components_ : `numpy.ndarray`, shape=(n_components, n_features)
The learned linear transformation ``L``.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>. `Neighbourhood
Components Analysis
<http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf>`_.
Advances in Neural Information Processing Systems. 17, 513-520, 2005.
.. [2] Wikipedia entry on `Neighborhood Components Analysis
<https://en.wikipedia.org/wiki/Neighbourhood_components_analysis>`_
"""
def __init__(self, init=None, n_components=None, num_dims='deprecated',
max_iter=100, tol=None, verbose=False, preprocessor=None,
random_state=None):
self.n_components = n_components
self.init = init
self.num_dims = num_dims
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.random_state = random_state
super(NCA, self).__init__(preprocessor)
def fit(self, X, y):
"""
X: data matrix, (n x d)
y: scalar labels, (n)
"""
if self.num_dims != 'deprecated':
warnings.warn('"num_dims" parameter is not used.'
' It has been deprecated in version 0.5.0 and will be'
' removed in 0.6.0. Use "n_components" instead',
DeprecationWarning)
X, labels = self._prepare_inputs(X, y, ensure_min_samples=2)
n, d = X.shape
n_components = _check_n_components(d, self.n_components)
# Measure the total training time
train_time = time.time()
# Initialize A
# if the init is the default (None), we raise a warning
if self.init is None:
# TODO: replace init=None by init='auto' in v0.6.0 and remove the warning
msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, "
"the default init will now be set to 'auto', instead of the "
"previous scaling matrix. If you still want to use the same "
"scaling matrix as before, set "
"init=np.eye(X.shape[1])/(np.maximum(X.max(axis=0)-X.min(axis=0)"
", EPS))). This warning will disappear in v0.6.0, and `init` "
"parameter's default value will be set to 'auto'.")
warnings.warn(msg, ChangedBehaviorWarning)
init = 'auto'
else:
init = self.init
A = _initialize_components(n_components, X, labels, init, self.verbose,
self.random_state)
# Run NCA
mask = labels[:, np.newaxis] == labels[np.newaxis, :]
optimizer_params = {'method': 'L-BFGS-B',
'fun': self._loss_grad_lbfgs,
'args': (X, mask, -1.0),
'jac': True,
'x0': A.ravel(),
'options': dict(maxiter=self.max_iter),
'tol': self.tol
}
# Call the optimizer
self.n_iter_ = 0
opt_result = minimize(**optimizer_params)
self.components_ = opt_result.x.reshape(-1, X.shape[1])
self.n_iter_ = opt_result.nit
# Stop timer
train_time = time.time() - train_time
if self.verbose:
cls_name = self.__class__.__name__
# Warn the user if the algorithm did not converge
if not opt_result.success:
warnings.warn('[{}] NCA did not converge: {}'.format(
cls_name, opt_result.message), ConvergenceWarning)
print('[{}] Training took {:8.2f}s.'.format(cls_name, train_time))
return self
def _loss_grad_lbfgs(self, A, X, mask, sign=1.0):
if self.n_iter_ == 0 and self.verbose:
header_fields = ['Iteration', 'Objective Value', 'Time(s)']
header_fmt = '{:>10} {:>20} {:>10}'
header = header_fmt.format(*header_fields)
cls_name = self.__class__.__name__
print('[{cls}]'.format(cls=cls_name))
print('[{cls}] {header}\n[{cls}] {sep}'.format(cls=cls_name,
header=header,
sep='-' * len(header)))
start_time = time.time()
A = A.reshape(-1, X.shape[1])
X_embedded = np.dot(X, A.T) # (n_samples, n_components)
# Compute softmax distances
p_ij = pairwise_distances(X_embedded, squared=True)
np.fill_diagonal(p_ij, np.inf)
p_ij = np.exp(-p_ij - logsumexp(-p_ij, axis=1)[:, np.newaxis])
# (n_samples, n_samples)
# Compute loss
masked_p_ij = p_ij * mask
p = masked_p_ij.sum(axis=1, keepdims=True) # (n_samples, 1)
loss = p.sum()
# Compute gradient of loss w.r.t. `transform`
weighted_p_ij = masked_p_ij - p_ij * p
weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
np.fill_diagonal(weighted_p_ij_sym, - weighted_p_ij.sum(axis=0))
gradient = 2 * (X_embedded.T.dot(weighted_p_ij_sym)).dot(X)
if self.verbose:
start_time = time.time() - start_time
values_fmt = '[{cls}] {n_iter:>10} {loss:>20.6e} {start_time:>10.2f}'
print(values_fmt.format(cls=self.__class__.__name__,
n_iter=self.n_iter_, loss=loss,
start_time=start_time))
sys.stdout.flush()
self.n_iter_ += 1
return sign * loss, sign * gradient.ravel()
| [
"numpy.fill_diagonal",
"scipy.optimize.minimize",
"sklearn.utils.fixes.logsumexp",
"sklearn.metrics.pairwise_distances",
"time.time",
"numpy.finfo",
"sys.stdout.flush",
"numpy.dot",
"warnings.warn"
] | [((499, 514), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (507, 514), True, 'import numpy as np\n'), ((5972, 5983), 'time.time', 'time.time', ([], {}), '()\n', (5981, 5983), False, 'import time\n'), ((7385, 7413), 'scipy.optimize.minimize', 'minimize', ([], {}), '(**optimizer_params)\n', (7393, 7413), False, 'from scipy.optimize import minimize\n'), ((8506, 8517), 'time.time', 'time.time', ([], {}), '()\n', (8515, 8517), False, 'import time\n'), ((8570, 8584), 'numpy.dot', 'np.dot', (['X', 'A.T'], {}), '(X, A.T)\n', (8576, 8584), True, 'import numpy as np\n'), ((8657, 8701), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X_embedded'], {'squared': '(True)'}), '(X_embedded, squared=True)\n', (8675, 8701), False, 'from sklearn.metrics import pairwise_distances\n'), ((8706, 8736), 'numpy.fill_diagonal', 'np.fill_diagonal', (['p_ij', 'np.inf'], {}), '(p_ij, np.inf)\n', (8722, 8736), True, 'import numpy as np\n'), ((5537, 5714), 'warnings.warn', 'warnings.warn', (['""""num_dims" parameter is not used. It has been deprecated in version 0.5.0 and will be removed in 0.6.0. Use "n_components" instead"""', 'DeprecationWarning'], {}), '(\n \'"num_dims" parameter is not used. It has been deprecated in version 0.5.0 and will be removed in 0.6.0. Use "n_components" instead\'\n , DeprecationWarning)\n', (5550, 5714), False, 'import warnings\n'), ((6669, 6711), 'warnings.warn', 'warnings.warn', (['msg', 'ChangedBehaviorWarning'], {}), '(msg, ChangedBehaviorWarning)\n', (6682, 6711), False, 'import warnings\n'), ((7544, 7555), 'time.time', 'time.time', ([], {}), '()\n', (7553, 7555), False, 'import time\n'), ((9585, 9603), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9601, 9603), False, 'import sys\n'), ((9293, 9304), 'time.time', 'time.time', ([], {}), '()\n', (9302, 9304), False, 'import time\n'), ((8763, 8787), 'sklearn.utils.fixes.logsumexp', 'logsumexp', (['(-p_ij)'], {'axis': '(1)'}), '(-p_ij, axis=1)\n', (8772, 8787), False, 'from sklearn.utils.fixes import logsumexp\n')] |
import os
import numpy as np
from rpeakdetection.rpeak_detector import RPeakDetector
rpd = RPeakDetector()
evaluation_width = 36
ecg_folder = "../data/ecg/mitdb/"
peaks_folder = "../data/peaks/pan_tompkins/"
precisions = list()
recalls = list()
for name in os.listdir(peaks_folder):
peaks = list()
file = open(peaks_folder + name, "r")
name = name.replace(".tsv", "")
for line in file:
peak = line.replace("\n", "")
peaks.append(int(peak))
precision, recall = rpd.evaluate(peaks, ecg_folder + name, evaluation_width )
precisions.append(precision)
recalls.append(recall)
print("av prec")
print(np.mean(precisions))
print("av recall")
print(np.mean(recalls)) | [
"numpy.mean",
"rpeakdetection.rpeak_detector.RPeakDetector",
"os.listdir"
] | [((91, 106), 'rpeakdetection.rpeak_detector.RPeakDetector', 'RPeakDetector', ([], {}), '()\n', (104, 106), False, 'from rpeakdetection.rpeak_detector import RPeakDetector\n'), ((257, 281), 'os.listdir', 'os.listdir', (['peaks_folder'], {}), '(peaks_folder)\n', (267, 281), False, 'import os\n'), ((637, 656), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (644, 656), True, 'import numpy as np\n'), ((683, 699), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (690, 699), True, 'import numpy as np\n')] |
import numpy as np
import torch.utils.data
class MultiEpochSampler(torch.utils.data.Sampler):
r"""Samples elements randomly over multiple epochs
Arguments:
data_source (Dataset): dataset to sample from
num_iter (int) : Number of times to loop over the dataset
start_itr (int) : which iteration to begin from
"""
def __init__(self, data_source, num_iter, start_itr=0, batch_size=128):
super().__init__(data_source)
self.data_source = data_source
self.dataset_size = len(self.data_source)
self.num_iter = num_iter
self.start_itr = start_itr
self.batch_size = batch_size
self.num_epochs = int(np.ceil((self.num_iter * self.batch_size) / float(self.dataset_size)))
if not isinstance(self.dataset_size, int) or self.dataset_size <= 0:
raise ValueError("dataset size should be a positive integeral "
"value, but got dataset_size={}".format(self.dataset_size))
def __iter__(self):
n = self.dataset_size
# Determine number of epochs
num_epochs = int(np.ceil(((self.num_iter - self.start_itr) * self.batch_size) / float(n)))
out = np.concatenate([np.random.permutation(n) for epoch in range(self.num_epochs)])[-num_epochs * n: self.num_iter * self.batch_size]
out = out[(self.start_itr * self.batch_size % n):]
return iter(out)
def __len__(self):
return (self.num_iter - self.start_itr) * self.batch_size
| [
"numpy.random.permutation"
] | [((1225, 1249), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (1246, 1249), True, 'import numpy as np\n')] |
"""Ray-distributed environment stepper."""
import typing
import numpy as np
import ray
from alpacka.batch_steppers import core
from alpacka.batch_steppers import worker_utils
class RayObject(typing.NamedTuple):
"""Keeps value and id of an object in the Ray Object Store."""
id: typing.Any
value: typing.Any
@classmethod
def from_value(cls, value, weakref=False):
return cls(ray.put(value, weakref=weakref), value)
class RayBatchStepper(core.BatchStepper):
"""Batch stepper running remotely using Ray.
Runs predictions and steps environments for all Agents separately in their
own workers.
It's highly recommended to pass params to run_episode_batch as a numpy array
or a collection of numpy arrays. Then each worker can retrieve params with
zero-copy operation on each node.
"""
def __init__(
self,
env_class,
agent_class,
network_fn,
n_envs,
output_dir,
compress_episodes=True,
):
super().__init__(env_class, agent_class, network_fn, n_envs, output_dir)
config = worker_utils.get_config(env_class, agent_class, network_fn)
ray_worker_cls = ray.remote(worker_utils.Worker)
if not ray.is_initialized():
kwargs = {
# Size of the Plasma object store, hardcoded to 1GB for now.
# TODO(xxx): Gin-configure if we ever need to change it.
'object_store_memory': int(1e9),
}
ray.init(**kwargs)
self.workers = [
ray_worker_cls.remote( # pylint: disable=no-member
env_class, agent_class, network_fn, config,
worker_utils.init_hooks, compress_episodes
)
for _ in range(n_envs)
]
self._params = RayObject(None, None)
self._solve_kwargs_per_worker = [
RayObject(None, None) for _ in range(self.n_envs)
]
self._compress_episodes = compress_episodes
def _run_episode_batch(self, params, solve_kwargs_per_agent):
# Optimization, don't send the same parameters again.
if self._params.value is None or not all(
[np.array_equal(p1, p2)
for p1, p2 in zip(params, self._params.value)]
):
self._params = RayObject.from_value(params)
# TODO(xxx): Don't send the same solve kwargs again. This is more
# problematic than with params, as values may have very
# different types e.g. basic data types or np.ndarray or ???.
self._solve_kwargs_per_worker = [
RayObject.from_value(solve_kwargs)
for solve_kwargs in solve_kwargs_per_agent
]
episodes = ray.get([
w.run.remote(self._params.id, solve_kwargs.id)
for w, solve_kwargs in
zip(self.workers, self._solve_kwargs_per_worker)]
)
if self._compress_episodes:
episodes = [
worker_utils.decompress_episode(episode)
for episode in episodes
]
return episodes
| [
"ray.init",
"ray.remote",
"alpacka.batch_steppers.worker_utils.get_config",
"alpacka.batch_steppers.worker_utils.decompress_episode",
"ray.is_initialized",
"ray.put",
"numpy.array_equal"
] | [((1112, 1171), 'alpacka.batch_steppers.worker_utils.get_config', 'worker_utils.get_config', (['env_class', 'agent_class', 'network_fn'], {}), '(env_class, agent_class, network_fn)\n', (1135, 1171), False, 'from alpacka.batch_steppers import worker_utils\n'), ((1197, 1228), 'ray.remote', 'ray.remote', (['worker_utils.Worker'], {}), '(worker_utils.Worker)\n', (1207, 1228), False, 'import ray\n'), ((408, 439), 'ray.put', 'ray.put', (['value'], {'weakref': 'weakref'}), '(value, weakref=weakref)\n', (415, 439), False, 'import ray\n'), ((1245, 1265), 'ray.is_initialized', 'ray.is_initialized', ([], {}), '()\n', (1263, 1265), False, 'import ray\n'), ((1515, 1533), 'ray.init', 'ray.init', ([], {}), '(**kwargs)\n', (1523, 1533), False, 'import ray\n'), ((3011, 3051), 'alpacka.batch_steppers.worker_utils.decompress_episode', 'worker_utils.decompress_episode', (['episode'], {}), '(episode)\n', (3042, 3051), False, 'from alpacka.batch_steppers import worker_utils\n'), ((2205, 2227), 'numpy.array_equal', 'np.array_equal', (['p1', 'p2'], {}), '(p1, p2)\n', (2219, 2227), True, 'import numpy as np\n')] |
import os
import time
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import animation
from algorithms.gradient_descent_2d import GradientDescent2D
from algorithms.momentum_2d import Momentum2D
plt.style.use('seaborn')
def getArguments():
parser = argparse.ArgumentParser(description='Parameters to tweak gradient descent.')
parser.add_argument('--lr', type=float, default=3e-2,
help='Learning rate. Set to 0.2 to see gradient descent NOT converging. Defaults to 0.03')
parser.add_argument('--max_iterations', type=int, default=150,
help='Maximum iterations for gradient descent to run. Defaults to 150')
parser.add_argument('--start_point', type=float, default=1.0,
help='Starting point for gradient descent. Defaults to 1.0')
parser.add_argument('-e', '--epsilon', type=float, default=1e-3,
help='Epsilon for checking convergence. Defaults to 0.001')
parser.add_argument('-r', '--random', action='store_true',
help='Flag to initialize a random starting point')
parser.add_argument('-s', '--save', action='store_true',
help="Flag to save visualizations and animations")
parser.add_argument('-l', '--length', type=int, default=5,
help="Length of the animation in seconds. Defaults to 5")
parser.add_argument('--use-momentum', action='store_true',
help='Flag to use momentum in gradient descent')
parser.add_argument('--momentum', type=float, default=0.3,
help='Momentum for gradient descent. Only used when use-momentum is True. Defaults to 0.3')
return parser.parse_args()
def animate(i, dataset, line):
line.set_data(dataset[:, :i])
return line
def plotAndSaveGraphs(gd, args):
fig = plt.figure(figsize=(16, 9))
# plot the original function
ax = fig.add_subplot(111)
x = np.linspace(-2.5, 1, 1000)
y = gd.f(x)
ax.plot(x, y, c='b', label='function', alpha=0.6)
# destructure history object
history = gd.getHistory()
gradientHistory = history['grads']
xHistory = history['x']
yHistory = gd.f(np.array(xHistory))
dataset = np.array([xHistory, yHistory])
totalIterations = len(xHistory) - 1
line = ax.plot(dataset[0], dataset[1], label='optimization', c='r', marker='.', alpha=0.4)[0]
ax.set_title(f'Iterations: {totalIterations} lr: {args.lr}')
ax.set_xlabel('X')
ax.set_ylabel('f(x)')
ax.legend()
lengthOfVideo = args.length
nFrames = totalIterations + 1
interval = lengthOfVideo * 1000 / nFrames
fps = (1 / (interval / 1000))
print('=' * 80)
print('[INFO]\t\tParameters for Animation')
print('=' * 80)
print(f'[INFO] Duration of video: {lengthOfVideo} seconds')
print(f'[DEBUG] Total number of frames: {nFrames}')
print(f'[DEBUG] Interval for each frame: {interval}')
print(f'[DEBUG] FPS of video: {fps}')
print('=' * 80)
ani = animation.FuncAnimation(fig, animate, fargs=(dataset, line),
frames=nFrames, blit=False,
interval=interval, repeat=True)
# make directories
if args.save:
pathToDirectory = os.path.join('visualizations', 'gradient_descent')
if not os.path.exists(pathToDirectory):
os.makedirs(pathToDirectory)
# save animation
if args.save:
fileName = os.path.join(pathToDirectory, 'GradientDescent2D.mp4')
print('[INFO] Saving animation...')
startTime = time.time()
ani.save(fileName, fps=fps)
timeDifference = time.time() - startTime
print(f'[INFO] Animation saved to {fileName}. Took {timeDifference:.2f} seconds.')
plt.close()
else:
plt.show()
sns.kdeplot(x=gradientHistory, fill=True)
plt.xlabel('Gradients')
plt.title('Distribution of Gradients')
# save distribution of gradients
if args.save:
fileName = os.path.join(pathToDirectory, 'DistributionOfGradients2D.png')
plt.savefig(fileName)
print(f'[INFO] Distribution of gradients saved to {fileName}')
plt.close()
else:
plt.show()
def main():
args = getArguments()
print('[DEBUG]', args)
if args.use_momentum:
gd = Momentum2D(alpha=args.lr,
max_iterations=args.max_iterations,
start_point=args.start_point,
random=args.random,
epsilon=args.epsilon,
momentum=args.momentum)
else:
gd = GradientDescent2D(alpha=args.lr,
max_iterations=args.max_iterations,
start_point=args.start_point,
random=args.random,
epsilon=args.epsilon)
gd.run()
print(f'[DEBUG] Value of x: {gd.x}')
print('[DEBUG] Expected value: -1.59791')
plotAndSaveGraphs(gd, args)
if __name__ == "__main__":
main()
| [
"matplotlib.pyplot.title",
"seaborn.kdeplot",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"os.makedirs",
"algorithms.gradient_descent_2d.GradientDescent2D",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.animation.FuncAnimation",
"time.time",
"matplotlib.pyplot.style.use",
"... | [((253, 277), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (266, 277), True, 'import matplotlib.pyplot as plt\n'), ((313, 389), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parameters to tweak gradient descent."""'}), "(description='Parameters to tweak gradient descent.')\n", (336, 389), False, 'import argparse\n'), ((1925, 1952), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1935, 1952), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2051), 'numpy.linspace', 'np.linspace', (['(-2.5)', '(1)', '(1000)'], {}), '(-2.5, 1, 1000)\n', (2036, 2051), True, 'import numpy as np\n'), ((2307, 2337), 'numpy.array', 'np.array', (['[xHistory, yHistory]'], {}), '([xHistory, yHistory])\n', (2315, 2337), True, 'import numpy as np\n'), ((3094, 3218), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'fargs': '(dataset, line)', 'frames': 'nFrames', 'blit': '(False)', 'interval': 'interval', 'repeat': '(True)'}), '(fig, animate, fargs=(dataset, line), frames=nFrames,\n blit=False, interval=interval, repeat=True)\n', (3117, 3218), False, 'from matplotlib import animation\n'), ((3911, 3952), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': 'gradientHistory', 'fill': '(True)'}), '(x=gradientHistory, fill=True)\n', (3922, 3952), True, 'import seaborn as sns\n'), ((3957, 3980), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Gradients"""'], {}), "('Gradients')\n", (3967, 3980), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4023), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Gradients"""'], {}), "('Distribution of Gradients')\n", (3994, 4023), True, 'import matplotlib.pyplot as plt\n'), ((2273, 2291), 'numpy.array', 'np.array', (['xHistory'], {}), '(xHistory)\n', (2281, 2291), True, 'import numpy as np\n'), ((3351, 3401), 'os.path.join', 'os.path.join', (['"""visualizations"""', '"""gradient_descent"""'], {}), "('visualizations', 'gradient_descent')\n", (3363, 3401), False, 'import os\n'), ((3550, 3604), 'os.path.join', 'os.path.join', (['pathToDirectory', '"""GradientDescent2D.mp4"""'], {}), "(pathToDirectory, 'GradientDescent2D.mp4')\n", (3562, 3604), False, 'import os\n'), ((3669, 3680), 'time.time', 'time.time', ([], {}), '()\n', (3678, 3680), False, 'import time\n'), ((3865, 3876), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3874, 3876), True, 'import matplotlib.pyplot as plt\n'), ((3895, 3905), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3903, 3905), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4161), 'os.path.join', 'os.path.join', (['pathToDirectory', '"""DistributionOfGradients2D.png"""'], {}), "(pathToDirectory, 'DistributionOfGradients2D.png')\n", (4111, 4161), False, 'import os\n'), ((4170, 4191), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fileName'], {}), '(fileName)\n', (4181, 4191), True, 'import matplotlib.pyplot as plt\n'), ((4271, 4282), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4280, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4419, 4586), 'algorithms.momentum_2d.Momentum2D', 'Momentum2D', ([], {'alpha': 'args.lr', 'max_iterations': 'args.max_iterations', 'start_point': 'args.start_point', 'random': 'args.random', 'epsilon': 'args.epsilon', 'momentum': 'args.momentum'}), '(alpha=args.lr, max_iterations=args.max_iterations, start_point=\n args.start_point, random=args.random, epsilon=args.epsilon, momentum=\n args.momentum)\n', (4429, 4586), False, 'from algorithms.momentum_2d import Momentum2D\n'), ((4720, 4864), 'algorithms.gradient_descent_2d.GradientDescent2D', 'GradientDescent2D', ([], {'alpha': 'args.lr', 'max_iterations': 'args.max_iterations', 'start_point': 'args.start_point', 'random': 'args.random', 'epsilon': 'args.epsilon'}), '(alpha=args.lr, max_iterations=args.max_iterations,\n start_point=args.start_point, random=args.random, epsilon=args.epsilon)\n', (4737, 4864), False, 'from algorithms.gradient_descent_2d import GradientDescent2D\n'), ((3417, 3448), 'os.path.exists', 'os.path.exists', (['pathToDirectory'], {}), '(pathToDirectory)\n', (3431, 3448), False, 'import os\n'), ((3462, 3490), 'os.makedirs', 'os.makedirs', (['pathToDirectory'], {}), '(pathToDirectory)\n', (3473, 3490), False, 'import os\n'), ((3742, 3753), 'time.time', 'time.time', ([], {}), '()\n', (3751, 3753), False, 'import time\n')] |
import sys
#import importlib
#importlib.reload(sys)
import pickle
import re
from string import punctuation
from nltk.tokenize import word_tokenize
#sys.setdefaultencoding("utf-8")
import numpy as np
import math
class PhraseVector:
def __init__(self, wordvec_model, phrase):
self.phrase = phrase
self.wordvec_model = wordvec_model
self.vector = self.PhraseToVec(phrase)
# Retireve original phrase text
def GetPhrase(self):
return self.phrase
# Combine multiple vectors
def ConvertVectorSetToVecAverageBased(self, vectorSet, ignore=[]):
if len(ignore) == 0:
return np.mean(vectorSet, axis=0)
else:
return np.dot(np.transpose(vectorSet), ignore) / sum(ignore)
# Some basic clean up of phrase
def standardize_text(self, phrase):
remove = punctuation
remove = remove.replace("\'", "")
pattern = r"[{}]".format(remove)
phrase = re.sub(r"http\S+", "", phrase)
phrase = re.sub(r"http", "", phrase)
phrase = re.sub(r"@\S+", "", phrase)
phrase = re.sub(pattern, "", phrase)
phrase = re.sub(r"[^\w\s\d+]", "", phrase)
phrase = re.sub(r"[^\D+]", "", phrase)
phrase = re.sub(r"@", "at", phrase)
phrase = phrase.lower()
return phrase
def tokenMaker(self, phrase):
words = word_tokenize(phrase)
with open('model/stopwords.pickle', 'rb') as f:
custom_stopwords = pickle.load(f, encoding='latin1')
custom_stopwords = custom_stopwords + ['nt', 'eur', 'euro', 'ive', 'hey']
filtered_words = [word for word in words if word not in custom_stopwords]
return filtered_words
# Retrieve the phrase vector based on the vectors of each word in the phrase
def PhraseToVec(self, phrase):
phrase_clean = self.standardize_text(phrase)
wordsInPhrase = self.tokenMaker(phrase_clean) # [word for word in phrase.split()]
vectorSet = []
for aWord in wordsInPhrase:
try:
wordVector = self.wordvec_model[aWord]
vectorSet.append(wordVector)
except:
# Word not in vocabulary
pass
return self.ConvertVectorSetToVecAverageBased(vectorSet)
# Calculate the cosine similarity for the current phrase and another phrase vector
def CosineSimilarity(self, otherPhraseVec):
cosine_similarity = np.dot(self.vector, otherPhraseVec) / (np.linalg.norm(self.vector) * np.linalg.norm(otherPhraseVec))
try:
if math.isnan(cosine_similarity):
cosine_similarity = 0
except:
cosine_similarity = 0
return cosine_similarity | [
"math.isnan",
"numpy.transpose",
"pickle.load",
"numpy.mean",
"numpy.linalg.norm",
"numpy.dot",
"re.sub",
"nltk.tokenize.word_tokenize"
] | [((960, 990), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'phrase'], {}), "('http\\\\S+', '', phrase)\n", (966, 990), False, 'import re\n'), ((1008, 1034), 're.sub', 're.sub', (['"""http"""', '""""""', 'phrase'], {}), "('http', '', phrase)\n", (1014, 1034), False, 'import re\n'), ((1053, 1080), 're.sub', 're.sub', (['"""@\\\\S+"""', '""""""', 'phrase'], {}), "('@\\\\S+', '', phrase)\n", (1059, 1080), False, 'import re\n'), ((1098, 1125), 're.sub', 're.sub', (['pattern', '""""""', 'phrase'], {}), "(pattern, '', phrase)\n", (1104, 1125), False, 'import re\n'), ((1143, 1178), 're.sub', 're.sub', (['"""[^\\\\w\\\\s\\\\d+]"""', '""""""', 'phrase'], {}), "('[^\\\\w\\\\s\\\\d+]', '', phrase)\n", (1149, 1178), False, 'import re\n'), ((1194, 1223), 're.sub', 're.sub', (['"""[^\\\\D+]"""', '""""""', 'phrase'], {}), "('[^\\\\D+]', '', phrase)\n", (1200, 1223), False, 'import re\n'), ((1241, 1266), 're.sub', 're.sub', (['"""@"""', '"""at"""', 'phrase'], {}), "('@', 'at', phrase)\n", (1247, 1266), False, 'import re\n'), ((1373, 1394), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['phrase'], {}), '(phrase)\n', (1386, 1394), False, 'from nltk.tokenize import word_tokenize\n'), ((640, 666), 'numpy.mean', 'np.mean', (['vectorSet'], {'axis': '(0)'}), '(vectorSet, axis=0)\n', (647, 666), True, 'import numpy as np\n'), ((1483, 1516), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1494, 1516), False, 'import pickle\n'), ((2460, 2495), 'numpy.dot', 'np.dot', (['self.vector', 'otherPhraseVec'], {}), '(self.vector, otherPhraseVec)\n', (2466, 2495), True, 'import numpy as np\n'), ((2589, 2618), 'math.isnan', 'math.isnan', (['cosine_similarity'], {}), '(cosine_similarity)\n', (2599, 2618), False, 'import math\n'), ((2499, 2526), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vector'], {}), '(self.vector)\n', (2513, 2526), True, 'import numpy as np\n'), ((2529, 2559), 'numpy.linalg.norm', 'np.linalg.norm', (['otherPhraseVec'], {}), '(otherPhraseVec)\n', (2543, 2559), True, 'import numpy as np\n'), ((707, 730), 'numpy.transpose', 'np.transpose', (['vectorSet'], {}), '(vectorSet)\n', (719, 730), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
from specklesnake.model.bilinear import BilinearElement
class TestBilinearElement(unittest.TestCase):
def setUp(self):
super().setUp()
element = BilinearElement()
nodes = np.array([
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 1.0],
])
element.nodes = nodes
self.element = element
def test_single_point(self):
expected = [
(-1, -1, 0),
(1, -1, 1),
(1, 1, 2),
(-1, 1, 3)
]
for xi, eta, node_idx in expected:
point = self.element.single_point(xi, eta)
assert_array_almost_equal(point, self.element.nodes[node_idx, :])
def test_deformation_gradient(self):
in_def_grad = np.array([
[1.1, 0.0],
[0.0, 1.0]
])
disp_vec = self.element.nodes @ in_def_grad - self.element.nodes
out_def_grad = self.element.deformation_gradient(0, 0, disp_vec)
assert_array_almost_equal(in_def_grad, out_def_grad)
| [
"specklesnake.model.bilinear.BilinearElement",
"numpy.testing.assert_array_almost_equal",
"numpy.array"
] | [((256, 273), 'specklesnake.model.bilinear.BilinearElement', 'BilinearElement', ([], {}), '()\n', (271, 273), False, 'from specklesnake.model.bilinear import BilinearElement\n'), ((290, 348), 'numpy.array', 'np.array', (['[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])\n', (298, 348), True, 'import numpy as np\n'), ((869, 903), 'numpy.array', 'np.array', (['[[1.1, 0.0], [0.0, 1.0]]'], {}), '([[1.1, 0.0], [0.0, 1.0]])\n', (877, 903), True, 'import numpy as np\n'), ((1092, 1144), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['in_def_grad', 'out_def_grad'], {}), '(in_def_grad, out_def_grad)\n', (1117, 1144), False, 'from numpy.testing import assert_array_almost_equal\n'), ((739, 804), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['point', 'self.element.nodes[node_idx, :]'], {}), '(point, self.element.nodes[node_idx, :])\n', (764, 804), False, 'from numpy.testing import assert_array_almost_equal\n')] |
import numpy as np
import cv2
# import glob
# Termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Prepare object Points
objp = np.zeros((7*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:7].T.reshape(-1,2)
# Array to store object points and image points from all images
obj_points = [] # 3d point in real world space
img_points = [] # 2d points in image plane.
# Array to store images
images = []
camera_index = 0
found = True
while found:
cap = cv2.VideoCapture(camera_index)
ret, frame = cap.read()
if not ret:
found = False
break
print("Camera found, ID: " + str(camera_index))
camera_index += 1
gray = None
# 1. Capture images and chessboard object points
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
chessboardCornerRet, corners = cv2.findChessboardCorners(gray, (9, 7), None)
# If found, add object points, image points (after refining them)
if chessboardCornerRet:
obj_points.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
img_points.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(frame, (9, 7), corners2, chessboardCornerRet)
cv2.putText(img, "Calibration images taken: " + str(len(img_points)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
cv2.imshow('img', img)
cv2.waitKey(500)
# Display the resulting frame
cv2.putText(gray, "Calibration images taken: " + str(len(img_points)), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
cv2.imshow('img', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# 3. Calculate camera matrix and distortion coefficients
ret, mtx, dist, rotation_vectors, translation_vectors = cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
print ("camera matrix: \n" + str(mtx))
print ("distortion coefficients: \n" + str(dist))
print ("rotation vectors: \n" + str(dist))
print ("translation vectors: \n" + str(dist))
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
h, w = frame.shape[:2]
new_camera_mtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
print("New camera matrix: \n" + str(new_camera_mtx))
# 4. Store Coefficients in file
np.savetxt("camera_matrix.camera", new_camera_mtx, delimiter=',')
np.savetxt("distortion_coefficients.camera", dist, delimiter=',')
# 5. Calculate the error
mean_error = 0
for i in range(len(obj_points)):
imgpoints2, _ = cv2.projectPoints(obj_points[i], rotation_vectors[i], translation_vectors[i], mtx, dist)
error = cv2.norm(img_points[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)
mean_error += error
print("total error: ", mean_error/len(obj_points))
# 6. Show comparison
while True:
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
# undistort
dst = cv2.undistort(frame, mtx, dist, None, new_camera_mtx)
# crop the image
x, y, w, h = roi
dst = dst[y:y + h, x:x + w]
cropped_frame = frame[y:y + h, x:x + w]
alpha = 0.5
beta = (1.0 - alpha)
dst = cv2.addWeighted(cropped_frame, alpha, dst, beta, 0.0)
cv2.imshow('img', dst)
if cv2.waitKey(1) & 0xFF == ord('w'):
break
# FINAL Close down and output coefficients
cap.release()
cv2.destroyAllWindows()
| [
"cv2.undistort",
"cv2.findChessboardCorners",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.savetxt",
"numpy.zeros",
"cv2.imshow",
"cv2.projectPoints",
"cv2.addWeighted",
"cv2.VideoCapture",
"cv2.cornerSubPix",
"cv2.calibrateCamera",
"cv2.norm",
"cv2.drawChessboardCorners",
"cv2.destroyAllWindow... | [((175, 207), 'numpy.zeros', 'np.zeros', (['(7 * 9, 3)', 'np.float32'], {}), '((7 * 9, 3), np.float32)\n', (183, 207), True, 'import numpy as np\n'), ((755, 774), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (771, 774), False, 'import cv2\n'), ((2006, 2029), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2027, 2029), False, 'import cv2\n'), ((2144, 2217), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['obj_points', 'img_points', 'gray.shape[::-1]', 'None', 'None'], {}), '(obj_points, img_points, gray.shape[::-1], None, None)\n', (2163, 2217), False, 'import cv2\n'), ((2404, 2423), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2420, 2423), False, 'import cv2\n'), ((2494, 2553), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['mtx', 'dist', '(w, h)', '(1)', '(w, h)'], {}), '(mtx, dist, (w, h), 1, (w, h))\n', (2523, 2553), False, 'import cv2\n'), ((2641, 2706), 'numpy.savetxt', 'np.savetxt', (['"""camera_matrix.camera"""', 'new_camera_mtx'], {'delimiter': '""","""'}), "('camera_matrix.camera', new_camera_mtx, delimiter=',')\n", (2651, 2706), True, 'import numpy as np\n'), ((2707, 2772), 'numpy.savetxt', 'np.savetxt', (['"""distortion_coefficients.camera"""', 'dist'], {'delimiter': '""","""'}), "('distortion_coefficients.camera', dist, delimiter=',')\n", (2717, 2772), True, 'import numpy as np\n'), ((3743, 3766), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3764, 3766), False, 'import cv2\n'), ((499, 529), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_index'], {}), '(camera_index)\n', (515, 529), False, 'import cv2\n'), ((995, 1034), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1007, 1034), False, 'import cv2\n'), ((1106, 1151), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 7)', 'None'], {}), '(gray, (9, 7), None)\n', (1131, 1151), False, 'import cv2\n'), ((1911, 1934), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'gray'], {}), "('img', gray)\n", (1921, 1934), False, 'import cv2\n'), ((2867, 2960), 'cv2.projectPoints', 'cv2.projectPoints', (['obj_points[i]', 'rotation_vectors[i]', 'translation_vectors[i]', 'mtx', 'dist'], {}), '(obj_points[i], rotation_vectors[i], translation_vectors[i\n ], mtx, dist)\n', (2884, 2960), False, 'import cv2\n'), ((3321, 3374), 'cv2.undistort', 'cv2.undistort', (['frame', 'mtx', 'dist', 'None', 'new_camera_mtx'], {}), '(frame, mtx, dist, None, new_camera_mtx)\n', (3334, 3374), False, 'import cv2\n'), ((3547, 3600), 'cv2.addWeighted', 'cv2.addWeighted', (['cropped_frame', 'alpha', 'dst', 'beta', '(0.0)'], {}), '(cropped_frame, alpha, dst, beta, 0.0)\n', (3562, 3600), False, 'import cv2\n'), ((3606, 3628), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'dst'], {}), "('img', dst)\n", (3616, 3628), False, 'import cv2\n'), ((1303, 1364), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(gray, corners, (11, 11), (-1, -1), criteria)\n', (1319, 1364), False, 'import cv2\n'), ((1455, 1526), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['frame', '(9, 7)', 'corners2', 'chessboardCornerRet'], {}), '(frame, (9, 7), corners2, chessboardCornerRet)\n', (1480, 1526), False, 'import cv2\n'), ((1681, 1703), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1691, 1703), False, 'import cv2\n'), ((1712, 1728), 'cv2.waitKey', 'cv2.waitKey', (['(500)'], {}), '(500)\n', (1723, 1728), False, 'import cv2\n'), ((2968, 3016), 'cv2.norm', 'cv2.norm', (['img_points[i]', 'imgpoints2', 'cv2.NORM_L2'], {}), '(img_points[i], imgpoints2, cv2.NORM_L2)\n', (2976, 3016), False, 'import cv2\n'), ((1942, 1956), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1953, 1956), False, 'import cv2\n'), ((3636, 3650), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3647, 3650), False, 'import cv2\n')] |
from argparse import ArgumentParser
import json
import networkx as nx
import gzip
import numpy as np
import statistics
def compute_diameter(adjacency_list):
# graph is a list of edges
# every edge is a list: [source, type, target]
g = nx.Graph()
for edge_source, _, edge_target in adjacency_list:
g.add_edge(edge_source, edge_target)
return nx.diameter(g)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--data", dest="data", required=True)
args = parser.parse_args()
with gzip.open(args.data, 'r') as file:
lines = file.readlines()
objs = [json.loads(line) for line in lines]
graphs = [o['graph'] for o in objs]
diameters = [compute_diameter(graph) for graph in graphs]
print('Max diameter: ', max(diameters))
print('Mean diameter: ', np.mean(diameters))
print('stddev: ', statistics.stdev(diameters))
percentiles = range(10, 110, 10)
percentile_results = np.percentile(diameters, percentiles)
for i, res in zip(percentiles, percentile_results):
print('Diameters - {} percentile: {}'.format(i, res)) | [
"gzip.open",
"argparse.ArgumentParser",
"json.loads",
"statistics.stdev",
"numpy.percentile",
"numpy.mean",
"networkx.Graph",
"networkx.diameter"
] | [((248, 258), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (256, 258), True, 'import networkx as nx\n'), ((370, 384), 'networkx.diameter', 'nx.diameter', (['g'], {}), '(g)\n', (381, 384), True, 'import networkx as nx\n'), ((426, 442), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (440, 442), False, 'from argparse import ArgumentParser\n'), ((981, 1018), 'numpy.percentile', 'np.percentile', (['diameters', 'percentiles'], {}), '(diameters, percentiles)\n', (994, 1018), True, 'import numpy as np\n'), ((550, 575), 'gzip.open', 'gzip.open', (['args.data', '"""r"""'], {}), "(args.data, 'r')\n", (559, 575), False, 'import gzip\n'), ((635, 651), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (645, 651), False, 'import json\n'), ((847, 865), 'numpy.mean', 'np.mean', (['diameters'], {}), '(diameters)\n', (854, 865), True, 'import numpy as np\n'), ((889, 916), 'statistics.stdev', 'statistics.stdev', (['diameters'], {}), '(diameters)\n', (905, 916), False, 'import statistics\n')] |
# -*- coding: UTF-8 -*-
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
import sys
from dbstep.constants import metals
"""
calculator
Performs calculations for finding angles, translation and rotation of molecules
"""
def unit_vector(vector):
""" Returns the unit vector of the vector """
return vector / np.linalg.norm(vector)
def point_vec(coords, spec_atom_2):
"""returns coordinate vector between any number of atoms """
point = np.array([0.0,0.0,0.0])
for atom in spec_atom_2:
point += coords[atom-1]
return point
def rotate_mol(
coords, spec_atom_1, end_point,
verbose=False, atom3=False, cube_origin=False):
"""Aligns spec_atom_1-end_point bond to the z-axis via rotation about the
x and y axes.
Args:
coords (np.ndarray): 3D coordinates of the all the molecule's atoms
spec_atom_1 (int): non-zero based index of the atom we're looking down
end_point (np.ndarray): 3D coordinate of the atom we're looking to
verbose (bool): should information about rotation be printed or not
atom3 (int, optional): atom to specify rotation around z axis to align atom3 to the positive x direction & y=0
cube_origin (np.ndarray, optional): origin a cube file
Returns:
Rotated version of coords and cube_origin (if provided).
"""
atom_1_index = spec_atom_1 - 1
intersecting_vector = end_point - coords[atom_1_index]
new_coords = np.copy(coords)
new_cube_origin = np.copy(cube_origin)
yaw, pitch, roll = 0, 0, 0
yaw = angle_between_axis(intersecting_vector, 1, 2)
if yaw != 0:
print_rotation_info('x', yaw, verbose)
end_point = apply_rotation(end_point, np.array([yaw, 0, 0]))
intersecting_vector = end_point - coords[atom_1_index]
pitch = angle_between_axis(intersecting_vector, 0, 2)
if pitch != 0:
print_rotation_info('y', pitch, verbose)
end_point = apply_rotation(end_point, np.array([0, pitch, 0]))
if atom3 is not False:
atom3_coords = coords[int(atom3) - 1]
intersecting_vector = atom3_coords - end_point
roll = angle_between_axis(intersecting_vector, 1, 0)
if roll != 0:
roll = check_rotated_atom3_x_direction(atom3_coords, roll)
print_rotation_info('z', roll, verbose)
if yaw != 0 or pitch != 0 or roll != 0:
# rotation of all of coords done here
three_rotations = np.array([yaw, pitch, roll])
new_coords = apply_rotation(new_coords, three_rotations)
if cube_origin is not False:
# effectively rotates whole grid that will be generated later
new_cube_origin = apply_rotation(cube_origin, three_rotations)
else:
if verbose:
print(" No rotation necessary :)")
if cube_origin is False:
return new_coords
else:
return new_coords, new_cube_origin
def angle_between_axis(vector, axis_from_index, axis_to_index):
""" Returns the angle in radians needed to rotate axis_from to be parallel to axis_to. """
v1_u = unit_vector(np.array(vector))
angle = np.arctan2(v1_u[axis_from_index], v1_u[axis_to_index])
# I've found through testing that these from-to combinations need their
# rotations sign to be reversed.
reverse_angle_combinations = [(0, 2), (1, 0), (2, 1)]
if (axis_from_index, axis_to_index) in reverse_angle_combinations:
angle = -angle
return angle
def print_rotation_info(axis, radians, verbose):
"""Prints rotation information if verbose and radians is non-zero."""
if verbose:
print(
f' Rotating molecule about {axis}-axis '
f'{np.degrees(radians):.2f} degrees.')
def apply_rotation(item_to_rotate, radians):
"""Rotates a vector or matrix about x, y and z axes specified by radians array."""
rot = R.from_euler('xyz', radians)
return np.round(rot.apply(item_to_rotate), 8)
def check_rotated_atom3_x_direction(atom3_coords, roll):
"""If x is in negative direction, subtract or add pi to roll and return the result."""
new_atom3_coords = apply_rotation(atom3_coords, np.array([0, 0, roll]))
if new_atom3_coords[0] < 0:
plus_pi, minus_pi = roll + np.pi, roll - np.pi
roll = plus_pi if abs(plus_pi) < abs(minus_pi) else minus_pi
return roll
def translate_mol(MOL, options, origin):
"""# Translates molecule to place center atom at cartesian origin [0,0,0]"""
coords, atoms, spec_atom = MOL.CARTESIANS, MOL.ATOMTYPES, options.spec_atom_1
base_id = spec_atom - 1
base_atom = atoms[base_id]
try:
displacement = coords[base_id] - origin
if np.linalg.norm(displacement) == 0:
if options.verbose: print("\n Molecule is defined with {}{} at the origin".format(base_atom,(base_id+1)))
else:
if options.verbose == True: print("\n Translating molecule by {} to set {}{} at the origin".format(-displacement, base_atom, (base_id+1)))
for n, coord in enumerate(coords):
coords[n] = coords[n] - displacement
except:
sys.exit(" WARNING! Unable to find an atom to set at the origin")
return coords
def translate_dens(mol, options, xmin, xmax, ymin, ymax, zmin, zmax, xyz_max, origin):
""" Translates molecule so that a specified atom (spec_atom) is at the origin. Defaults to a metal if no atom is specified."""
coords, atoms, cube_origin = mol.CARTESIANS, mol.ATOMTYPES,mol.ORIGIN
spec_atom = options.spec_atom_1
for n, atom in enumerate(atoms):
if not spec_atom:
if atom in metals:
base_id, base_atom = n, atom
else:
if n+1 == spec_atom:
base_id, base_atom = n, atom
try:
displacement = coords[base_id] - origin
if np.linalg.norm(displacement) == 0:
if options.verbose: print("\n Molecule is already defined with {}{} at the origin".format(base_atom,(base_id+1)))
else:
if options.verbose: print("\n Translating molecule by {} to set {}{} at the origin".format(-displacement, base_atom, (base_id+1)))
for n, coord in enumerate(coords):
coords[n] = coords[n] - displacement
cube_origin = cube_origin + displacement
xmin -= displacement[0]
xmax -= displacement[0]
ymin -= displacement[1]
ymax -= displacement[1]
zmin -= displacement[2]
zmax -= displacement[2]
xyz_max = max(xmax, ymax, zmax, abs(xmin), abs(ymin), abs(zmin))
except:
sys.exit(" WARNING! Unable to find an atom (e.g. metal) to set at the origin")
return [coords, cube_origin, xmin, xmax, ymin, ymax, zmin, zmax, xyz_max] | [
"numpy.arctan2",
"numpy.copy",
"numpy.degrees",
"numpy.array",
"numpy.linalg.norm",
"sys.exit",
"scipy.spatial.transform.Rotation.from_euler"
] | [((478, 503), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (486, 503), True, 'import numpy as np\n'), ((1404, 1419), 'numpy.copy', 'np.copy', (['coords'], {}), '(coords)\n', (1411, 1419), True, 'import numpy as np\n'), ((1439, 1459), 'numpy.copy', 'np.copy', (['cube_origin'], {}), '(cube_origin)\n', (1446, 1459), True, 'import numpy as np\n'), ((2898, 2952), 'numpy.arctan2', 'np.arctan2', (['v1_u[axis_from_index]', 'v1_u[axis_to_index]'], {}), '(v1_u[axis_from_index], v1_u[axis_to_index])\n', (2908, 2952), True, 'import numpy as np\n'), ((3586, 3614), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""xyz"""', 'radians'], {}), "('xyz', radians)\n", (3598, 3614), True, 'from scipy.spatial.transform import Rotation as R\n'), ((345, 367), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (359, 367), True, 'import numpy as np\n'), ((2290, 2318), 'numpy.array', 'np.array', (['[yaw, pitch, roll]'], {}), '([yaw, pitch, roll])\n', (2298, 2318), True, 'import numpy as np\n'), ((2870, 2886), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (2878, 2886), True, 'import numpy as np\n'), ((3858, 3880), 'numpy.array', 'np.array', (['[0, 0, roll]'], {}), '([0, 0, roll])\n', (3866, 3880), True, 'import numpy as np\n'), ((1638, 1659), 'numpy.array', 'np.array', (['[yaw, 0, 0]'], {}), '([yaw, 0, 0])\n', (1646, 1659), True, 'import numpy as np\n'), ((1873, 1896), 'numpy.array', 'np.array', (['[0, pitch, 0]'], {}), '([0, pitch, 0])\n', (1881, 1896), True, 'import numpy as np\n'), ((4343, 4371), 'numpy.linalg.norm', 'np.linalg.norm', (['displacement'], {}), '(displacement)\n', (4357, 4371), True, 'import numpy as np\n'), ((4732, 4799), 'sys.exit', 'sys.exit', (['""" WARNING! Unable to find an atom to set at the origin"""'], {}), "(' WARNING! Unable to find an atom to set at the origin')\n", (4740, 4799), False, 'import sys\n'), ((5363, 5391), 'numpy.linalg.norm', 'np.linalg.norm', (['displacement'], {}), '(displacement)\n', (5377, 5391), True, 'import numpy as np\n'), ((6018, 6103), 'sys.exit', 'sys.exit', (['""" WARNING! Unable to find an atom (e.g. metal) to set at the origin"""'], {}), "(' WARNING! Unable to find an atom (e.g. metal) to set at the origin'\n )\n", (6026, 6103), False, 'import sys\n'), ((3412, 3431), 'numpy.degrees', 'np.degrees', (['radians'], {}), '(radians)\n', (3422, 3431), True, 'import numpy as np\n')] |
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
def gen_data(n):
x = np.random.randn(n, 1)
#print(x)
c = np.random.randn(1, 1) * n**2
#print(c)
P_gen = np.random.randn(n, n)
P_gen = (P_gen + np.transpose(P_gen)) / 2
#print(P_gen)
w, v = linalg.eigh(P_gen)
#print(w)
#print(v)
P_rec = np.dot(np.dot(v, np.diag(w)), np.transpose(v))
assert(np.allclose(P_gen, P_rec))
## make it positive-semidefinite
##
w = np.where(w > 0, w, 0)
#print(w)
P = np.dot(np.dot(v, np.diag(w)), np.transpose(v))
#print(P)
assert(np.allclose(P, np.transpose(P)))
return x, c, P
def make_matrix(x, c, P):
Px = np.dot(P, x)
#print(Px)
F1 = np.hstack((P, Px))
#print(F1)
F2 = np.hstack((np.transpose(Px), c))
#print(F2)
F = np.vstack((F1, F2))
#print(F)
return F
def min_eig(F):
eigF = linalg.eigvalsh(F)
#print(eigF)
min_eigF = np.amin(eigF)
#print(min_eigF)
return min_eigF
def schur_comp(x, c, P):
Px = np.dot(P, x)
#print(Px)
val = c - np.dot(np.transpose(x), Px)
val = val[0, 0]
return val
if __name__ == "__main__":
val = np.zeros((2, 1000))
for i in range(val.shape[1]):
n = np.random.randint(2, 50)
x, c, P = gen_data(n)
F = make_matrix(x, c, P)
val[0, i] = min_eig(F)
val[1, i] = schur_comp(x, c, P)
#print(val[:, i])
plt.plot(val[0], val[1], '.')
plt.hlines([0], np.min(val[0]), np.max(val[0]), linestyle=":", lw=1)
plt.vlines([0], np.min(val[1]), np.max(val[1]), linestyle=":", lw=1)
plt.xlabel('left eval: minimum eigenvalue')
plt.ylabel('right eval: schur complement value')
plt.show()
pass | [
"numpy.amin",
"numpy.allclose",
"numpy.random.randint",
"numpy.diag",
"scipy.linalg.eigvalsh",
"numpy.random.randn",
"numpy.transpose",
"numpy.max",
"scipy.linalg.eigh",
"matplotlib.pyplot.show",
"numpy.hstack",
"numpy.min",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.vstack",
"mat... | [((107, 128), 'numpy.random.randn', 'np.random.randn', (['n', '(1)'], {}), '(n, 1)\n', (122, 128), True, 'import numpy as np\n'), ((210, 231), 'numpy.random.randn', 'np.random.randn', (['n', 'n'], {}), '(n, n)\n', (225, 231), True, 'import numpy as np\n'), ((312, 330), 'scipy.linalg.eigh', 'linalg.eigh', (['P_gen'], {}), '(P_gen)\n', (323, 330), False, 'from scipy import linalg\n'), ((433, 458), 'numpy.allclose', 'np.allclose', (['P_gen', 'P_rec'], {}), '(P_gen, P_rec)\n', (444, 458), True, 'import numpy as np\n'), ((517, 538), 'numpy.where', 'np.where', (['(w > 0)', 'w', '(0)'], {}), '(w > 0, w, 0)\n', (525, 538), True, 'import numpy as np\n'), ((733, 745), 'numpy.dot', 'np.dot', (['P', 'x'], {}), '(P, x)\n', (739, 745), True, 'import numpy as np\n'), ((772, 790), 'numpy.hstack', 'np.hstack', (['(P, Px)'], {}), '((P, Px))\n', (781, 790), True, 'import numpy as np\n'), ((875, 894), 'numpy.vstack', 'np.vstack', (['(F1, F2)'], {}), '((F1, F2))\n', (884, 894), True, 'import numpy as np\n'), ((959, 977), 'scipy.linalg.eigvalsh', 'linalg.eigvalsh', (['F'], {}), '(F)\n', (974, 977), False, 'from scipy import linalg\n'), ((1012, 1025), 'numpy.amin', 'np.amin', (['eigF'], {}), '(eigF)\n', (1019, 1025), True, 'import numpy as np\n'), ((1111, 1123), 'numpy.dot', 'np.dot', (['P', 'x'], {}), '(P, x)\n', (1117, 1123), True, 'import numpy as np\n'), ((1269, 1288), 'numpy.zeros', 'np.zeros', (['(2, 1000)'], {}), '((2, 1000))\n', (1277, 1288), True, 'import numpy as np\n'), ((1544, 1573), 'matplotlib.pyplot.plot', 'plt.plot', (['val[0]', 'val[1]', '"""."""'], {}), "(val[0], val[1], '.')\n", (1552, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1770), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""left eval: minimum eigenvalue"""'], {}), "('left eval: minimum eigenvalue')\n", (1737, 1770), True, 'import matplotlib.pyplot as plt\n'), ((1776, 1824), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""right eval: schur complement value"""'], {}), "('right eval: schur complement value')\n", (1786, 1824), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1840), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1838, 1840), True, 'import matplotlib.pyplot as plt\n'), ((153, 174), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (168, 174), True, 'import numpy as np\n'), ((404, 419), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (416, 419), True, 'import numpy as np\n'), ((593, 608), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (605, 608), True, 'import numpy as np\n'), ((652, 667), 'numpy.transpose', 'np.transpose', (['P'], {}), '(P)\n', (664, 667), True, 'import numpy as np\n'), ((1337, 1361), 'numpy.random.randint', 'np.random.randint', (['(2)', '(50)'], {}), '(2, 50)\n', (1354, 1361), True, 'import numpy as np\n'), ((1595, 1609), 'numpy.min', 'np.min', (['val[0]'], {}), '(val[0])\n', (1601, 1609), True, 'import numpy as np\n'), ((1611, 1625), 'numpy.max', 'np.max', (['val[0]'], {}), '(val[0])\n', (1617, 1625), True, 'import numpy as np\n'), ((1669, 1683), 'numpy.min', 'np.min', (['val[1]'], {}), '(val[1])\n', (1675, 1683), True, 'import numpy as np\n'), ((1685, 1699), 'numpy.max', 'np.max', (['val[1]'], {}), '(val[1])\n', (1691, 1699), True, 'import numpy as np\n'), ((254, 273), 'numpy.transpose', 'np.transpose', (['P_gen'], {}), '(P_gen)\n', (266, 273), True, 'import numpy as np\n'), ((391, 401), 'numpy.diag', 'np.diag', (['w'], {}), '(w)\n', (398, 401), True, 'import numpy as np\n'), ((580, 590), 'numpy.diag', 'np.diag', (['w'], {}), '(w)\n', (587, 590), True, 'import numpy as np\n'), ((828, 844), 'numpy.transpose', 'np.transpose', (['Px'], {}), '(Px)\n', (840, 844), True, 'import numpy as np\n'), ((1164, 1179), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (1176, 1179), True, 'import numpy as np\n')] |
# -*-coding:utf-8-*-
"""
使用 kNN(k - 临近) 算法实现简易的手写数字识别
"""
import numpy as np
import operator
from os import listdir
def img_to_vector(filename):
"""
将图片转换为向量
:param filename: 图片的文件名
:return: 图片相应的向量
"""
vector = np.zeros((1, 1024))
fr = open(filename)
for i in range(32):
lines = fr.readline()
for j in range(32):
vector[0, 32 * i + j] = int(lines[j])
return vector
def classify0(x, data_set, labels, k):
"""
kNN 算法
:param x: 用于分类的向量
:param data_set: 训练样本
:param labels: 标签向量
:param k: 选择最近邻居的数目
:return: 识别的结果
"""
data_set_size = data_set.shape[0]
# 计算距离
diff_matrix = np.tile(x, (data_set_size, 1)) - data_set
sq_diff_matrix = diff_matrix ** 2
sq_distances = sq_diff_matrix.sum(axis=1)
distances = sq_distances ** 0.5
sorted_distances_indicies = distances.argsort()
class_count = {}
# 选取距离最小的 k 个点
for i in range(k):
vote_label = labels[sorted_distances_indicies[i]]
class_count[vote_label] = class_count.get(vote_label, 0) + 1
sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
return sorted_class_count[0][0]
def handwriting_test():
"""
测试手写数字的识别
"""
hw_labels = []
# 加载训练数据
training_file_list = listdir('training_digits')
m = len(training_file_list)
training_matrix = np.zeros((m, 1024))
for i in range(m):
file_name_str = training_file_list[i]
file_str = file_name_str.split('.')[0]
class_num_str = int(file_str.split('_')[0])
# 将图片对应的数字添加到标签列表
hw_labels.append(class_num_str)
# 将图片转为向量
training_matrix[i, :] = img_to_vector('training_digits/%s' % file_name_str)
# 加载测试数据
test_file_list = listdir('test_digits')
error_count = 0.0
m_test = len(test_file_list)
for i in range(m_test):
file_name_str = test_file_list[i]
file_str = file_name_str.split('.')[0]
class_num_str = int(file_str.split('_')[0])
# 将测试用的图片转化为向量
vector_under_test = img_to_vector('test_digits/%s' % file_name_str)
# 获取识别的结果
classifier_result = classify0(vector_under_test, training_matrix, hw_labels, 3)
print("the classifier came back with: %d, the real answer is: %d" % (classifier_result, class_num_str))
# 统计错误数量
if classifier_result != class_num_str:
error_count += 1.0
# 统计信息
print("\n the total number of errors is: %d" % error_count)
print("\n the total error rate is: %f" % (error_count / float(m_test)))
| [
"numpy.tile",
"operator.itemgetter",
"numpy.zeros",
"os.listdir"
] | [((238, 257), 'numpy.zeros', 'np.zeros', (['(1, 1024)'], {}), '((1, 1024))\n', (246, 257), True, 'import numpy as np\n'), ((1331, 1357), 'os.listdir', 'listdir', (['"""training_digits"""'], {}), "('training_digits')\n", (1338, 1357), False, 'from os import listdir\n'), ((1412, 1431), 'numpy.zeros', 'np.zeros', (['(m, 1024)'], {}), '((m, 1024))\n', (1420, 1431), True, 'import numpy as np\n'), ((1803, 1825), 'os.listdir', 'listdir', (['"""test_digits"""'], {}), "('test_digits')\n", (1810, 1825), False, 'from os import listdir\n'), ((682, 712), 'numpy.tile', 'np.tile', (['x', '(data_set_size, 1)'], {}), '(x, (data_set_size, 1))\n', (689, 712), True, 'import numpy as np\n'), ((1143, 1165), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1162, 1165), False, 'import operator\n')] |
"""
Implementation on Pyomo of Distribution Expansion Planning Model proposed by Muñoz-Delgado et al. (2014).
Reference:
<NAME>., <NAME>., & <NAME>. (2014). Joint expansion planning of distributed generation and distribution networks. IEEE Transactions on Power Systems, 30(5), 2579-2590.
DOI: 10.1109/TPWRS.2014.2364960
@Code Athor: <NAME>
"""
import pandas as pd
import numpy as np
import pyomo.environ as pyo
from pyomo.environ import *
from pyomo.opt import SolverFactory
from Data_24Bus import *
#from Data_138Bus import *
# =============================================================================
# DG Penetration
# =============================================================================
Vare = 0 #Penetration limit for distributed generation.
# =============================================================================
# Model
# =============================================================================
model = pyo.ConcreteModel()
# =============================================================================
# Variables
# =============================================================================
model.C_E_t = pyo.Var(T,
bounds=(0.0,None)
)
model.C_M_t = pyo.Var(T,
bounds=(0.0,None)
)
model.C_R_t = pyo.Var(T,
bounds=(0.0,None)
)
model.C_U_t = pyo.Var(T,
bounds=(0.0,None)
)
model.C_I_t = pyo.Var(T, #Investment
bounds=(0.0,None)
)
model.C_TPV = pyo.Var(bounds=(0.0,None)
)
model.d_U_stb = pyo.Var(Omega_N,
T,
B,
bounds=(0.0,None)
)
def f_l_rule(m):
index = []
for l in L:
for s in Omega_N:
for O in Omega_l_s[l][s-1]:
for K in K_l[l]:
for t in T:
for b in B:
index.append((l,s,O,K,t,b))
return index
model.f_l_rule = pyo.Set(dimen=6, initialize=f_l_rule)
model.f_l_srktb = pyo.Var(model.f_l_rule,
bounds=(0.0,None)
)
model.ftio_l_srktb = pyo.Var(model.f_l_rule,
bounds=(0.0,None)
)
def g_p_rule(m):
index = []
for p in P:
for O in Omega_N:
for kp in K_p[p]:
for t in T:
for b in B:
index.append((p,O,kp,t,b))
return index
model.g_p_rule = pyo.Set(dimen=5, initialize=g_p_rule)
model.g_p_sktb = pyo.Var(model.g_p_rule,
bounds=(0.0,None)
)
def g_tr_rule(m):
index = []
for tr in TR:
for O in Omega_N:
for ktr in K_tr[tr]:
for t in T:
for b in B:
index.append((tr,O,ktr,t,b))
return index
model.g_tr_rule = pyo.Set(dimen=5, initialize=g_tr_rule)
model.g_tr_sktb = pyo.Var(model.g_tr_rule,
bounds=(0.0,None)
)
model.gtio_SS_stb = pyo.Var(Omega_N,
T,
B,
bounds=(0.0,None)
)
model.V_stb = pyo.Var(Omega_N,
T,
B,
bounds=(0.0,None)
)
def x_l_rule(m):
index = []
for l in ["NRF", "NAF"]:
for s in Omega_N:
for O in Omega_l_s[l][s-1]:
for K in K_l[l]:
for t in T:
index.append((l,s,O,K,t))
return index
model.x_l_rule = pyo.Set(dimen=5, initialize=x_l_rule)
model.x_l_srkt = pyo.Var(model.x_l_rule,
within=pyo.Binary
)
def x_NT_rule(m):
index = []
for SS in Omega_SS:
for k in K_tr["NT"]:
for t in T:
index.append((SS,k,t))
return index
model.x_NT_rule = pyo.Set(dimen=3, initialize=x_NT_rule)
model.x_NT_skt = pyo.Var(model.x_NT_rule,
within=pyo.Binary
)
def x_p_rule(m):
index = []
for p in P:
for O in Omega_p[p]:
for K in K_p[p]:
for t in T:
index.append((p,O,K,t))
return index
model.x_p_rule = pyo.Set(dimen=4, initialize=x_p_rule)
model.x_p_skt = pyo.Var(model.x_p_rule,
within=pyo.Binary
)
def x_SS_rule(model):
index = []
for SS in Omega_SS:
for t in T:
index.append((SS,t))
return index
model.x_SS_rule = pyo.Set(dimen=2, initialize=x_SS_rule)
model.x_SS_st = pyo.Var(model.x_SS_rule,
within=pyo.Binary
)
def y_l_rule(m):
index = []
for l in L:
for s in Omega_N:
for O in Omega_l_s[l][s-1]:
for K in K_l[l]:
for t in T:
index.append((l,s,O,K,t))
return index
model.y_l_rule = pyo.Set(dimen=5, initialize=y_l_rule)
model.y_l_srkt = pyo.Var(model.y_l_rule,
within=pyo.Binary
)
def y_p_rule(m):
index = []
for p in P:
for O in Omega_N:
for K in K_p[p]:
for t in T:
index.append((p,O,K,t))
return index
model.y_p_rule = pyo.Set(dimen=4, initialize=y_p_rule)
model.y_p_skt = pyo.Var(model.y_p_rule,
within=pyo.Binary
)
def y_tr_rule(m):
index = []
for tr in TR:
for O in Omega_N:
for K in K_tr[tr]:
for t in T:
index.append((tr,O,K,t))
return index
model.y_tr_rule = pyo.Set(dimen=4, initialize=y_tr_rule)
model.y_tr_skt = pyo.Var(model.y_tr_rule,
within=pyo.Binary
)
def delta_l_rule(m):
index = []
for l in L:
for s in Omega_N:
for O in Omega_l_s[l][s-1]:
for K in K_l[l]:
for t in T:
for b in B:
for V in range(1,n__V+1):
index.append((l,s,O,K,t,b,V))
return index
model.delta_l_rule = pyo.Set(dimen=7, initialize=delta_l_rule)
model.delta_l_srktbv = pyo.Var(model.delta_l_rule,
bounds=(0.0,None)
)
def delta_tr_rule(m):
index = []
for tr in TR:
for O in Omega_SS:
for K in K_tr[tr]:
for t in T:
for b in B:
for V in range(1,n__V+1):
index.append((tr,O,K,t,b,V))
return index
model.delta_tr_rule = pyo.Set(dimen=6, initialize=delta_tr_rule)
model.delta_tr_sktbv = pyo.Var(model.delta_tr_rule,
bounds=(0.0,None)
)
# =============================================================================
# Objective Function
# =============================================================================
model.Obj = pyo.Objective(expr=model.C_TPV, sense=pyo.minimize)
# =============================================================================
# Costs Constraints
# =============================================================================
def C_TPV_rule(m):
return model.C_TPV == (sum(model.C_I_t[t]*(((1+i)**(-t))/i)
for t in T)
+ sum((model.C_M_t[t] + model.C_E_t[t] + model.C_R_t[t] + model.C_U_t[t])*((1+i)**(-t))
for t in T)
+ ((model.C_M_t[T[-1]] + model.C_E_t[T[-1]] + model.C_R_t[T[-1]] + model.C_U_t[T[-1]])*((1+i)**(-T[-1])/i))
)
model.eq1 = pyo.Constraint(rule=C_TPV_rule)
def eq2_rule(model,t):
return model.C_I_t[t] == (sum(RR_l[l]*sum(sum(C_Il_k[l][k-1]*l__sr[s-1,r-1]*model.x_l_srkt[l,s,r,k,t]
for s,r in Upsilon_l[l])
for k in K_l[l])
for l in ["NRF", "NAF"])
+ RR_SS*sum(C_ISS_s[s]*model.x_SS_st[s,t]
for s in Omega_SS)
+ RR_NT*sum(sum(C_INT_k[k-1]*model.x_NT_skt[s,k,t]
for s in Omega_SS)
for k in K_tr["NT"])
+ sum(RR_p[p]*sum(sum(C_Ip_k[p][k-1]*pf*Gup_p_k[p][k-1]*model.x_p_skt[p,s,k,t]
for s in Omega_p[p])
for k in K_p[p])
for p in P)
)
model.eq2 = pyo.Constraint(T, rule=eq2_rule)
def eq3_rule(model,t):
return model.C_M_t[t] == (sum(sum(sum(C_Ml_k[l][k-1]*(model.y_l_srkt[l,s,r,k,t] + model.y_l_srkt[l,r,s,k,t])
for s,r in Upsilon_l[l])
for k in K_l[l])
for l in L)
+ sum(sum(sum(C_Mtr_k[tr][k-1]*model.y_tr_skt[tr,s,k,t]
for s in Omega_SS)
for k in K_tr[tr])
for tr in TR)
+ sum(sum(sum(C_Mp_k[p][k-1]*model.y_p_skt[p,s,k,t]
for s in Omega_p[p])
for k in K_p[p])
for p in P)
)
model.eq3 = pyo.Constraint(T, rule=eq3_rule)
def eq4_rule(model,t):
return model.C_E_t[t] == (sum(Delta__b[b-1]*pf*(sum(sum(sum(C_SS_b[b-1]*model.g_tr_sktb[tr,s,k,t,b]
for s in Omega_SS)
for k in K_tr[tr])
for tr in TR)
+ sum(sum(sum(C_Ep_k[p][k-1]*model.g_p_sktb[p,s,k,t,b]
for s in Omega_p[p])
for k in K_p[p])
for p in P)
)
for b in B)
)
model.eq4 = pyo.Constraint(T, rule=eq4_rule)
def eq5_rule(model,t):
return model.C_R_t[t] == (sum(Delta__b[b-1]*C_SS_b[b-1]*pf*(sum(sum(sum(sum(
M_tr_kV[tr][k-1][y-1]*model.delta_tr_sktbv[tr,s,k,t,b,y]
for y in range(1,n__V+1))
for s in Omega_SS)
for k in K_tr[tr])
for tr in TR)
+ sum(sum(sum(sum(M_l_kV[l][k-1][z-1]*l__sr[s-1,r-1]*(model.delta_l_srktbv[l,s,r,k,t,b,z] + model.delta_l_srktbv[l,r,s,k,t,b,z])
for z in range(1,n__V+1))
for s, r in Upsilon_l[l])
for k in K_l[l])
for l in L)
)
for b in B)
)
model.eq5 = pyo.Constraint(T, rule=eq5_rule)
model.eq5_aux1 = pyo.ConstraintList()
for tr in TR:
for s in Omega_SS:
for k in K_tr[tr]:
for t in T:
for b in B:
model.eq5_aux1.add(model.g_tr_sktb[tr,s,k,t,b] == sum(model.delta_tr_sktbv[tr,s,k,t,b,V] for V in range(1,n__V+1)))
model.eq5_aux2 = pyo.ConstraintList()
for tr in TR:
for s in Omega_SS:
for k in K_tr[tr]:
for t in T:
for b in B:
for V in range(1,n__V+1):
model.eq5_aux2.add(model.delta_tr_sktbv[tr,s,k,t,b,V] <= A_tr_kV[tr][k-1][V-1])
model.eq5_aux3 = pyo.ConstraintList()
for l in L:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
for t in T:
for b in B:
model.eq5_aux3.add(model.f_l_srktb[l,s,r,k,t,b] == sum(model.delta_l_srktbv[l,s,r,k,t,b,v] for v in range(1,n__V+1)))
model.eq5_aux4 = pyo.ConstraintList()
for l in L:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
for t in T:
for b in B:
for v in range(1,n__V+1):
model.eq5_aux4.add(model.delta_l_srktbv[l,s,r,k,t,b,v] <= A_l_kV[l][k-1][v-1])
def eq6_rule(model,t):
return model.C_U_t[t] == (sum(sum(Delta__b[b-1]*C_U*pf*model.d_U_stb[s,t,b]
for s in Omega_LN_t[t])
for b in B)
)
model.eq6 = pyo.Constraint(T, rule=eq6_rule)
# =============================================================================
# Kirchhoff's Laws and Operational Limits
# =============================================================================
def eq7_rule(model, s, t, b):
return pyo.inequality(V_ ,model.V_stb[s,t,b], Vup)
model.eq7 = pyo.Constraint(Omega_N, T, B, rule=eq7_rule)
def eq7_aux_rule(model, s, t, b):
return model.V_stb[s,t,b] == V_SS
model.eq7_aux = pyo.Constraint(Omega_SS, T, B, rule=eq7_aux_rule)
model.eq8 = pyo.ConstraintList()
for l in L:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
for t in T:
for b in B:
model.eq8.add(model.f_l_srktb[l,s,r,k,t,b] <= model.y_l_srkt[l,s,r,k,t]*Fup_l_k[l][k-1])
model.eq9 = pyo.ConstraintList()
for tr in TR:
for s in Omega_N:
for k in K_tr[tr]:
for t in T:
for b in B:
model.eq9.add(model.g_tr_sktb[tr,s,k,t,b] <= model.y_tr_skt[tr,s,k,t]*Gup_tr_k[tr][k-1])
model.eq10 = pyo.ConstraintList()
for t in T:
for s in Omega_N:
for b in B:
model.eq10.add(model.d_U_stb[s,t,b] <= Mi__b[b-1]*D__st[s-1,t-1])
model.eq11 = pyo.ConstraintList()
for s in Omega_N:
for k in K_p["C"]:
for t in T:
for b in B:
model.eq11.add(model.g_p_sktb["C",s,k,t,b] <= model.y_p_skt["C",s,k,t]*Gup_p_k["C"][k-1])
model.eq12 = pyo.ConstraintList()
for s in Omega_N:
for k in K_p["W"]:
for t in T:
for b in B:
model.eq12.add(model.g_p_sktb["W",s,k,t,b] <= model.y_p_skt["W",s,k,t]*min(Gup_p_k["W"][k-1],Gmax_W_sktb[s-1,k-1,t-1,b-1]))
model.eq13 = pyo.ConstraintList()
for t in T:
for b in B:
model.eq13.add(sum(sum(sum(model.g_p_sktb[p,s,k,t,b]
for s in Omega_p[p])
for k in K_p[p])
for p in P)
<= Vare*sum(Mi__b[b-1]*D__st[s-1,t-1]
for s in Omega_LN_t[t])
)
model.eq14 = pyo.ConstraintList()
for t in T:
for b in B:
for s in Omega_N:
model.eq14.add(sum(sum(sum(model.f_l_srktb[l,s,r,k,t,b] - model.f_l_srktb[l,r,s,k,t,b]
for r in Omega_l_s[l][s-1])
for k in K_l[l])
for l in L) == (sum(sum(model.g_tr_sktb[tr,s,k,t,b]
for k in K_tr[tr])
for tr in TR)
+ sum(sum(model.g_p_sktb[p,s,k,t,b]
for k in K_p[p])
for p in P)
- Mi__b[b-1]*D__st[s-1,t-1]
+ model.d_U_stb[s,t,b]
)
)
model.eq14_aux1 = pyo.ConstraintList() #It allows DG only on candidates nodes
for t in T:
for p in P:
for k in K_p[p]:
for s in Omega_N:
if s not in Omega_p[p]:
model.eq14_aux1.add(model.y_p_skt[p,s,k,t] == 0)
model.eq14_aux2 = pyo.ConstraintList() #It allows transf. only on candidates nodes
for t in T:
for tr in TR:
for k in K_tr[tr]:
for s in Omega_N:
if s not in Omega_SS:
model.eq14_aux2.add(model.y_tr_skt[tr,s,k,t] == 0)
model.eq14_aux3 = pyo.ConstraintList() # It avoids "ET" transf. on new substations
for t in T:
for b in B:
for s in Omega_SSN:
for k in K_tr['ET']:
model.eq14_aux3.add(model.y_tr_skt['ET',s,k,t] == 0)
model.eq14_aux4 = pyo.ConstraintList() # It allows one type of transf. on existing substation nodes
for t in T:
for s in Omega_SSE:
model.eq14_aux4.add(sum(sum(model.y_tr_skt[tr,s,k,t]
for k in K_tr[tr])
for tr in TR) <= 1
)
model.eq16_1 = pyo.ConstraintList()
for t in T:
for b in B:
for l in L:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
model.eq16_1.add((-Z_l_k[l][k-1]*l__sr[s-1,r-1]*model.f_l_srktb[l,s,r,k,t,b]/Vbase + (model.V_stb[s,t,b] - model.V_stb[r,t,b]))
<= H*(1-model.y_l_srkt[l,s,r,k,t]))
model.eq16_2 = pyo.ConstraintList()
for t in T:
for b in B:
for l in L:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
model.eq16_2.add((Z_l_k[l][k-1]*l__sr[s-1,r-1]*model.f_l_srktb[l,s,r,k,t,b]/Vbase - (model.V_stb[s,t,b] - model.V_stb[r,t,b]))
<= H*(1-model.y_l_srkt[l,s,r,k,t]))
# =============================================================================
# Investiment Constraints
# =============================================================================
model.eq17 = pyo.ConstraintList()
for l in ["NRF", "NAF"]:
for s,r in Upsilon_l[l]:
model.eq17.add(sum(sum(model.x_l_srkt[l,s,r,k,t]
for k in K_l[l])
for t in T) <= 1
)
model.eq18 = pyo.ConstraintList()
for s in Omega_SS:
model.eq18.add(sum(model.x_SS_st[s,t]
for t in T) <= 1
)
model.eq19 = pyo.ConstraintList()
for s in Omega_SS:
model.eq19.add(sum(sum(model.x_NT_skt[s,k,t]
for k in K_tr["NT"])
for t in T) <= 1
)
model.eq20 = pyo.ConstraintList()
for p in P:
for s in Omega_p[p]:
model.eq20.add(sum(sum(model.x_p_skt[p,s,k,t]
for k in K_p[p])
for t in T) <= 1
)
model.eq21 = pyo.ConstraintList()
for s in Omega_SS:
for k in K_tr["NT"]:
for t in T:
model.eq21.add(model.x_NT_skt[s,k,t]
<=
sum(model.x_SS_st[s,y]
for y in range(1,t+1))
)
#Eq. updated #Ref: DOI: 10.1109/TSG.2016.2560339
model.eq22 = pyo.ConstraintList()
for t in T:
for l in ["EFF"]:
for k in K_l[l]:
for s,r in Upsilon_l[l]:
model.eq22.add(model.y_l_srkt[l,s,r,k,t] + model.y_l_srkt[l,r,s,k,t]
== 1
)
#Eq. updated #Ref: DOI: 10.1109/TSG.2016.2560339
model.eq23 = pyo.ConstraintList()
for t in T:
for l in ["NRF", "NAF"]:
for k in K_l[l]:
for s,r in Upsilon_l[l]:
model.eq23.add(model.y_l_srkt[l,s,r,k,t] + model.y_l_srkt[l,r,s,k,t]
== sum(model.x_l_srkt[l,s,r,k,y]
for y in range(1,t+1))
)
#Eq. updated #Ref: DOI: 10.1109/TSG.2016.2560339
model.eq24 = pyo.ConstraintList()
for t in T:
for l in ["ERF"]:
for k in K_l[l]:
for s,r in Upsilon_l[l]:
model.eq24.add(model.y_l_srkt[l,s,r,k,t] + model.y_l_srkt[l,r,s,k,t]
== 1 - sum(sum(model.x_l_srkt["NRF",s,r,z,y]
for z in K_l["NRF"])
for y in range(1,t+1))
)
model.eq25 = pyo.ConstraintList()
for t in T:
for s in Omega_SS:
for k in K_tr["NT"]:
model.eq25.add(model.y_tr_skt["NT", s, k, t]
<= sum(model.x_NT_skt[s,k,y]
for y in range(1,t+1))
)
model.eq26 = pyo.ConstraintList()
for t in T:
for p in P:
for s in Omega_p[p]:
for k in K_p[p]:
model.eq26.add(model.y_p_skt[p,s,k,t] <=
sum(model.x_p_skt[p,s,k,y]
for y in range(1,t+1))
)
def eq27_rule(model,t):
return ((sum(sum(sum(C_Il_k[l][k-1]*l__sr[s-1,r-1]*model.x_l_srkt[l,s,r,k,t]
for s,r in Upsilon_l[l])
for k in K_l[l])
for l in ["NRF", "NAF"])
+ sum(C_ISS_s[s]*model.x_SS_st[s,t]
for s in Omega_SS)
+ sum(sum(C_INT_k[k-1]*model.x_SS_st[s,t]
for s in Omega_SS)
for k in K_tr["NT"])
+ sum(sum(sum(C_Ip_k[p][k-1]*pf*Gup_p_k[p][k-1]*model.x_p_skt[p,s,k,t]
for s in Omega_p[p])
for k in K_p[p])
for p in P)
<= IB__t[t-1])
)
model.eq27 = pyo.Constraint(T, rule=eq27_rule)
# =============================================================================
# Radiality Constraints
# =============================================================================
model.eq28 = pyo.ConstraintList()
for t in T:
for r in Omega_LN_t[t]:
model.eq28.add(sum(sum(sum(model.y_l_srkt[l,s,r,k,t]
for k in K_l[l])
for s in Omega_l_s[l][r-1])
for l in L) == 1
)
model.eq29 = pyo.ConstraintList()
for t in T:
for r in Omega_N:
if r not in Omega_LN_t[t]:
model.eq29.add(sum(sum(sum(model.y_l_srkt[l,s,r,k,t]
for k in K_l[l])
for s in Omega_l_s[l][r-1])
for l in L) <= 1
)
model.eq30 = pyo.ConstraintList()
for t in T:
for b in B:
for s in Omega_N:
model.eq30.add(sum(sum(sum(model.ftio_l_srktb[l,s,r,k,t,b] - model.ftio_l_srktb[l,r,s,k,t,b]
for r in Omega_l_s[l][s-1])
for k in K_l[l])
for l in L) == model.gtio_SS_stb[s,t,b] - Dtio_stb[s-1,t-1,b-1]
)
model.eq31 = pyo.ConstraintList()
for t in T:
for b in B:
for l in ["EFF"]:
for r in Omega_N:
for s in Omega_l_s[l][r-1]:
for k in K_l[l]:
model.eq31.add(model.ftio_l_srktb[l,s,r,k,t,b] <= n__DG)
model.eq32 = pyo.ConstraintList()
for t in T:
for b in B:
for l in ["ERF"]:
for s,r in Upsilon_l[l]:
for k in K_l[l]:
model.eq32.add(model.ftio_l_srktb[l,s,r,k,t,b] <= n__DG*(
1 - sum(sum(model.x_l_srkt["NRF",s,r,z,y]
for z in K_l["NRF"])
for y in range(1,t+1))
)
)
model.eq33 = pyo.ConstraintList()
for t in T:
for b in B:
for l in ["ERF"]:
for s,r in Upsilon_l[l]:
for k in K_l[l]:
model.eq33.add(model.ftio_l_srktb[l,r,s,k,t,b] <= n__DG*(
1 - sum(sum(model.x_l_srkt["NRF",s,r,z,y]
for z in K_l["NRF"])
for y in range(1,t+1))
)
)
model.eq34 = pyo.ConstraintList()
for t in T:
for b in B:
for l in ["NRF", "NAF"]:
for k in K_l[l]:
for s,r in Upsilon_l[l]:
model.eq34.add(model.ftio_l_srktb[l,s,r,k,t,b] <= n__DG*(
sum(model.x_l_srkt[l,s,r,k,y]
for y in range(1,t+1))
)
)
model.eq35 = pyo.ConstraintList()
for t in T:
for b in B:
for l in ["NRF", "NAF"]:
for k in K_l[l]:
for s,r in Upsilon_l[l]:
model.eq35.add(model.ftio_l_srktb[l,r,s,k,t,b] <= n__DG*(
sum(model.x_l_srkt[l,s,r,k,y]
for y in range(1,t+1))
)
)
model.eq36 = pyo.ConstraintList()
for t in T:
for b in B:
for s in Omega_SS:
model.eq36.add(model.gtio_SS_stb[s,t,b] <= n__DG)
model.eq36_aux = pyo.ConstraintList()
for t in T:
for b in B:
for s in Omega_N:
if s not in Omega_SS:
model.eq36_aux.add(model.gtio_SS_stb[s,t,b] == 0)
# =============================================================================
# Solver
# =============================================================================
opt = SolverFactory('cplex')
opt.options['threads'] = 16
opt.options['mipgap'] = 0.5/100
opt.solve(model, warmstart=False, tee=True)
# =============================================================================
# Results: Reports
# =============================================================================
#Results -
Yearly_Costs = []
for i in range(1,np.shape(T)[0]+1):
year_aux = {
'Investment':np.round(pyo.value(model.C_I_t[i])/1e6,4),
'Maintenance':np.round(pyo.value(model.C_M_t[i])/1e6,4),
'Production':np.round(pyo.value(model.C_E_t[i])/1e6,4),
'Losses':np.round(pyo.value(model.C_R_t[i])/1e6,4),
'Unserved_energy':np.round(pyo.value(model.C_U_t[i])/1e6,4)
}
Yearly_Costs.append(year_aux)
Yearly_Costs = pd.DataFrame(Yearly_Costs)
#Binary utilization variables for feeders
Variable_Util_l = []
for l in L: #Type of line
for s in Omega_N: #Buses from
for r in Omega_l_s[l][s-1]: #Buses to
for k in K_l[l]: #Line option
for t in T: #Time stage
if pyo.value(model.y_l_srkt[l,s,r,k,t]) == 1:
var_aux ={
'T_Line': l,
'From': s,
'To': r,
'Option': k,
'Stage': t,
'Decision': pyo.value(model.y_l_srkt[l,s,r,k,t])
}
Variable_Util_l.append(var_aux)
Variable_Util_l = pd.DataFrame(Variable_Util_l)
#Binary utilization variables for transformers
Variable_Util_tr = []
for tr in TR:
for s in Omega_N:
for k in K_tr[tr]:
for t in T:
if pyo.value(model.y_tr_skt[tr,s,k,t]) == 1:
var_aux ={
"Trans_T":tr,
"Bus":s,
"Option":k,
"Stage":t,
"Decision":pyo.value(model.y_tr_skt[tr,s,k,t])
}
Variable_Util_tr.append(var_aux)
Variable_Util_tr = pd.DataFrame(Variable_Util_tr)
#Binary utilization variables for DGs
Variable_Util_dg = []
for p in P:
for O in Omega_N:
for K in K_p[p]:
for t in T:
if pyo.value(model.y_p_skt[p,O,K,t]) == 1:
var_aux ={
"DG_P":p,
"Bus":O,
"Option":K,
"Stage":t,
"Decision":pyo.value(model.y_p_skt[p,O,K,t])
}
Variable_Util_dg.append(var_aux)
Variable_Util_dg = pd.DataFrame(Variable_Util_dg)
#Current injections corresponding to transformers
Current_inj_TR = []
for tr in TR:
for s in Omega_N:
for k in K_tr[tr]:
for t in T:
for b in B:
if pyo.value(model.g_tr_sktb[tr,s,k,t,b]) > 0:
aux = {
"TR_Type": tr,
"Bus": s,
"Option": k,
"Stage": t,
"Load_l": b,
"Injection": pyo.value(model.g_tr_sktb[tr,s,k,t,b])
}
Current_inj_TR.append(aux)
Current_inj_TR = pd.DataFrame(Current_inj_TR)
#Current injections corresponding to DG
Current_inj_DG = []
for p in P:
for O in Omega_N:
for kp in K_p[p]:
for t in T:
for b in B:
if pyo.value(model.g_p_sktb[p,O,kp,t,b]) > 0:
aux = {
"DG_Type": p,
"Bus": O,
"Option":kp,
"Stage":t,
"Load_l":b,
"Injection":pyo.value(model.g_p_sktb[p,O,kp,t,b])
}
Current_inj_DG.append(aux)
Current_inj_DG = pd.DataFrame(Current_inj_DG)
#Actual current flows through feeders
Actual_C_Flow_l = []
for l in L: #Type of line
for s in Omega_N: #Buses from
for r in Omega_l_s[l][s-1]: #Buses to
for k in K_l[l]: #Line option
for t in T: #Time stage
for b in B: #Load level
if pyo.value(model.f_l_srktb[l,s,r,k,t,b]) > 0.1:
actual_aux = {
'T_Line': l,
'From': s,
'To': r,
'Option': k,
'Stage': t,
'L_level': b,
'Flow': pyo.value(model.f_l_srktb[l,s,r,k,t,b])
}
Actual_C_Flow_l.append(actual_aux)
Actual_C_Flow_l = pd.DataFrame(Actual_C_Flow_l)
| [
"pandas.DataFrame",
"pyomo.environ.Constraint",
"pyomo.environ.Var",
"pyomo.environ.value",
"pyomo.environ.Objective",
"numpy.shape",
"pyomo.opt.SolverFactory",
"pyomo.environ.ConcreteModel",
"pyomo.environ.Set",
"pyomo.environ.ConstraintList",
"pyomo.environ.inequality"
] | [((949, 968), 'pyomo.environ.ConcreteModel', 'pyo.ConcreteModel', ([], {}), '()\n', (966, 968), True, 'import pyomo.environ as pyo\n'), ((1157, 1187), 'pyomo.environ.Var', 'pyo.Var', (['T'], {'bounds': '(0.0, None)'}), '(T, bounds=(0.0, None))\n', (1164, 1187), True, 'import pyomo.environ as pyo\n'), ((1248, 1278), 'pyomo.environ.Var', 'pyo.Var', (['T'], {'bounds': '(0.0, None)'}), '(T, bounds=(0.0, None))\n', (1255, 1278), True, 'import pyomo.environ as pyo\n'), ((1339, 1369), 'pyomo.environ.Var', 'pyo.Var', (['T'], {'bounds': '(0.0, None)'}), '(T, bounds=(0.0, None))\n', (1346, 1369), True, 'import pyomo.environ as pyo\n'), ((1430, 1460), 'pyomo.environ.Var', 'pyo.Var', (['T'], {'bounds': '(0.0, None)'}), '(T, bounds=(0.0, None))\n', (1437, 1460), True, 'import pyomo.environ as pyo\n'), ((1521, 1551), 'pyomo.environ.Var', 'pyo.Var', (['T'], {'bounds': '(0.0, None)'}), '(T, bounds=(0.0, None))\n', (1528, 1551), True, 'import pyomo.environ as pyo\n'), ((1623, 1650), 'pyomo.environ.Var', 'pyo.Var', ([], {'bounds': '(0.0, None)'}), '(bounds=(0.0, None))\n', (1630, 1650), True, 'import pyomo.environ as pyo\n'), ((1690, 1732), 'pyomo.environ.Var', 'pyo.Var', (['Omega_N', 'T', 'B'], {'bounds': '(0.0, None)'}), '(Omega_N, T, B, bounds=(0.0, None))\n', (1697, 1732), True, 'import pyomo.environ as pyo\n'), ((2149, 2186), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(6)', 'initialize': 'f_l_rule'}), '(dimen=6, initialize=f_l_rule)\n', (2156, 2186), True, 'import pyomo.environ as pyo\n'), ((2205, 2248), 'pyomo.environ.Var', 'pyo.Var', (['model.f_l_rule'], {'bounds': '(0.0, None)'}), '(model.f_l_rule, bounds=(0.0, None))\n', (2212, 2248), True, 'import pyomo.environ as pyo\n'), ((2300, 2343), 'pyomo.environ.Var', 'pyo.Var', (['model.f_l_rule'], {'bounds': '(0.0, None)'}), '(model.f_l_rule, bounds=(0.0, None))\n', (2307, 2343), True, 'import pyomo.environ as pyo\n'), ((2625, 2662), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(5)', 'initialize': 'g_p_rule'}), '(dimen=5, initialize=g_p_rule)\n', (2632, 2662), True, 'import pyomo.environ as pyo\n'), ((2680, 2723), 'pyomo.environ.Var', 'pyo.Var', (['model.g_p_rule'], {'bounds': '(0.0, None)'}), '(model.g_p_rule, bounds=(0.0, None))\n', (2687, 2723), True, 'import pyomo.environ as pyo\n'), ((3033, 3071), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(5)', 'initialize': 'g_tr_rule'}), '(dimen=5, initialize=g_tr_rule)\n', (3040, 3071), True, 'import pyomo.environ as pyo\n'), ((3090, 3134), 'pyomo.environ.Var', 'pyo.Var', (['model.g_tr_rule'], {'bounds': '(0.0, None)'}), '(model.g_tr_rule, bounds=(0.0, None))\n', (3097, 3134), True, 'import pyomo.environ as pyo\n'), ((3209, 3251), 'pyomo.environ.Var', 'pyo.Var', (['Omega_N', 'T', 'B'], {'bounds': '(0.0, None)'}), '(Omega_N, T, B, bounds=(0.0, None))\n', (3216, 3251), True, 'import pyomo.environ as pyo\n'), ((3357, 3399), 'pyomo.environ.Var', 'pyo.Var', (['Omega_N', 'T', 'B'], {'bounds': '(0.0, None)'}), '(Omega_N, T, B, bounds=(0.0, None))\n', (3364, 3399), True, 'import pyomo.environ as pyo\n'), ((3768, 3805), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(5)', 'initialize': 'x_l_rule'}), '(dimen=5, initialize=x_l_rule)\n', (3775, 3805), True, 'import pyomo.environ as pyo\n'), ((3823, 3865), 'pyomo.environ.Var', 'pyo.Var', (['model.x_l_rule'], {'within': 'pyo.Binary'}), '(model.x_l_rule, within=pyo.Binary)\n', (3830, 3865), True, 'import pyomo.environ as pyo\n'), ((4098, 4136), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(3)', 'initialize': 'x_NT_rule'}), '(dimen=3, initialize=x_NT_rule)\n', (4105, 4136), True, 'import pyomo.environ as pyo\n'), ((4154, 4197), 'pyomo.environ.Var', 'pyo.Var', (['model.x_NT_rule'], {'within': 'pyo.Binary'}), '(model.x_NT_rule, within=pyo.Binary)\n', (4161, 4197), True, 'import pyomo.environ as pyo\n'), ((4444, 4481), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(4)', 'initialize': 'x_p_rule'}), '(dimen=4, initialize=x_p_rule)\n', (4451, 4481), True, 'import pyomo.environ as pyo\n'), ((4498, 4540), 'pyomo.environ.Var', 'pyo.Var', (['model.x_p_rule'], {'within': 'pyo.Binary'}), '(model.x_p_rule, within=pyo.Binary)\n', (4505, 4540), True, 'import pyomo.environ as pyo\n'), ((4720, 4758), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(2)', 'initialize': 'x_SS_rule'}), '(dimen=2, initialize=x_SS_rule)\n', (4727, 4758), True, 'import pyomo.environ as pyo\n'), ((4775, 4818), 'pyomo.environ.Var', 'pyo.Var', (['model.x_SS_rule'], {'within': 'pyo.Binary'}), '(model.x_SS_rule, within=pyo.Binary)\n', (4782, 4818), True, 'import pyomo.environ as pyo\n'), ((5134, 5171), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(5)', 'initialize': 'y_l_rule'}), '(dimen=5, initialize=y_l_rule)\n', (5141, 5171), True, 'import pyomo.environ as pyo\n'), ((5189, 5231), 'pyomo.environ.Var', 'pyo.Var', (['model.y_l_rule'], {'within': 'pyo.Binary'}), '(model.y_l_rule, within=pyo.Binary)\n', (5196, 5231), True, 'import pyomo.environ as pyo\n'), ((5474, 5511), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(4)', 'initialize': 'y_p_rule'}), '(dimen=4, initialize=y_p_rule)\n', (5481, 5511), True, 'import pyomo.environ as pyo\n'), ((5528, 5570), 'pyomo.environ.Var', 'pyo.Var', (['model.y_p_rule'], {'within': 'pyo.Binary'}), '(model.y_p_rule, within=pyo.Binary)\n', (5535, 5570), True, 'import pyomo.environ as pyo\n'), ((5819, 5857), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(4)', 'initialize': 'y_tr_rule'}), '(dimen=4, initialize=y_tr_rule)\n', (5826, 5857), True, 'import pyomo.environ as pyo\n'), ((5875, 5918), 'pyomo.environ.Var', 'pyo.Var', (['model.y_tr_rule'], {'within': 'pyo.Binary'}), '(model.y_tr_rule, within=pyo.Binary)\n', (5882, 5918), True, 'import pyomo.environ as pyo\n'), ((6325, 6366), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(7)', 'initialize': 'delta_l_rule'}), '(dimen=7, initialize=delta_l_rule)\n', (6332, 6366), True, 'import pyomo.environ as pyo\n'), ((6390, 6437), 'pyomo.environ.Var', 'pyo.Var', (['model.delta_l_rule'], {'bounds': '(0.0, None)'}), '(model.delta_l_rule, bounds=(0.0, None))\n', (6397, 6437), True, 'import pyomo.environ as pyo\n'), ((6824, 6866), 'pyomo.environ.Set', 'pyo.Set', ([], {'dimen': '(6)', 'initialize': 'delta_tr_rule'}), '(dimen=6, initialize=delta_tr_rule)\n', (6831, 6866), True, 'import pyomo.environ as pyo\n'), ((6890, 6938), 'pyomo.environ.Var', 'pyo.Var', (['model.delta_tr_rule'], {'bounds': '(0.0, None)'}), '(model.delta_tr_rule, bounds=(0.0, None))\n', (6897, 6938), True, 'import pyomo.environ as pyo\n'), ((7169, 7220), 'pyomo.environ.Objective', 'pyo.Objective', ([], {'expr': 'model.C_TPV', 'sense': 'pyo.minimize'}), '(expr=model.C_TPV, sense=pyo.minimize)\n', (7182, 7220), True, 'import pyomo.environ as pyo\n'), ((7865, 7896), 'pyomo.environ.Constraint', 'pyo.Constraint', ([], {'rule': 'C_TPV_rule'}), '(rule=C_TPV_rule)\n', (7879, 7896), True, 'import pyomo.environ as pyo\n'), ((8956, 8988), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq2_rule'}), '(T, rule=eq2_rule)\n', (8970, 8988), True, 'import pyomo.environ as pyo\n'), ((9615, 9647), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq3_rule'}), '(T, rule=eq3_rule)\n', (9629, 9647), True, 'import pyomo.environ as pyo\n'), ((10230, 10262), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq4_rule'}), '(T, rule=eq4_rule)\n', (10244, 10262), True, 'import pyomo.environ as pyo\n'), ((11038, 11070), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq5_rule'}), '(T, rule=eq5_rule)\n', (11052, 11070), True, 'import pyomo.environ as pyo\n'), ((11089, 11109), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (11107, 11109), True, 'import pyomo.environ as pyo\n'), ((11380, 11400), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (11398, 11400), True, 'import pyomo.environ as pyo\n'), ((11685, 11705), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (11703, 11705), True, 'import pyomo.environ as pyo\n'), ((12025, 12045), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (12043, 12045), True, 'import pyomo.environ as pyo\n'), ((12552, 12584), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq6_rule'}), '(T, rule=eq6_rule)\n', (12566, 12584), True, 'import pyomo.environ as pyo\n'), ((12886, 12930), 'pyomo.environ.Constraint', 'pyo.Constraint', (['Omega_N', 'T', 'B'], {'rule': 'eq7_rule'}), '(Omega_N, T, B, rule=eq7_rule)\n', (12900, 12930), True, 'import pyomo.environ as pyo\n'), ((13020, 13069), 'pyomo.environ.Constraint', 'pyo.Constraint', (['Omega_SS', 'T', 'B'], {'rule': 'eq7_aux_rule'}), '(Omega_SS, T, B, rule=eq7_aux_rule)\n', (13034, 13069), True, 'import pyomo.environ as pyo\n'), ((13083, 13103), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (13101, 13103), True, 'import pyomo.environ as pyo\n'), ((13413, 13433), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (13431, 13433), True, 'import pyomo.environ as pyo\n'), ((13672, 13692), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (13690, 13692), True, 'import pyomo.environ as pyo\n'), ((13839, 13859), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (13857, 13859), True, 'import pyomo.environ as pyo\n'), ((14065, 14085), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (14083, 14085), True, 'import pyomo.environ as pyo\n'), ((14325, 14345), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (14343, 14345), True, 'import pyomo.environ as pyo\n'), ((14638, 14658), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (14656, 14658), True, 'import pyomo.environ as pyo\n'), ((15426, 15446), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (15444, 15446), True, 'import pyomo.environ as pyo\n'), ((15717, 15737), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (15735, 15737), True, 'import pyomo.environ as pyo\n'), ((15997, 16017), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (16015, 16017), True, 'import pyomo.environ as pyo\n'), ((16239, 16259), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (16257, 16259), True, 'import pyomo.environ as pyo\n'), ((16522, 16542), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (16540, 16542), True, 'import pyomo.environ as pyo\n'), ((16947, 16967), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (16965, 16967), True, 'import pyomo.environ as pyo\n'), ((17584, 17604), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (17602, 17604), True, 'import pyomo.environ as pyo\n'), ((17826, 17846), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (17844, 17846), True, 'import pyomo.environ as pyo\n'), ((17968, 17988), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (17986, 17988), True, 'import pyomo.environ as pyo\n'), ((18147, 18167), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (18165, 18167), True, 'import pyomo.environ as pyo\n'), ((18345, 18365), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (18363, 18365), True, 'import pyomo.environ as pyo\n'), ((18699, 18719), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (18717, 18719), True, 'import pyomo.environ as pyo\n'), ((19018, 19038), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (19036, 19038), True, 'import pyomo.environ as pyo\n'), ((19431, 19451), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (19449, 19451), True, 'import pyomo.environ as pyo\n'), ((19860, 19880), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (19878, 19880), True, 'import pyomo.environ as pyo\n'), ((20156, 20176), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (20174, 20176), True, 'import pyomo.environ as pyo\n'), ((21091, 21124), 'pyomo.environ.Constraint', 'pyo.Constraint', (['T'], {'rule': 'eq27_rule'}), '(T, rule=eq27_rule)\n', (21105, 21124), True, 'import pyomo.environ as pyo\n'), ((21323, 21343), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (21341, 21343), True, 'import pyomo.environ as pyo\n'), ((21580, 21600), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (21598, 21600), True, 'import pyomo.environ as pyo\n'), ((21886, 21906), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (21904, 21906), True, 'import pyomo.environ as pyo\n'), ((22290, 22310), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (22308, 22310), True, 'import pyomo.environ as pyo\n'), ((22607, 22627), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (22625, 22627), True, 'import pyomo.environ as pyo\n'), ((23063, 23083), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (23081, 23083), True, 'import pyomo.environ as pyo\n'), ((23519, 23539), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (23537, 23539), True, 'import pyomo.environ as pyo\n'), ((23916, 23936), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (23934, 23936), True, 'import pyomo.environ as pyo\n'), ((24313, 24333), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (24331, 24333), True, 'import pyomo.environ as pyo\n'), ((24469, 24489), 'pyomo.environ.ConstraintList', 'pyo.ConstraintList', ([], {}), '()\n', (24487, 24489), True, 'import pyomo.environ as pyo\n'), ((24821, 24843), 'pyomo.opt.SolverFactory', 'SolverFactory', (['"""cplex"""'], {}), "('cplex')\n", (24834, 24843), False, 'from pyomo.opt import SolverFactory\n'), ((25632, 25658), 'pandas.DataFrame', 'pd.DataFrame', (['Yearly_Costs'], {}), '(Yearly_Costs)\n', (25644, 25658), True, 'import pandas as pd\n'), ((26393, 26422), 'pandas.DataFrame', 'pd.DataFrame', (['Variable_Util_l'], {}), '(Variable_Util_l)\n', (26405, 26422), True, 'import pandas as pd\n'), ((26983, 27013), 'pandas.DataFrame', 'pd.DataFrame', (['Variable_Util_tr'], {}), '(Variable_Util_tr)\n', (26995, 27013), True, 'import pandas as pd\n'), ((27553, 27583), 'pandas.DataFrame', 'pd.DataFrame', (['Variable_Util_dg'], {}), '(Variable_Util_dg)\n', (27565, 27583), True, 'import pandas as pd\n'), ((28250, 28278), 'pandas.DataFrame', 'pd.DataFrame', (['Current_inj_TR'], {}), '(Current_inj_TR)\n', (28262, 28278), True, 'import pandas as pd\n'), ((28926, 28954), 'pandas.DataFrame', 'pd.DataFrame', (['Current_inj_DG'], {}), '(Current_inj_DG)\n', (28938, 28954), True, 'import pandas as pd\n'), ((29827, 29856), 'pandas.DataFrame', 'pd.DataFrame', (['Actual_C_Flow_l'], {}), '(Actual_C_Flow_l)\n', (29839, 29856), True, 'import pandas as pd\n'), ((12830, 12875), 'pyomo.environ.inequality', 'pyo.inequality', (['V_', 'model.V_stb[s, t, b]', 'Vup'], {}), '(V_, model.V_stb[s, t, b], Vup)\n', (12844, 12875), True, 'import pyomo.environ as pyo\n'), ((25176, 25187), 'numpy.shape', 'np.shape', (['T'], {}), '(T)\n', (25184, 25187), True, 'import numpy as np\n'), ((25250, 25275), 'pyomo.environ.value', 'pyo.value', (['model.C_I_t[i]'], {}), '(model.C_I_t[i])\n', (25259, 25275), True, 'import pyomo.environ as pyo\n'), ((25323, 25348), 'pyomo.environ.value', 'pyo.value', (['model.C_M_t[i]'], {}), '(model.C_M_t[i])\n', (25332, 25348), True, 'import pyomo.environ as pyo\n'), ((25395, 25420), 'pyomo.environ.value', 'pyo.value', (['model.C_E_t[i]'], {}), '(model.C_E_t[i])\n', (25404, 25420), True, 'import pyomo.environ as pyo\n'), ((25463, 25488), 'pyomo.environ.value', 'pyo.value', (['model.C_R_t[i]'], {}), '(model.C_R_t[i])\n', (25472, 25488), True, 'import pyomo.environ as pyo\n'), ((25540, 25565), 'pyomo.environ.value', 'pyo.value', (['model.C_U_t[i]'], {}), '(model.C_U_t[i])\n', (25549, 25565), True, 'import pyomo.environ as pyo\n'), ((26599, 26637), 'pyomo.environ.value', 'pyo.value', (['model.y_tr_skt[tr, s, k, t]'], {}), '(model.y_tr_skt[tr, s, k, t])\n', (26608, 26637), True, 'import pyomo.environ as pyo\n'), ((27177, 27213), 'pyomo.environ.value', 'pyo.value', (['model.y_p_skt[p, O, K, t]'], {}), '(model.y_p_skt[p, O, K, t])\n', (27186, 27213), True, 'import pyomo.environ as pyo\n'), ((25936, 25976), 'pyomo.environ.value', 'pyo.value', (['model.y_l_srkt[l, s, r, k, t]'], {}), '(model.y_l_srkt[l, s, r, k, t])\n', (25945, 25976), True, 'import pyomo.environ as pyo\n'), ((26849, 26887), 'pyomo.environ.value', 'pyo.value', (['model.y_tr_skt[tr, s, k, t]'], {}), '(model.y_tr_skt[tr, s, k, t])\n', (26858, 26887), True, 'import pyomo.environ as pyo\n'), ((27421, 27457), 'pyomo.environ.value', 'pyo.value', (['model.y_p_skt[p, O, K, t]'], {}), '(model.y_p_skt[p, O, K, t])\n', (27430, 27457), True, 'import pyomo.environ as pyo\n'), ((27793, 27835), 'pyomo.environ.value', 'pyo.value', (['model.g_tr_sktb[tr, s, k, t, b]'], {}), '(model.g_tr_sktb[tr, s, k, t, b])\n', (27802, 27835), True, 'import pyomo.environ as pyo\n'), ((28475, 28516), 'pyomo.environ.value', 'pyo.value', (['model.g_p_sktb[p, O, kp, t, b]'], {}), '(model.g_p_sktb[p, O, kp, t, b])\n', (28484, 28516), True, 'import pyomo.environ as pyo\n'), ((26252, 26292), 'pyomo.environ.value', 'pyo.value', (['model.y_l_srkt[l, s, r, k, t]'], {}), '(model.y_l_srkt[l, s, r, k, t])\n', (26261, 26292), True, 'import pyomo.environ as pyo\n'), ((28113, 28155), 'pyomo.environ.value', 'pyo.value', (['model.g_tr_sktb[tr, s, k, t, b]'], {}), '(model.g_tr_sktb[tr, s, k, t, b])\n', (28122, 28155), True, 'import pyomo.environ as pyo\n'), ((28790, 28831), 'pyomo.environ.value', 'pyo.value', (['model.g_p_sktb[p, O, kp, t, b]'], {}), '(model.g_p_sktb[p, O, kp, t, b])\n', (28799, 28831), True, 'import pyomo.environ as pyo\n'), ((29277, 29321), 'pyomo.environ.value', 'pyo.value', (['model.f_l_srktb[l, s, r, k, t, b]'], {}), '(model.f_l_srktb[l, s, r, k, t, b])\n', (29286, 29321), True, 'import pyomo.environ as pyo\n'), ((29671, 29715), 'pyomo.environ.value', 'pyo.value', (['model.f_l_srktb[l, s, r, k, t, b]'], {}), '(model.f_l_srktb[l, s, r, k, t, b])\n', (29680, 29715), True, 'import pyomo.environ as pyo\n')] |
"""
File: examples/distribution/sech_distribution.py
Author: <NAME>
Date: Oct 15 2019
Description: Example of using the SechDistribution class to represent 1D random
variates.
"""
import os, time
import numpy as np
import matplotlib.pyplot as pl
from distpy import SechDistribution
sample_size = int(1e5)
umean = 12.5
uvar = 2.5
distribution = SechDistribution(umean, uvar)
hdf5_file_name = 'TEST_DELETE_THIS.hdf5'
distribution.save(hdf5_file_name)
try:
assert distribution == SechDistribution.load(hdf5_file_name)
except:
os.remove(hdf5_file_name)
raise
else:
os.remove(hdf5_file_name)
assert distribution.numparams == 1
t0 = time.time()
sample = distribution.draw(sample_size)
print(('It took {0:.5f} s for a sample of size {1} to be drawn from a sech ' +\
'distribution.').format(time.time() - t0, sample_size))
print('Sample mean was {0:.3g}, while expected mean was {1:.3g}.'.format(\
np.mean(sample), distribution.mean))
print(('Sample standard deviation was {0:.3g}, while expected standard ' +\
'deviation was {1:.3g}.').format(np.std(sample),\
distribution.standard_deviation))
fig = pl.figure()
ax = fig.add_subplot(111)
ax.hist(sample, bins=100, histtype='step', color='b', linewidth=2,\
label='sampled', density=True)
xs = np.arange(5., 20., 0.01)
distribution.plot(xs, ax=ax, show=False, linewidth=2, color='r',\
label='e^(log_value)')
ylim = ax.get_ylim()
for xval in distribution.central_confidence_interval(0.6827):
ax.plot(2 * [xval], ylim, color='k')
ax.set_ylim(ylim)
ax.set_title('sech distribution with mean={0!s} and variance={1!s}'.format(\
umean, uvar), size='xx-large')
ax.set_xlabel('Value', size='xx-large')
ax.set_ylabel('PDF', size='xx-large')
ax.tick_params(labelsize='xx-large', width=2, length=6)
ax.legend(fontsize='xx-large')
pl.show()
| [
"os.remove",
"matplotlib.pyplot.show",
"numpy.std",
"time.time",
"distpy.SechDistribution",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"distpy.SechDistribution.load"
] | [((359, 388), 'distpy.SechDistribution', 'SechDistribution', (['umean', 'uvar'], {}), '(umean, uvar)\n', (375, 388), False, 'from distpy import SechDistribution\n'), ((658, 669), 'time.time', 'time.time', ([], {}), '()\n', (667, 669), False, 'import os, time\n'), ((1140, 1151), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (1149, 1151), True, 'import matplotlib.pyplot as pl\n'), ((1286, 1312), 'numpy.arange', 'np.arange', (['(5.0)', '(20.0)', '(0.01)'], {}), '(5.0, 20.0, 0.01)\n', (1295, 1312), True, 'import numpy as np\n'), ((1823, 1832), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (1830, 1832), True, 'import matplotlib.pyplot as pl\n'), ((592, 617), 'os.remove', 'os.remove', (['hdf5_file_name'], {}), '(hdf5_file_name)\n', (601, 617), False, 'import os, time\n'), ((496, 533), 'distpy.SechDistribution.load', 'SechDistribution.load', (['hdf5_file_name'], {}), '(hdf5_file_name)\n', (517, 533), False, 'from distpy import SechDistribution\n'), ((546, 571), 'os.remove', 'os.remove', (['hdf5_file_name'], {}), '(hdf5_file_name)\n', (555, 571), False, 'import os, time\n'), ((929, 944), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (936, 944), True, 'import numpy as np\n'), ((1079, 1093), 'numpy.std', 'np.std', (['sample'], {}), '(sample)\n', (1085, 1093), True, 'import numpy as np\n'), ((818, 829), 'time.time', 'time.time', ([], {}), '()\n', (827, 829), False, 'import os, time\n')] |
#!/usr/bin/env python3
"""Utilities for cross-validation.
Notice data/folds-10.pkl we use in 10-fold cross-val. Keep it to replicate our results"""
import numpy as np
import glob
from os.path import basename, join
from sklearn.model_selection import StratifiedKFold
import pickle
def load_data(in_dir, folds=None, split=None):
"""Builds train/test data from preprocessed features for a given split
# Arguments
in_dir: Input directory containing *.npy CNN feature files.
folds: None or list of splits dict{
"train": {
"x": train files list,
"y": train labels},
"test": {
"x": test files list,
"y": test labels}}
}
split: None or split number.
# Returns
Tran/test data (features and labels) for a given split, if `folds` is not None
Test data (only features) and file names, if `folds` is None
"""
if folds:
y_train = []
x_train = []
for f, l in zip(folds[split]["train"]["x"], folds[split]["train"]["y"]):
x = np.load(join(in_dir, f))
x_train.append(x)
y_train.append([l] * len(x))
x_train = np.vstack(x_train)
y_train = np.concatenate(y_train)
y_test = []
x_test = []
for f, l in zip(folds[split]["test"]["x"], folds[split]["test"]["y"]):
x = np.load(join(in_dir, f))
x_test.append(x)
y_test.append([l] * len(x))
x_test = np.vstack(x_test)
y_test = np.concatenate(y_test)
return x_train, y_train, x_test, y_test
else:
files = glob.glob(in_dir + "/*.npy")
x = []
for f in files:
x.append(np.load(f))
return np.vstack(x), np.array([basename(f) for f in files])
def make_folds():
"""Creates stratified splits based on train directory listing
# Dumps
folds: list of splits dict{
"train": {
"x": train files list,
"y": train labels},
"test": {
"x": test files list,
"y": test labels}}
}
"""
files = np.array([basename(f) for f in glob.glob("data/preprocessed/train/ResNet-0.5-400/*.npy")])
labels = []
classes = np.array([0, 1, 2, 3])
for f in files:
lb = np.array([f.startswith("n"),
f.startswith("b"),
f.startswith("is"),
f.startswith("iv")])
labels.append(classes[np.argmax(lb)])
labels = np.array(labels)
folds = []
skf = StratifiedKFold(n_splits=10, shuffle=True)
for train_index, test_index in skf.split(files, labels):
f_train, f_test = files[train_index], files[test_index]
y_train, y_test = labels[train_index], labels[test_index]
folds.append({"train": {"x": f_train, "y": y_train}, "test": {"x": f_test, "y": y_test}})
with open("data/folds-10.pkl", "wb") as f:
pickle.dump(folds, f)
| [
"pickle.dump",
"numpy.load",
"os.path.basename",
"numpy.argmax",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"glob.glob",
"numpy.vstack",
"os.path.join",
"numpy.concatenate"
] | [((2764, 2786), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (2772, 2786), True, 'import numpy as np\n'), ((3037, 3053), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3045, 3053), True, 'import numpy as np\n'), ((3080, 3122), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (3095, 3122), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1481, 1499), 'numpy.vstack', 'np.vstack', (['x_train'], {}), '(x_train)\n', (1490, 1499), True, 'import numpy as np\n'), ((1518, 1541), 'numpy.concatenate', 'np.concatenate', (['y_train'], {}), '(y_train)\n', (1532, 1541), True, 'import numpy as np\n'), ((1789, 1806), 'numpy.vstack', 'np.vstack', (['x_test'], {}), '(x_test)\n', (1798, 1806), True, 'import numpy as np\n'), ((1824, 1846), 'numpy.concatenate', 'np.concatenate', (['y_test'], {}), '(y_test)\n', (1838, 1846), True, 'import numpy as np\n'), ((1922, 1950), 'glob.glob', 'glob.glob', (["(in_dir + '/*.npy')"], {}), "(in_dir + '/*.npy')\n", (1931, 1950), False, 'import glob\n'), ((3468, 3489), 'pickle.dump', 'pickle.dump', (['folds', 'f'], {}), '(folds, f)\n', (3479, 3489), False, 'import pickle\n'), ((2038, 2050), 'numpy.vstack', 'np.vstack', (['x'], {}), '(x)\n', (2047, 2050), True, 'import numpy as np\n'), ((2653, 2664), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (2661, 2664), False, 'from os.path import basename, join\n'), ((1375, 1390), 'os.path.join', 'join', (['in_dir', 'f'], {}), '(in_dir, f)\n', (1379, 1390), False, 'from os.path import basename, join\n'), ((1686, 1701), 'os.path.join', 'join', (['in_dir', 'f'], {}), '(in_dir, f)\n', (1690, 1701), False, 'from os.path import basename, join\n'), ((2011, 2021), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2018, 2021), True, 'import numpy as np\n'), ((2674, 2731), 'glob.glob', 'glob.glob', (['"""data/preprocessed/train/ResNet-0.5-400/*.npy"""'], {}), "('data/preprocessed/train/ResNet-0.5-400/*.npy')\n", (2683, 2731), False, 'import glob\n'), ((3008, 3021), 'numpy.argmax', 'np.argmax', (['lb'], {}), '(lb)\n', (3017, 3021), True, 'import numpy as np\n'), ((2062, 2073), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (2070, 2073), False, 'from os.path import basename, join\n')] |
import json
import logging
import os
import shutil
import torch
import numpy as np
import scipy.misc
from io import BytesIO
import sys
import time
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from pathlib import Path
from scipy.stats import norm
from scipy.optimize import minimize
from torch.autograd import Variable
from scipy import stats
from statsmodels.stats import weightstats as stests
import torch.optim as optim
from sklearn.metrics import log_loss
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if device == 'cuda':
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Params():
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
def save(self, json_path):
with open(json_path, 'w') as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
"""Loads parameters from json file"""
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
class RunningAverage():
"""A simple class that maintains the running average of a quantity
Example:
```
loss_avg = RunningAverage()
loss_avg.update(2)
loss_avg.update(4)
loss_avg() = 3
```
"""
def __init__(self):
self.steps = 0
self.total = 0
def update(self, val):
self.total += val
self.steps += 1
def __call__(self):
return self.total/float(self.steps)
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not logger.handlers:
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
def save_dict_to_json(d, json_path):
"""Saves dict of floats in json file
Args:
d: (dict) of float-castable values (np.float, int, float, etc.)
json_path: (string) path to json file
"""
with open(json_path, 'w') as f:
# We need to convert the values to float for json (it doesn't accept np.array, np.float, )
d = {k: float(v) for k, v in d.items()}
json.dump(d, f, indent=4)
def save_checkpoint(state, is_best, checkpoint):
"""Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves
checkpoint + 'best.pth.tar'
Args:
state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
is_best: (bool) True if it is the best model seen till now
checkpoint: (string) folder where parameters are to be saved
"""
filepath = os.path.join(checkpoint, 'last.pth.tar')
if not os.path.exists(checkpoint):
print("Checkpoint Directory does not exist! Making directory {}".format(checkpoint))
os.mkdir(checkpoint)
else:
print("Checkpoint Directory exists! ")
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))
def load_checkpoint(checkpoint, model, optimizer=None):
"""Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of
optimizer assuming it is present in checkpoint.
Args:
checkpoint: (string) filename which needs to be loaded
model: (torch.nn.Module) model for which the parameters are loaded
optimizer: (torch.optim) optional: resume optimizer from checkpoint
"""
if not os.path.exists(checkpoint):
print('-->', checkpoint)
raise("File doesn't exist {}".format(checkpoint))
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint)
else:
# this helps avoid errors when loading single-GPU-trained weights onto CPU-model
checkpoint = torch.load(checkpoint, map_location=lambda storage, loc: storage)
#orig_net = orig_net.to(device)
model.load_state_dict(checkpoint)
if optimizer:
optimizer.load_state_dict(checkpoint['optim_dict'])
return checkpoint
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
term_width = int(80)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def save_model(model, model_path):
if isinstance(model_path, Path):
model_path = str(model_path)
if isinstance(model, nn.DataParallel):
model = model.module
state_dict = model.state_dict()
for key in state_dict:
state_dict[key] = state_dict[key].cpu()
torch.save(state_dict, model_path)
#### Temperature Scaling
class TemperatureScaling():
def __init__(self, model, temp = 1., maxiter = 50000, solver = "L-BFGS-B"):#"Grid"): #L-BFGS-B --> another solver
self.model = model
self.temp = temp
self.maxiter = maxiter
self.solver = solver
def _loss_fun(self, x, probs, true):
# Calculates the loss using log-loss (cross-entropy loss)
scaled_probs = self.predict(probs, x)
try:
return log_loss(y_true=true, y_pred=scaled_probs)
except:
return 1e15
def fit(self, valid_loader, seed = 0):
np.random.seed(seed)
logits_list = []; labels_list = []
with torch.no_grad():
for input, label in valid_loader:
input, label = input.to(device), label.to(device)
logits = self.model(input)
try:
logits_list.extend(logits.numpy()); labels_list.extend(label.numpy())
except:
logits_list.extend(logits.cpu().numpy()); labels_list.extend(label.cpu().numpy())
logits = np.array(logits_list); true = np.array(labels_list)
#print('SHAPES: ', logits.shape, ' --- ', true.shape)
if self.solver != 'Grid':
opt = minimize(self._loss_fun, x0 = self.temp, args=(logits, true), options={'maxiter':self.maxiter}, method = self.solver, bounds = [(0.8, 3.0)])
self.temp = opt.x[0]
else:
temps = np.linspace(0.8, 10.0, num=921)
best_loss = 1e12
for temp in temps:
loss = self._loss_fun(temp, logits, true)
if loss < best_loss:
#print('Found better Temperature:', temp, '--> VL_Loss:', loss)
best_loss = loss; self.temp = temp
#return nn.Parameter(torch.ones(1) * self.temp)
return self.temp
def predict(self, logits, temp):
return softmax(logits/temp)
def softmax(x):
return np.exp(x) / np.exp(x).sum(axis=-1, keepdims=1) | [
"sys.stdout.write",
"os.mkdir",
"numpy.random.seed",
"logging.Formatter",
"sys.stdout.flush",
"numpy.exp",
"torch.no_grad",
"os.path.join",
"scipy.optimize.minimize",
"logging.FileHandler",
"torch.utils.data.DataLoader",
"torch.load",
"sklearn.metrics.log_loss",
"os.path.exists",
"numpy.... | [((6273, 6284), 'time.time', 'time.time', ([], {}), '()\n', (6282, 6284), False, 'import time\n'), ((517, 542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (540, 542), False, 'import torch\n'), ((2407, 2426), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2424, 2426), False, 'import logging\n'), ((3796, 3836), 'os.path.join', 'os.path.join', (['checkpoint', '"""last.pth.tar"""'], {}), "(checkpoint, 'last.pth.tar')\n", (3808, 3836), False, 'import os\n'), ((4059, 4086), 'torch.save', 'torch.save', (['state', 'filepath'], {}), '(state, filepath)\n', (4069, 4086), False, 'import torch\n'), ((4763, 4788), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4786, 4788), False, 'import torch\n'), ((5301, 5380), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(dataset, batch_size=1, shuffle=True, num_workers=2)\n', (5328, 5380), False, 'import torch\n'), ((5392, 5406), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (5403, 5406), False, 'import torch\n'), ((5417, 5431), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (5428, 5431), False, 'import torch\n'), ((6568, 6590), 'sys.stdout.write', 'sys.stdout.write', (['""" ["""'], {}), "(' [')\n", (6584, 6590), False, 'import sys\n'), ((6654, 6675), 'sys.stdout.write', 'sys.stdout.write', (['""">"""'], {}), "('>')\n", (6670, 6675), False, 'import sys\n'), ((6740, 6761), 'sys.stdout.write', 'sys.stdout.write', (['"""]"""'], {}), "(']')\n", (6756, 6761), False, 'import sys\n'), ((6778, 6789), 'time.time', 'time.time', ([], {}), '()\n', (6787, 6789), False, 'import time\n'), ((7072, 7093), 'sys.stdout.write', 'sys.stdout.write', (['msg'], {}), '(msg)\n', (7088, 7093), False, 'import sys\n'), ((7323, 7373), 'sys.stdout.write', 'sys.stdout.write', (["(' %d/%d ' % (current + 1, total))"], {}), "(' %d/%d ' % (current + 1, total))\n", (7339, 7373), False, 'import sys\n'), ((7475, 7493), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7491, 7493), False, 'import sys\n'), ((8554, 8588), 'torch.save', 'torch.save', (['state_dict', 'model_path'], {}), '(state_dict, model_path)\n', (8564, 8588), False, 'import torch\n'), ((2541, 2570), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (2560, 2570), False, 'import logging\n'), ((2761, 2784), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2782, 2784), False, 'import logging\n'), ((3304, 3329), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(4)'}), '(d, f, indent=4)\n', (3313, 3329), False, 'import json\n'), ((3848, 3874), 'os.path.exists', 'os.path.exists', (['checkpoint'], {}), '(checkpoint)\n', (3862, 3874), False, 'import os\n'), ((3977, 3997), 'os.mkdir', 'os.mkdir', (['checkpoint'], {}), '(checkpoint)\n', (3985, 3997), False, 'import os\n'), ((4637, 4663), 'os.path.exists', 'os.path.exists', (['checkpoint'], {}), '(checkpoint)\n', (4651, 4663), False, 'import os\n'), ((4811, 4833), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (4821, 4833), False, 'import torch\n'), ((4954, 5019), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': '(lambda storage, loc: storage)'}), '(checkpoint, map_location=lambda storage, loc: storage)\n', (4964, 5019), False, 'import torch\n'), ((6427, 6438), 'time.time', 'time.time', ([], {}), '()\n', (6436, 6438), False, 'import time\n'), ((6628, 6649), 'sys.stdout.write', 'sys.stdout.write', (['"""="""'], {}), "('=')\n", (6644, 6649), False, 'import sys\n'), ((6714, 6735), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (6730, 6735), False, 'import sys\n'), ((7167, 7188), 'sys.stdout.write', 'sys.stdout.write', (['""" """'], {}), "(' ')\n", (7183, 7188), False, 'import sys\n'), ((7296, 7320), 'sys.stdout.write', 'sys.stdout.write', (['"""\x08"""'], {}), "('\\x08')\n", (7312, 7320), False, 'import sys\n'), ((7407, 7429), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (7423, 7429), False, 'import sys\n'), ((7448, 7470), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (7464, 7470), False, 'import sys\n'), ((9203, 9223), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9217, 9223), True, 'import numpy as np\n'), ((9715, 9736), 'numpy.array', 'np.array', (['logits_list'], {}), '(logits_list)\n', (9723, 9736), True, 'import numpy as np\n'), ((9745, 9766), 'numpy.array', 'np.array', (['labels_list'], {}), '(labels_list)\n', (9753, 9766), True, 'import numpy as np\n'), ((10620, 10629), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (10626, 10629), True, 'import numpy as np\n'), ((1008, 1020), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import json\n'), ((1146, 1183), 'json.dump', 'json.dump', (['self.__dict__', 'f'], {'indent': '(4)'}), '(self.__dict__, f, indent=4)\n', (1155, 1183), False, 'import json\n'), ((1332, 1344), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1341, 1344), False, 'import json\n'), ((2605, 2664), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s: %(message)s"""'], {}), "('%(asctime)s:%(levelname)s: %(message)s')\n", (2622, 2664), False, 'import logging\n'), ((2821, 2853), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (2838, 2853), False, 'import logging\n'), ((4137, 4177), 'os.path.join', 'os.path.join', (['checkpoint', '"""best.pth.tar"""'], {}), "(checkpoint, 'best.pth.tar')\n", (4149, 4177), False, 'import os\n'), ((5839, 5884), 'torch.nn.init.kaiming_normal', 'init.kaiming_normal', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (5858, 5884), True, 'import torch.nn.init as init\n'), ((9064, 9106), 'sklearn.metrics.log_loss', 'log_loss', ([], {'y_true': 'true', 'y_pred': 'scaled_probs'}), '(y_true=true, y_pred=scaled_probs)\n', (9072, 9106), False, 'from sklearn.metrics import log_loss\n'), ((9280, 9295), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9293, 9295), False, 'import torch\n'), ((9881, 10021), 'scipy.optimize.minimize', 'minimize', (['self._loss_fun'], {'x0': 'self.temp', 'args': '(logits, true)', 'options': "{'maxiter': self.maxiter}", 'method': 'self.solver', 'bounds': '[(0.8, 3.0)]'}), "(self._loss_fun, x0=self.temp, args=(logits, true), options={\n 'maxiter': self.maxiter}, method=self.solver, bounds=[(0.8, 3.0)])\n", (9889, 10021), False, 'from scipy.optimize import minimize\n'), ((10089, 10120), 'numpy.linspace', 'np.linspace', (['(0.8)', '(10.0)'], {'num': '(921)'}), '(0.8, 10.0, num=921)\n', (10100, 10120), True, 'import numpy as np\n'), ((5924, 5948), 'torch.nn.init.constant', 'init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5937, 5948), True, 'import torch.nn.init as init\n'), ((6005, 6031), 'torch.nn.init.constant', 'init.constant', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (6018, 6031), True, 'import torch.nn.init as init\n'), ((6044, 6068), 'torch.nn.init.constant', 'init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6057, 6068), True, 'import torch.nn.init as init\n'), ((10632, 10641), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (10638, 10641), True, 'import numpy as np\n'), ((6120, 6152), 'torch.nn.init.normal', 'init.normal', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (6131, 6152), True, 'import torch.nn.init as init\n'), ((6191, 6215), 'torch.nn.init.constant', 'init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6204, 6215), True, 'import torch.nn.init as init\n')] |
from numpy.testing import assert_almost_equal, assert_array_equal
from model import *
def test_sample_ecc():
for _ in range(10):
ecc = np.random.randint(100)
x, y = sample_ecc(ecc)
assert_almost_equal(x**2 + y**2, ecc**2)
def test_generate_params():
cov = np.array([[0, 0], [0, 1]])
rng = np.random.RandomState(32)
params = generate_params(2, 2, cov, 10, rng=rng, scale=1.)
rng_ = np.random.RandomState(32)
params_ = generate_params(2, 2, cov, 10, rng=rng_, scale=1.)
assert_array_equal(params, params_)
assert params.shape == (10, 3)
# check eccentricities
xy = params[:, :2]
assert_almost_equal(np.sum(xy ** 2, axis=1), [2**2] * 10)
# check it does some sampling
cov = np.eye(2)
tol = 10**-9
cov *= tol
params = generate_params(2, 2, cov, 10, rng=rng, scale=1.)
xy = params[:, :2]
assert_almost_equal(np.sum(xy ** 2, axis=1), [2**2] * 10, decimal=3)
assert_almost_equal(params[:, 2], [2] * 10, decimal=3)
def test_activation():
data = np.zeros((100, 100))
data[30, 30] = 1.
# check that the gain is indeed different
act_g1 = activation(data, 30, 30, 2, gain=1.0)
act_g2 = activation(data, 30, 30, 2, gain=4.0)
assert_almost_equal(act_g2/act_g1, 4.0)
def test_VoxelPopulation():
n_voxels = 10
n_stim = 4
xs = ys = np.random.randn(n_voxels)*10 + 50
sigmas = np.random.randn(n_voxels)*2
pop = VoxelPopulation(xs, ys, sigmas, gain=2., n=0.5)
gains = np.array([2.] * n_voxels)
ns = np.array([0.5] * n_voxels)
pop_v = VoxelPopulation(xs, ys, sigmas, gain=gains, n=ns)
stim = np.zeros((n_stim, 100, 100))
stim[0, 50, 50] = 1.
act = pop.activate(stim)
act_v = pop_v.activate(stim)
assert act.shape == (n_stim, n_voxels)
assert act_v.shape == (n_stim, n_voxels)
assert_array_equal(act, act_v)
| [
"numpy.testing.assert_array_equal",
"numpy.testing.assert_almost_equal"
] | [((524, 559), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['params', 'params_'], {}), '(params, params_)\n', (542, 559), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n'), ((957, 1011), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['params[:, 2]', '([2] * 10)'], {'decimal': '(3)'}), '(params[:, 2], [2] * 10, decimal=3)\n', (976, 1011), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n'), ((1243, 1284), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(act_g2 / act_g1)', '(4.0)'], {}), '(act_g2 / act_g1, 4.0)\n', (1262, 1284), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n'), ((1851, 1881), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['act', 'act_v'], {}), '(act, act_v)\n', (1869, 1881), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n'), ((211, 257), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['(x ** 2 + y ** 2)', '(ecc ** 2)'], {}), '(x ** 2 + y ** 2, ecc ** 2)\n', (230, 257), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n')] |
import numpy as np
class Experiment:
"""This class describes the experiment setup.
Both the characteristics of a specific experiment and
of the simulation are included.
Moreover, the state during a simulation run can be acessed.
"""
def __init__(self, id, nr_wires, nr_strands, stress_range_0, fctm, wires):
"""Initialised the experiment.
Parameters
-----------
id: string
Identifier of the test beam.
nr_wires: int
Total number of wires in the test beam.
nr_strands: int
Total number of strands.
stress_range_0: float
Initial stress range in N/mm2.
fctm: float
Concrete tensile strength in MPa.
wires: wire object
All wires created with corresponding class.
"""
self.id = id
self.nr_wires = nr_wires
self.nr_strands = nr_strands
self.stress_range_0 = stress_range_0 #[N/mm2]
self.stress_range = self.stress_range_0 #[N/mm2]
self.nr_cycles = 0
self.crack_width = 0 # [mm]
self.nr_broken = 0
self.arr = []
self.fctm = fctm # in MPA
self.wires = wires
self.lambda_fatigue_fretting_crack = 5e-7
self.lambda_fatigue_fretting_stress = 5e-8
self.lambda_fatigue = 3e-7
def update_wires(self):
"""Method to update all wires.
Each wire is updated, dependent on the stress range and the crack width.
In case a wire breaks, the experiment is updated as well
(since the number of broken wires influences the current stress range).
"""
update = False
self.nr_cycles += 1
for w in self.wires:
update_exp = w.update_wire(self.stress_range,
self.crack_width,
self.lambda_fatigue,
self.lambda_fatigue_fretting_crack,
self.lambda_fatigue_fretting_stress)
if update_exp:
update = True
if update:
self.update_experiment(self.nr_cycles)
def update_experiment(self, nr_cycles):
"""Method to update the experiment.
If a wire broke, the stress range, the number of broken wires,
and the crack width needs to be updated.
Moreover, the information on the cycle where the wire broke is appended
to an array for further references, as well as the corresponding crack width.
"""
self.nr_cycles = nr_cycles
nr_broken = np.sum([self.wires[i].broken for i in range(self.nr_wires)])
if self.nr_broken != nr_broken:
self.nr_broken = nr_broken
self.stress_range = ( self.nr_wires / (self.nr_wires - self.nr_broken)
* self.stress_range_0 )
self.crack_width = self.calc_crack_width()
self.write()
def write(self):
"""Method to append information.
When called, the number of cycles, the number of broken wires
and the crack width is noted."""
self.arr.append([self.nr_cycles, self.nr_broken, self.crack_width])
def calc_crack_width(self):
"""Method to calculate the crack width.
The function to calculate it is from SFB823 Discussion Paper Nr.25/2015.
It's formula (7).
The used parameters are:
k_t = 0.6
E_p = 195e3 MPa
A_p = 1000 mm2
Returns
----------
crack width: float
The crack width in mm.
"""
k_t = 0.6 # no unit
E_p = 195e3 # [MPa = N/mm2]
A_p = 1000 # [mm**2] ###10000 #?? # 4.5m x 0.3m = 1.35m2 ?
A_pn = (1. - self.nr_broken / self.nr_wires) * A_p
cw = (1. - k_t) * self.stress_range ** 2 * A_pn
cw = cw / ( 0.72 * np.pi * self.fctm * E_p * np.sqrt(A_pn))
return cw #[mm]
class Wire:
"""This class describes a single wire.
Each wire has the information if it is a inner or outer wire,
and if its strand is an inner or outer strand.
Moreover, it stores the current fatigue value and if it is broken or not.
"""
def __init__(self, id, inner_wire, inner_strand, fatigue):
"""Initialised a single wire.
Parameters
-----------
id: string
Identifier of the wire.
inner_wire: bool
Specifies if it is an inner wire or not.
inner_strand: bool
Specifies if it is in an inner strand or not.
fatigue: float
The initial fatigue of a wire.
"""
self.id = id
self.inner_wire = inner_wire
self.inner_strand = inner_strand
self.fatigue = fatigue
self.broken = False
def update_wire(self, stress_range, crack_width, lambda_fatigue,
lambda_fatigue_fretting_crack, lambda_fatigue_stress):
"""This methods updates a single wire.
The fatigue value of a wire is updated dependent on the stress range
and the crack width.
The total fatigue is a sum of the fatigue from stress and from fretting.
"""
if self.broken == True:
return False
else:
self.fatigue = (self.fatigue
- self.calc_fatigue_exp(
stress_range,
lambda_fatigue
)
- self.calc_fatigue_fretting(
crack_width,
stress_range,
lambda_fatigue_fretting_crack,
lambda_fatigue_stress
)
)
if self.fatigue < 0:
self.broken = True
return True
else:
return False
def calc_fatigue_fretting(self,
crack_width,
stress_range,
lambda_fatigue_fretting_crack,
lambda_fatigue_fretting_stress):
"""Method to calculate the fretting.
Fretting does not occur for inner wires and strands.
Fretting depends on the current stress range and crack width.
Returns
---------
fretting: float
The fretting for one cycle.
"""
if self.inner_strand or self.inner_wire:
return 0
else:
fretting_crack = np.random.exponential(
lambda_fatigue_fretting_crack * crack_width)
fretting_stress = np.random.exponential(
lambda_fatigue_fretting_stress * stress_range)
return fretting_crack + fretting_stress
def calc_fatigue_exp(self, stress_range, lambda_fatigue):
"""Method to calculate fatigue.
It depends on the current stress range.
Returns
---------
fatigue: float
The fatigue for one cycle.
"""
return np.random.exponential(lambda_fatigue * stress_range)
| [
"numpy.random.exponential",
"numpy.sqrt"
] | [((7463, 7515), 'numpy.random.exponential', 'np.random.exponential', (['(lambda_fatigue * stress_range)'], {}), '(lambda_fatigue * stress_range)\n', (7484, 7515), True, 'import numpy as np\n'), ((6916, 6982), 'numpy.random.exponential', 'np.random.exponential', (['(lambda_fatigue_fretting_crack * crack_width)'], {}), '(lambda_fatigue_fretting_crack * crack_width)\n', (6937, 6982), True, 'import numpy as np\n'), ((7030, 7098), 'numpy.random.exponential', 'np.random.exponential', (['(lambda_fatigue_fretting_stress * stress_range)'], {}), '(lambda_fatigue_fretting_stress * stress_range)\n', (7051, 7098), True, 'import numpy as np\n'), ((4004, 4017), 'numpy.sqrt', 'np.sqrt', (['A_pn'], {}), '(A_pn)\n', (4011, 4017), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 30 19:56:57 2022
@author: <NAME>
"""
import numpy as np
nrandG=np.random.default_rng()
#### for the purposes of calculations hbar=1
#### what is mu? can it be 0? it says tuning paramtere, so i assume constant.
#H_el = sum_k (hbar k )^2/(2 m ) aDag a
#H_ph = hbar omega_ph Sum( bdag b)
#H _el-ph = Sum_k,q( V(q)(bdag-b)adag a)
#V(q)=i hbar omaga_ph/q (4 pi alpha /V)^.5 (h/(2m pmega_ph)^.25)
#predefine a momentum complex time space
def inBetween(a,b,c,d):
'''
takes 2 ordered pairs (a,b),(c,d) and checks if any of the vertex are
contained in the other ordered pair.
Parameters
----------
a : Float
lower of the ab ordered pair.
b : Float
DESCRIPTION.
c : Float
DESCRIPTION.
d : Float
DESCRIPTION.
Returns
-------
bool
if true then an arc passes through another arc
if false an arc that is alone.
'''
w=c<a<d
x=c<b<d
y=a<c<b
z=a<d<b
if x!=w ^ y!=z:
return True
else:
return False
def r(pins,prem,tauL,tauR,tauOne,tauTwo,q,k,mu,alpha,order,m=1,omegaPH=1):
q=np.linalg.norm(q)
rad=q**2/2/m*(tauTwo-tauOne)
if 0<rad<1E-10:
rad=0
if -1E-10<rad<0:
rad=0
if 1E2<rad:
rad=1E2
if rad<-1E2:
rad=-1E2
r1=2*(2*np.pi)**.5*alpha**2*np.exp(rad)*prem*(tauR-tauL)*m**(3/2)
r2=((1+order)*pins*q**2*omegaPH)*((tauTwo-tauOne))**(3/2)
#for some reason tauTwo-tauOne is negative so square root is beeing rejected
#only happening in remove update
return r1/r2
def phononProp(tau,omegaPH=1):
return np.exp(-omegaPH*(tau))
def greensFunction(k,tau,mu,m=1):
'''
returns the greens function of the Hamiltonian
for this case function is
G(k,tau)=-Theta(tau)e^(-(eps_k-mu)tau)
Parameters
----------
k : TYPE
DESCRIPTION.
tau : TYPE
Complex Time parameter.
m : TYPE
mass of the phonon.
mu : TYPE
Energy shift used as a tuning parameter.
Returns
-------
int
DESCRIPTION.
'''
epsilonK=k**2/(2*m)
if tau<=0:
return 0
else:
return -np.exp(-tau*(epsilonK-mu))
#need to define Z_k ask about this, something with partition function
class diagMC:
def timeSpaceLin(tauMax,Ntau,j):
'''
Parameters
----------
tauMax : TYPE
DESCRIPTION.
Ntau : TYPE
DESCRIPTION.
j : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
'''
deltaTau=tauMax/Ntau
return (j+1/2)*deltaTau
def momSpaceLin(kMax,Nk,j):
deltaK=kMax/Nk
return j*deltaK
def Z_k(k):
#residue of the particle k
return
#should also do for nonlinear spaces but paper says its does not matter
def normalise():
#not needed yet
#norm is sum over all momenta on the interval of the 0 order Green func
return
def pChange ():
#not needed yet
#only when expansion is of order 0
#select new momentum
#accept only if metroplis with min of [1,r] where r is G_0(p new,tau)/G_)(p old,tau)
return
def tauChange(p,tauMax,tau,mu,m=1):
'''
only acepts if order=0
selesct new tau using exponential distrobution
dispersion eta_p=epsilon_p-mu
mu is raondom number [0,1]
tau = -ln (mu)/abs(eta_p)
accept only if tau< tau max
Parameters
----------
p : Float
External momentum.
tauMax : Float
External time max.
m : Float, optional
Mass of the particle. The default is 1.
Returns
-------
Float
new time.
'''
#tau=nrandG.exponential(scale=beta)
#numpy random use 1/eta
u=nrandG.uniform(low=0,high=1)
epsilonP=p**2/(2*m)
eta_p=epsilonP-mu
tauPrime=-np.log(u)/abs(eta_p)
if tauPrime<tauMax:
return tauPrime,1
else:
return tau,0
'''def genWorldLine(kMax,tauMax):
k=1
#what is list of allowable k?
qList=[0,tauMax]
#needs to genrate k, tauMax
return k,qList'''
def insertProp(qList,tau,k,mu,alpha,order,pIns,pRem,m=1,omegaPH=1):
'''
qList needs to be list with tuples that are the end points of the
electron prop and momenta
takes a electron propogator finds nearest neighbors
select a time in between left and right times uniform
pick a point where there is only k
Parameters
----------
qList : list
List of tuples that contains q, tau1, tau2
tauMax : float
maximum allowed tau for the pruposes of bounding world line .
k : float
momentum of bare electron.
mu : float
energy shift used as a tuning parameter.
alpha : float
coupling constant.
order : int
order of the expansion.
pIns : float
probability weight for an insert.
pRem : float
probabliity weight for a remove.
m : float, optional
mass of electron usualy =1.
omegaPH : float, optional
frequency of the phonon. The default is 1.
Returns
-------
qList : list
new list of tuples depending on if acceptance is rejected or accepted.
bool : boolian
true if accepted, false if rejected.
'''
tauList=[0]
for i in qList:
q,a,b=i
tauList.extend([a,b])
#creates a list of all the verticies then sorts them
tauList.sort()
#picks a random vertex then adds the maximum vertex
index=nrandG.integers(len(tauList))
tauList.append(tau)
#defines tauL, tauR by the random vertex and the next greater one
tauLeft,tauRight=tauList[index],tauList[index+1]
#picks a point unformly between the two vertex picked previously
tauOne=nrandG.uniform(tauLeft,tauRight)
u=nrandG.uniform()
tauTwo=tauOne-np.log(u)/omegaPH
if tauTwo>tauRight:
return qList,0
mean=np.zeros(3)
variance=np.diag(m/(tauTwo-tauOne)*np.ones(3))
q=nrandG.multivariate_normal(mean,variance)
#does not use box muller but maybe not an issue?
dummy=q,tauOne,tauTwo
x=nrandG.uniform()
R=r(pIns,pRem,tauLeft,tauRight,tauOne,tauTwo,q,k,mu,alpha,order)
if x<R:
qList.append(dummy)
return qList,1
else:
return qList,0
def removeProp(qList,tau,k,mu,alpha,order,pIns,pRem,m=1,omegaPH=1):
'''
Parameters
----------
qList : list
List of tuples that contains q, tau1, tau2
k : float
momentum of bare electron.
mu : float
energy shift used as a tuning parameter.
alpha : float
coupling constant.
order : int
order of the expansion.
pIns : float
probability weight for an insert.
pRem : float
probabliity weight for a remove.
m : float, optional
mass of electron usualy =1.
omegaPH : float, optional
frequency of the phonon. The default is 1.
Returns
-------
qList : list
new list of tuples depending on if acceptance is rejected or accepted.
bool : boolian
true if accepted, false if rejected.
'''
#pick random prop
index=nrandG.integers(len(qList))
q,tauLeft,tauRight=qList[index]
#currently my list is unordered this would be faster if it was ordered
#this loop both checks if the picked ark is being crossed and unpacks
#the vertex to a list
endList=[0,tau]
for i in qList:
dummy,t1,t2=i
endList.extend([t1,t2])
if inBetween(tauLeft,tauRight,t1,t2)==True:
return qList,0
endList.sort()
#should pick the nearest endpoints to the ark needed to be removed
iL=endList.index(tauLeft)
iR=endList.index(tauRight)
tL=endList[iL-1]
tR=endList[iR+1]
print('re',tauLeft,tauRight)
R=r(pIns,pRem,tL,tR,tauLeft,tauRight,q,k,mu,alpha,order)**-1
if nrandG.uniform(1)<R:
qList.pop(index)
return qList,1
else:
return qList,0
#need to compute inverse r if passes then remove index value from q list
def swap(qList,k,tau,mu,order):
'''
Parameters
----------
qList : list
List of tuples that contains q, tau1, tau2
k : float
momentum of bare electron.
tauMax : float
maximum allowed tau for the pruposes of bounding world line.
mu : float
energy shift used as a tuning parameter.
order : int
order of the expansion.
Returns
-------
qList : TYPE
DESCRIPTION.
'''
#picks a random arc
index=nrandG.integers(order)
qOne,tauOne,tauA=qList[index]
#creates dummy comparison variable
#uses tau as the difference cannot be greater
dummy=tau
#iterable
i=0
#loops over all arcs
while i<order:
#skips the ark that was picked as this will give a differnece of zero
if i==index:
i+=1
continue
#takes the current list value and gives it dummy variables
qP,tauBP,tauP=qList[i]
#checks to see if the difference between the left picked arc and
#think this is broke
if abs(tauOne-tauP)<dummy:
index2=i
dummy=abs(tauOne-tauP)
qTwo,tauTwo,tauB=qP,tauP,tauBP
i+=1
q1=np.linalg.norm(qOne)
q2=np.linalg.norm(qTwo)
kP=k-q1-q2
#wY is returning 0 alot
wX=greensFunction(k, tauOne-tauTwo, mu)*phononProp(abs(tauOne-tauA))/q1**2\
*phononProp(abs(tauTwo-tauB))/q2**2
wY=-greensFunction(kP, tauTwo-tauOne, mu)*phononProp(abs(tauOne-tauB))/q2**2\
*phononProp(abs(tauTwo-tauA))/q1**2
if 0<wX<1E-8:
wX=1E-8
if -1E-8<wX<0:
wX=-1E-8
if nrandG.uniform(1)<wY/wX:
if index<index2:
qList.pop(index2)
qList.pop(index)
else:
qList.pop(index)
qList.pop(index2)
phonon1=qOne,tauTwo,tauA
qList.append(phonon1)
phonon2=qTwo,tauOne,tauB
qList.append(phonon2)
return qList,1
else:
return qList,0
def order(qList):
return len(qList)
#current issue this only looks under arcs
#need to create list of nodes
| [
"numpy.log",
"numpy.zeros",
"numpy.ones",
"numpy.random.default_rng",
"numpy.linalg.norm",
"numpy.exp"
] | [((136, 159), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (157, 159), True, 'import numpy as np\n'), ((1227, 1244), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (1241, 1244), True, 'import numpy as np\n'), ((1762, 1784), 'numpy.exp', 'np.exp', (['(-omegaPH * tau)'], {}), '(-omegaPH * tau)\n', (1768, 1784), True, 'import numpy as np\n'), ((6800, 6811), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6808, 6811), True, 'import numpy as np\n'), ((10914, 10934), 'numpy.linalg.norm', 'np.linalg.norm', (['qOne'], {}), '(qOne)\n', (10928, 10934), True, 'import numpy as np\n'), ((10946, 10966), 'numpy.linalg.norm', 'np.linalg.norm', (['qTwo'], {}), '(qTwo)\n', (10960, 10966), True, 'import numpy as np\n'), ((2315, 2345), 'numpy.exp', 'np.exp', (['(-tau * (epsilonK - mu))'], {}), '(-tau * (epsilonK - mu))\n', (2321, 2345), True, 'import numpy as np\n'), ((4356, 4365), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (4362, 4365), True, 'import numpy as np\n'), ((6705, 6714), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (6711, 6714), True, 'import numpy as np\n'), ((6855, 6865), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (6862, 6865), True, 'import numpy as np\n'), ((1464, 1475), 'numpy.exp', 'np.exp', (['rad'], {}), '(rad)\n', (1470, 1475), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""Learner for ProGANs (Progressively Growing GANs).
Typical usage example:
First configure your desired GAN on the command-line:
go to root directory...
$ python config.py progan
$ python data_config.py CelebA-HQ path/to/datasets/celeba_hq
Then write a custom script (or use train.py):
from gan_lab import get_current_configuration
from gan_lab.utils.data_utils import prepare_dataset, prepare_dataloader
from gan_lab.progan.learner import ProGANLearner
# get most recent configurations:
config = get_current_configuration( 'config' )
data_config = get_current_configuration( 'data_config' )
# get DataLoader(s)
train_ds, valid_ds = prepare_dataset( data_config )
train_dl, valid_dl, z_valid_dl = prepare_dataloader( config, data_config, train_ds, valid_ds )
# instantiate ProGANLearner and train:
learner = ProGANLearner( config )
learner.train( train_dl, valid_dl, z_valid_dl ) # train for config.num_main_iters iterations
learner.config.num_main_iters = 300000 # this is one example of changing your instantiated learner's configurations
learner.train( train_dl, valid_dl, z_valid_dl ) # train for another 300000 iterations
Note that the above custom script is just a more flexible alternative to running
train.py (you can, for example, run the above on a Jupyter Notebook). You can
always just run train.py.
"""
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
from .base import ProGAN
from .architectures import ProGenerator, ProDiscriminator
from _int import get_current_configuration, LearnerConfigCopy
from resnetgan.learner import GANLearner
from utils.latent_utils import gen_rand_latent_vars
from utils.backprop_utils import calc_gp, configure_adam_for_gan
from utils.custom_layers import Conv2dBias, LinearBias
import os
import sys
from abc import ABC
import copy
import logging
import warnings
from pathlib import Path
from functools import partial
from timeit import default_timer as timer
import numpy as np
from PIL import Image
from indexed import IndexedOrderedDict
import matplotlib.pyplot as plt
plt.rcParams.update( { 'figure.max_open_warning': 0 } )
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm
from tqdm.autonotebook import tqdm as tqdma
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
NONREDEFINABLE_ATTRS = ( 'model', 'init_res', 'res_samples', 'res_dataset', 'len_latent',
'num_classes', 'class_condition', 'use_auxiliary_classifier',
'model_upsample_type', 'model_downsample_type', 'align_corners',
'blur_type', 'nonlinearity', 'use_equalized_lr', 'normalize_z',
'use_pixelnorm', 'mbstd_group_size', 'use_ewma_gen', )
REDEFINABLE_FROM_LEARNER_ATTRS = ( 'batch_size', 'loss', 'gradient_penalty',
'optimizer', 'lr_sched', 'latent_distribution', )
COMPUTE_EWMA_VIA_HALFLIFE = True
EWMA_SMOOTHING_HALFLIFE = 10.
EWMA_SMOOTHING_BETA = .999
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
class ProGANLearner( GANLearner ):
"""GAN Learner specifically designed for ProGAN architectures.
Once instantiated, the ProGANLearner object's configuration can be changed, but only via
its self.config attribute (i.e. running 'python config.py [model]' post-instantiation
will not affect this learner's configuration).
"""
def __init__( self, config ):
super( ProGANLearner, self ).__init__( config )
# Training data's skip-connection resizing:
self.ds_sc_upsampler = None
self.ds_sc_downsampler = None
self.ds_sc_resizer = None
_model_selected = False
if self.model == 'ProGAN':
# If you want to change an attribute in an already-instantiated ProGANLearner's config or data_config,
# change self.config (below) instead of config and self.data_config (also below) instead of data_config:
self.config = LearnerConfigCopy( config,
self.__class__.__name__,
NONREDEFINABLE_ATTRS,
REDEFINABLE_FROM_LEARNER_ATTRS )
# pretrained models loaded later on for evaluation should not require data_config.py to have been run:
self._is_data_configed = False; self._stats_set = False
self._update_data_config( raise_exception = False )
self.latent_distribution = self.config.latent_distribution
# Instantiate Neural Networks:
global ProGAN, ProGenerator, ProDiscriminator
ProGAN = type( 'ProGAN', ( nn.Module, ABC, ), dict( ProGAN.__dict__ ) )
ProGAN.reset_state( )
ProGenerator = type( 'ProGenerator', ( ProGAN, ), dict( ProGenerator.__dict__ ) )
self.gen_model = ProGenerator(
final_res = self.config.res_samples,
len_latent = self.config.len_latent,
upsampler = self.gen_model_upsampler,
blur_type = self.config.blur_type,
nl = self.nl,
num_classes = self.num_classes_gen,
equalized_lr = self.config.use_equalized_lr,
normalize_z = self.config.normalize_z,
use_pixelnorm = self.config.use_pixelnorm
)
ProDiscriminator = type( 'ProDiscriminator', ( ProGAN, ), dict( ProDiscriminator.__dict__ ) )
self.disc_model = ProDiscriminator(
final_res = self.config.res_samples,
pooler = self.disc_model_downsampler,
blur_type = self.config.blur_type,
nl = self.nl,
num_classes = self.num_classes_disc,
equalized_lr = self.config.use_equalized_lr,
mbstd_group_size = self.config.mbstd_group_size
)
# If one wants to start at a higher resolution than 4:
assert self.config.init_res <= self.config.res_samples
if self.config.init_res > 4:
_init_res_log2 = int( np.log2( self.config.init_res ) )
if float( self.config.init_res ) != 2**_init_res_log2:
raise ValueError( 'Only resolutions that are powers of 2 are supported.' )
num_scale_inc = _init_res_log2 - 2
for _ in range( num_scale_inc ):
self.gen_model.increase_scale()
self.disc_model.increase_scale()
self.gen_model.fade_in_phase = False # this applies it to both networks simultaneously
# Generator and Discriminator state data must match:
assert self.gen_model.cls_base.__dict__ == \
self.disc_model.cls_base.__dict__
# Initialize EWMA Generator Model:
self.gen_model_lagged = None
if self.config.use_ewma_gen:
_orig_mode = self.gen_model.training
self.gen_model.train()
self.gen_model.to( 'cpu' )
with torch.no_grad():
self.gen_model_lagged = copy.deepcopy( self.gen_model ) # for memory efficiency in GPU
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.train( mode = _orig_mode )
self.gen_model.train( mode = _orig_mode )
self.gen_model.to( self.config.dev )
self.disc_model.to( self.config.dev )
self.batch_size = self.config.bs_dict[ self.gen_model.curr_res ]
if self.cond_gen:
self.labels_one_hot_disc = self._tensor( self.batch_size, self.num_classes )
self.labels_one_hot_gen = self._tensor( self.batch_size * self.config.gen_bs_mult, self.num_classes )
# Loss Function:
self._loss = config.loss.casefold()
self._set_loss( )
# Gradient Regularizer:
self.gp_func = partial(
calc_gp,
gp_type = self._gradient_penalty,
nn_disc = self.disc_model,
lda = self.config.lda,
gamma = self.config.gamma
)
# Optimizer:
self._set_optimizer( )
# Epsilon Loss to punish possible outliers from training distribution:
self.eps = False
if self.config.eps_drift > 0:
self.eps = True
_model_selected = True
# Training Set-specific Inits:
self.curr_phase_num = 0
# Validation Set-specific Inits:
self.lagged_params = None
# Whether in progressive growing stage:
self._progressively_grow = True
# Print configuration:
if _model_selected:
print( '-------- Initialized Model Configuration --------' )
print( self.config )
print( '-------------------------------------------------' )
# print( " If you would like to alter any of the above configurations,\n" + \
# " please do so via altering your instantiated ProGANLearner().config's attributes." )
print( '\n Ready to train!\n' )
def _apply_lagged_weights( self, m ):
# TODO: Include support for other learnable layers such as BatchNorm
_keys = m.state_dict().keys()
if isinstance( m, ( nn.Linear, nn.Conv2d, LinearBias, Conv2dBias, ) ):
if 'weight' in _keys:
m.weight = nn.Parameter( self.lagged_params.values()[ self._param_tensor_num ] )
self._param_tensor_num += 1
if 'bias' in _keys:
m.bias = nn.Parameter( self.lagged_params.values()[ self._param_tensor_num ] )
self._param_tensor_num += 1
@torch.no_grad()
def _update_gen_lagged( self ):
self.gen_model_lagged = copy.deepcopy( self.gen_model ) # for memory efficiency in GPU
self.gen_model_lagged.to( self.config.dev )
self.gen_model_lagged.train()
if self.beta:
self.gen_model_lagged.apply( self._apply_lagged_weights )
# print( f'{self._param_tensor_num} parameters in Generator.' )
self._param_tensor_num = 0
# TODO: Implement:
# 1.) 'ac loss' metric
# 2.) class determined for random generated sample,
# 3.) class determined for random real sample (only if `self.ac` is `True`)
@torch.no_grad()
def compute_metrics( self, metrics:str, metrics_type:str, z_valid_dl, valid_dl = None ):
"""Metric evaluation, run periodically during training or independently by the user."""
if not self._is_data_configed:
self._update_data_config( raise_exception = True )
self.data_config = get_current_configuration( 'data_config', raise_exception = True )
_ds_mean_unsq = self.ds_mean.unsqueeze( dim = 0 )
_ds_std_unsq = self.ds_std.unsqueeze( dim = 0 )
metrics_type = metrics_type.casefold()
if metrics_type not in ( 'generator', 'critic', 'discriminator', ):
raise Exception( 'Invalid metrics_type. Only "generator", "critic", or "discriminator" are accepted.' )
metrics = [ metric.casefold() for metric in metrics ]
self.disc_model.to( self.config.metrics_dev )
self.disc_model.eval()
self.gen_model.to( self.config.metrics_dev )
if self.config.use_ewma_gen:
if metrics_type == 'generator':
self.gen_model.train()
self._update_gen_lagged( )
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.eval()
self.gen_model.eval()
valid_dataiter = None
z_valid_dataiter = None
if valid_dl is not None:
valid_dataiter = iter( valid_dl )
if z_valid_dl is not None:
z_valid_dataiter = iter( z_valid_dl )
_len_z_valid_dl = len( z_valid_dl )
_len_z_valid_ds = len( z_valid_dl.dataset )
if not self.grid_inputs_constructed and 'image grid' in metrics:
assert ( self.config.img_grid_sz**2 <= _len_z_valid_ds )
self.rand_idxs = torch.multinomial( input = torch.ones( _len_z_valid_ds, dtype = torch.float32, device = 'cpu' ),
num_samples = self.config.img_grid_sz**2, replacement = False )
self._img_grid_constructed = False
metrics_tensors = { metric : torch.zeros( self.batch_size, _len_z_valid_dl, \
device = self.config.metrics_dev, dtype = torch.float32 ) for metric in metrics }
if z_valid_dl is not None:
pbarv = tqdma( total = _len_z_valid_ds, unit = ' imgs', dynamic_ncols = True )
for n in range( _len_z_valid_dl ):
# Uncomment the below if validation set is taking up too much memory
# zb = gen_rand_latent_vars( num_samples = self.batch_size, length = LEN_Z, distribution = 'normal', device = self.config.dev )
zbatch = next( z_valid_dataiter )
zb = ( zbatch[0] ).to( self.config.metrics_dev )
gen_labels = None
if len( zbatch ) > 1: gen_labels = ( zbatch[1] ).to( 'cpu' )
_len_zb = len( zb )
_xgenb = self.gen_model( zb )
if metrics_type == 'generator':
if 'fake realness' in metrics:
if self.ac:
metrics_tensors['fake realness'][:_len_zb, n], _ = self.disc_model( _xgenb )
else:
metrics_tensors['fake realness'][:_len_zb, n] = self.disc_model( _xgenb )
if 'generator loss' in metrics:
if 'fake realness' in metrics:
_ygenb = metrics_tensors['fake realness'][:_len_zb, n]
else:
if self.ac: _ygenb, _ = self.disc_model( _xgenb )
else: _ygenb = self.disc_model( _xgenb )
metrics_tensors['generator loss'][:_len_zb, n] = self.loss_func_gen( _ygenb )
if 'image grid' in metrics:
if self.valid_z is None:
self.valid_z = torch.FloatTensor( self.config.img_grid_sz**2, zb.shape[1] ).to( self.config.metrics_dev )
if self.valid_label is None and gen_labels is not None and self.config.img_grid_show_labels:
self.valid_label = torch.LongTensor( self.config.img_grid_sz**2 ).to( 'cpu' )
_idx = 0
if not self.grid_inputs_constructed:
for o in range( _len_zb ):
if ( n*self.batch_size + o ) in self.rand_idxs:
self.valid_z[ _idx ] = zb[ o ]
if gen_labels is not None and self.config.img_grid_show_labels:
self.valid_label[ _idx ] = gen_labels[ o ]
_idx += 1
if _idx == self.config.img_grid_sz**2:
self.grid_inputs_constructed = True
if self.grid_inputs_constructed and not self._img_grid_constructed:
if self.config.use_ewma_gen:
# print( 'TIME-AVERAGED GENERATOR OUTPUT:\n------------------------' )
save_ewma_img_grid_dir = \
self.config.save_samples_dir/self.model.casefold().replace( " ", "" )/self.data_config.dataset/'image_grid'/'time_averaged'
save_ewma_img_grid_dir.mkdir( parents = True, exist_ok = True )
fig, _ = self.make_image_grid( zs = self.valid_z, labels = self.valid_label, time_average = True,
save_path = str( save_ewma_img_grid_dir/( str( self.gen_metrics_num ) + '.png' ) ) )
# plt.show( )
# print( 'ORIGINAL SNAPSHOT GENERATOR OUTPUT:\n--------------------------' )
save_original_img_grid_dir = \
self.config.save_samples_dir/self.model.casefold().replace( " ", "" )/self.data_config.dataset/'image_grid'/'original'
save_original_img_grid_dir.mkdir( parents = True, exist_ok = True )
fig, _ = self.make_image_grid( zs = self.valid_z, labels = self.valid_label, time_average = False,
save_path = str( save_original_img_grid_dir/( str( self.gen_metrics_num ) + '.png' ) ) )
# plt.show( )
self._img_grid_constructed = True
self.gen_metrics_num += 1 if n == ( _len_z_valid_dl - 1 ) else 0
if metrics_type in ( 'critic', 'discriminator', ):
if 'fake realness' in metrics:
if self.ac:
metrics_tensors['fake realness'][:_len_zb, n], _ = self.disc_model( _xgenb )
else:
metrics_tensors['fake realness'][:_len_zb, n] = self.disc_model( _xgenb )
if valid_dl is not None:
xbatch = next( valid_dataiter )
# xb = ( xbatch[0] ).to( self.config.metrics_dev )
xb = xbatch[0]
# Fade in the real images the same way the generated images are being faded in:
if self.gen_model.fade_in_phase:
if self.config.bit_exact_resampling:
xb_low_res = xb.clone().mul( _ds_std_unsq ).add( _ds_mean_unsq )
for sample_idx in range( len( xb_low_res ) ):
xb_low_res[ sample_idx ] = self.ds_sc_resizer( xb_low_res[ sample_idx ] )
xb = torch.add( xb_low_res.mul( 1. - self.gen_model.alpha ), xb.mul( self.gen_model.alpha ) )
else:
xb = self.ds_sc_upsampler( self.ds_sc_downsampler( xb ) ) * ( 1. - self.gen_model.alpha ) + \
xb * ( self.gen_model.alpha )
xb = xb.to( self.config.metrics_dev )
if self.ac: real_labels = ( xbatch[1] ).to( 'cpu' )
if 'real realness' in metrics:
if self.ac:
metrics_tensors['real realness'][:_len_zb, n], _ = self.disc_model( xb )
else:
metrics_tensors['real realness'][:_len_zb, n] = self.disc_model( xb )
if 'discriminator loss' in metrics:
if 'fake realness' in metrics:
_ygenb = metrics_tensors['fake realness'][:_len_zb, n]
else:
if self.ac: _ygenb, _ = self.disc_model( _xgenb )
else: _ygenb = self.disc_model( _xgenb )
if 'real realness' in metrics:
_yb = metrics_tensors['real realness'][:_len_zb, n]
else:
if self.ac: _yb, _ = self.disc_model( xb )
else: _yb = self.disc_model( xb )
metrics_tensors['discriminator loss'][:_len_zb, n] = \
self.loss_func_disc(
_ygenb,
_yb
)
# TODO: Include gradient penalty despite the fact that this method is under @torch.no_grad().
# if self.gradient_penalty is not None:
# metrics_tensors['generator loss'][:len(zb), n] += self.gp_func( _xgenb, xb )
self.disc_metrics_num += 1 if n == ( _len_z_valid_dl - 1 ) else 0
pbarv.set_description( ' Img' )
pbarv.update( _len_zb )
pbarv.close()
metrics_vals = [
( vals_tensor.sum() / _len_z_valid_ds ).item() \
for vals_tensor in metrics_tensors.values()
]
_max_len = '%-' + str( max( [ len( s ) for s in metrics ] ) + 3 ) + 's'
metrics_vals = [ ' ' + ( _max_len % ( metric + ':' ) ) + '%.4g' % metrics_vals[n] + '\n' for n, metric in enumerate( metrics ) if metric != 'image grid' ]
self.gen_model.to( self.config.dev )
self.disc_model.to( self.config.dev )
self.gen_model.train()
self.disc_model.train()
return metrics_vals
def train( self, train_dl, valid_dl = None, z_valid_dl = None,
num_main_iters = None, num_gen_iters = None, num_disc_iters = None ):
"""Efficient & fast implementation of ProGAN training (at the expense of some messy/repetitive code).
Arguments num_main_iters, num_gen_iters, and num_disc_iters are taken from self.config if not
explicitly specified when running this method (this is usually the case). Typically, one just specifies
train_dl (and maybe valid_dl and z_valid_dl as well if one wants to periodically evaluate metrics).
"""
# If custom number of iterations are not input, use self.config's number of iterations as the default
if num_main_iters is None:
num_main_iters = self.config.num_main_iters
if num_gen_iters is None:
num_gen_iters = self.config.num_gen_iters
if num_disc_iters is None:
num_disc_iters = self.config.num_disc_iters
self.num_main_iters = num_main_iters
self.dataset_sz = len( train_dl.dataset )
self._update_data_config( raise_exception = True )
_ds_mean_unsq = self.ds_mean.unsqueeze( dim = 0 )
_ds_std_unsq = self.ds_std.unsqueeze( dim = 0 )
self.gen_model.to( self.config.dev )
self.gen_model.train()
self.disc_model.to( self.config.dev )
self.disc_model.train()
if self.config.use_ewma_gen:
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.train()
# Initialize number of images before transition to next fade-in/stabilization phase:
if self.not_trained_yet or self.pretrained_model:
if ( self.config.nimg_transition % self.batch_size ) != 0:
self.nimg_transition = self.batch_size * ( int( self.config.nimg_transition / self.batch_size ) + 1 )
else:
self.nimg_transition = self.config.nimg_transition
if self.not_trained_yet:
self.nimg_transition_lst = [ self.nimg_transition ]
# Initialize validation EWMA smoothing of generator weights & biases:
if self.not_trained_yet or self.pretrained_model:
self.beta = None
if self.config.use_ewma_gen:
if COMPUTE_EWMA_VIA_HALFLIFE:
self.beta = self.get_smoothing_ewma_beta( half_life = EWMA_SMOOTHING_HALFLIFE )
else:
self.beta = EWMA_SMOOTHING_BETA
# TODO: Can this be done more memory-efficiently (w/o sacrificing speed)?
with torch.no_grad():
# dict is not that much slower than list (according to `timeit` tests)
self.lagged_params = IndexedOrderedDict( self.gen_model.named_parameters( ) )
# Set scheduler on every run:
if self.sched_bool:
if not self.pretrained_model:
self.sched_stop_step = 0
self._set_scheduler( )
elif self.pretrained_model:
self.scheduler_gen = self._scheduler_gen_state_dict # = None
self.scheduler_disc = self._scheduler_disc_state_dict # = None
# Allows one to start from where they left off:
if self.not_trained_yet:
print( 'STARTING FROM ITERATION 0:\n' )
self.train_dataiter = iter( train_dl )
else:
print( 'CONTINUING FROM WHERE YOU LEFT OFF:\n' )
if self.pretrained_model:
train_dl.batch_sampler.batch_size = self.batch_size
train_dl.dataset.transforms.transform.transforms = \
self.increase_real_data_res( transforms_lst = train_dl.dataset.transforms.transform.transforms )
self.train_dataiter = iter( train_dl )
if valid_dl is not None:
valid_dl.batch_sampler.batch_size = self.batch_size
valid_dl.dataset.transforms.transform.transforms = \
self.increase_real_data_res( transforms_lst = valid_dl.dataset.transforms.transform.transforms )
if z_valid_dl is not None:
z_valid_dl.batch_sampler.batch_size = self.batch_size
if self.pretrained_model and self.gen_model.fade_in_phase:
if self.config.bit_exact_resampling:
for transform in self.train_dataiter._dataset.transforms.transform.transforms:
if isinstance( transform, transforms.Normalize ):
_nrmlz_transform = transform
self.ds_sc_resizer = \
self.get_real_data_skip_connection_transforms( self.data_config.dataset_downsample_type, _nrmlz_transform )
else:
# matches the model's skip-connection upsampler
self.ds_sc_upsampler = lambda xb: F.interpolate( xb, scale_factor = 2, mode = 'nearest' )
# matches the dataset's downsampler
if self.data_config.dataset_downsample_type in ( Image.BOX, Image.BILINEAR, ):
self.ds_sc_downsampler = lambda xb: F.avg_pool2d( xb, kernel_size = 2, stride = 2 )
elif self.data_config.dataset_downsample_type == Image.NEAREST:
self.ds_sc_downsampler = lambda xb: F.interpolate( xb, scale_factor = .5, mode = 'nearest' )
self.delta_alpha = self.batch_size / ( ( self.nimg_transition / num_disc_iters ) - self.batch_size )
if self.tot_num_epochs is None:
_tmpd1 = self.nimg_transition // self.config.bs_dict[ self.config.init_res ]
_tmpd20 = { k:v for k,v in self.config.bs_dict.items() if self.config.init_res < k < self.config.res_samples }
_tmpd20 = np.unique( np.array( list( _tmpd20.values() ) ), return_counts = True )
_tmpd2 = ( self.nimg_transition // _tmpd20[0] ) * 2 * _tmpd20[1]
_tmpd3 = num_main_iters - ( _tmpd1 + _tmpd2.sum() )
self.tot_num_epochs = _tmpd1*self.config.bs_dict[ self.config.init_res ] + ( _tmpd2*_tmpd20[0] ).sum() + _tmpd3*self.config.bs_dict[ self.config.res_samples ]
self.tot_num_epochs *= num_disc_iters
self.tot_num_epochs //= self.dataset_sz // self.batch_size * self.batch_size
self.tot_num_epochs += 1
pbar = tqdm( total = self.dataset_sz // self.batch_size * self.batch_size, unit = ' imgs' )
# ------------------------------------------------------------------------ #
try:
for itr in range( num_main_iters ):
if self.sched_bool:
with warnings.catch_warnings():
warnings.simplefilter( 'ignore' )
tqdm_lr = '%9s' % ( '%g' % self.scheduler_disc.get_lr()[0] )
tqdm_lr += '%9s' % ( '%g' % self.scheduler_gen.get_lr()[0] )
# tqdm_desc += f'Generator LR: { " ".join( [ str(s) for s in self.scheduler_gen.get_lr() ] ) } | ' + \
# f'Discriminator LR: { " ".join( [ str(s) for s in self.scheduler_disc.get_lr() ] ) }'
# print( f'Generator LR: {*self.scheduler_gen.get_lr()} |', \
# f'Discriminator LR: {*self.scheduler_disc.get_lr()}'
# )
else:
tqdm_lr = '%9s' % ( '%g' % self.config.lr_base )
tqdm_lr += '%9s' % ( '%g' % self.config.lr_base )
# these are set to `False` for the Generator because you don't need
# the Discriminator's parameters' gradients when chain-ruling back to the generator
for p in self.disc_model.parameters(): p.requires_grad_( True )
# Determine whether it is time to switch to next fade-in/stabilization phase:
if self.gen_model.curr_res < self.gen_model.final_res:
if self.curr_img_num == sum( self.nimg_transition_lst ):
self.curr_phase_num += 1
if self.curr_phase_num % 2 == 1:
_prev_res = self.gen_model.curr_res
self.gen_model.zero_grad()
self.disc_model.zero_grad()
self.gen_model.increase_scale()
self.disc_model.increase_scale()
# Generator and Discriminator state data must match:
assert self.gen_model.cls_base.__dict__ == \
self.disc_model.cls_base.__dict__
self.gen_model.to( self.config.dev )
self.disc_model.to( self.config.dev )
# ---------------- #
# Update Optimizer:
self._set_optimizer( )
# Update Scheduler:
if self.sched_bool:
self.sched_stop_step += self.scheduler_gen._step_count
self._set_scheduler( )
# ---------------- #
print( f'\n\n\nRESOLUTION INCREASED FROM {_prev_res}x{_prev_res} to ' + \
f'{int( self.gen_model.curr_res )}x{int( self.gen_model.curr_res )}\n' )
print( f'FADING IN {int( self.gen_model.curr_res )}x' + \
f'{int( self.gen_model.curr_res )} RESOLUTION...\n' )
print( ('\n' + '%9s' * 8 ) % ( 'Epoch', 'Res', 'Phase', 'D LR', 'G LR', 'D Loss', 'G Loss', 'Itr' ) )
# ---------------- #
# Update resolution-specific batch size:
self.batch_size = self.config.bs_dict[ self.gen_model.curr_res ]
# ---------------- #
self._set_loss( )
# ---------------- #
train_dl.batch_sampler.batch_size = self.batch_size
# self.train_dataiter._index_sampler.batch_size = self.batch_size
train_dl.dataset.transforms.transform.transforms = \
self.increase_real_data_res( transforms_lst = train_dl.dataset.transforms.transform.transforms )
if isinstance( self.train_dataiter, torch.utils.data.dataloader._MultiProcessingDataLoaderIter ):
self.train_dataiter = iter( train_dl ) # TODO: this makes you re-use your data before epoch end.
if self.config.bit_exact_resampling:
for transform in self.train_dataiter._dataset.transforms.transform.transforms:
if isinstance( transform, transforms.Normalize ):
_nrmlz_transform = transform
self.ds_sc_resizer = \
self.get_real_data_skip_connection_transforms( self.data_config.dataset_downsample_type, _nrmlz_transform )
else:
# matches the model's skip-connection upsampler
self.ds_sc_upsampler = lambda xb: F.interpolate( xb, scale_factor = 2, mode = 'nearest' )
# matches the dataset's downsampler
if self.data_config.dataset_downsample_type in ( Image.BOX, Image.BILINEAR, ):
self.ds_sc_downsampler = lambda xb: F.avg_pool2d( xb, kernel_size = 2, stride = 2 )
elif self.data_config.dataset_downsample_type == Image.NEAREST:
self.ds_sc_downsampler = lambda xb: F.interpolate( xb, scale_factor = .5, mode = 'nearest' )
# ---------------- #
if valid_dl is not None:
valid_dl.batch_sampler.batch_size = self.batch_size
valid_dl.dataset.transforms.transform.transforms = \
self.increase_real_data_res( transforms_lst = valid_dl.dataset.transforms.transform.transforms )
# ---------------- #
if z_valid_dl is not None:
z_valid_dl.batch_sampler.batch_size = self.batch_size
# ---------------- #
# Number of images before switching to next fade-in/stabilization phase:
if ( self.config.nimg_transition % self.batch_size ) != 0:
self.nimg_transition = self.batch_size * ( int( self.config.nimg_transition / self.batch_size ) + 1 )
else:
self.nimg_transition = self.config.nimg_transition
# ---------------- #
self.delta_alpha = self.batch_size / ( ( self.nimg_transition / num_disc_iters ) - self.batch_size )
self.gen_model.alpha = 0 # this applies to both networks simultaneously
# ---------------- #
if self.config.use_ewma_gen:
if COMPUTE_EWMA_VIA_HALFLIFE:
self.beta = self.get_smoothing_ewma_beta( half_life = EWMA_SMOOTHING_HALFLIFE )
# Update `self.lagged_params`:
with torch.no_grad():
self.lagged_params[ 'prev_torgb.conv2d.weight' ] = self.lagged_params.pop( 'torgb.conv2d.weight' )
self.lagged_params[ 'prev_torgb.conv2d.bias' ] = self.lagged_params.pop( 'torgb.conv2d.bias' )
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if name not in self.lagged_params:
self.lagged_params[ name ] = param
# Order the names to match that of `self.gen_model`'s order:
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if 'fc_mapping_model' in name:
self.lagged_params.move_to_end( name, last = True )
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if name == 'torgb.conv2d.weight':
self.lagged_params.move_to_end( name, last = True )
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if name == 'torgb.conv2d.bias':
self.lagged_params.move_to_end( name, last = True )
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if name == 'prev_torgb.conv2d.weight':
self.lagged_params.move_to_end( name, last = True )
for name, param in IndexedOrderedDict( self.gen_model.named_parameters() ).items():
if name == 'prev_torgb.conv2d.bias':
self.lagged_params.move_to_end( name, last = True )
else:
# Update Optimizer:
self._set_optimizer( )
# Update Scheduler:
if self.sched_bool:
self.sched_stop_step += self.scheduler_gen._step_count
self._set_scheduler( )
# ---------------- #
print( '\nSTABILIZING...\n' )
self.nimg_transition_lst.append( self.nimg_transition )
else:
if not itr:
if self.gen_model.fade_in_phase:
print( f'\nFADING IN {int( self.gen_model.curr_res )}x' + \
f'{int( self.gen_model.curr_res )} RESOLUTION...\n' )
else:
print( '\nSTABILIZING...\n' )
if not itr and not self.progressively_grow:
print( '\nSTABILIZING (FINAL)...\n' )
# Final phase:
if self.curr_img_num == sum( self.nimg_transition_lst ):
# Update Optimizer:
self._set_optimizer( )
# Update Scheduler:
if self.sched_bool:
self.sched_stop_step += self.scheduler_gen._step_count
self._set_scheduler( )
# ---------------- #
print( '\nSTABILIZING (FINAL)...\n' )
self.curr_phase_num += 1
self.nimg_transition_lst.append( np.inf )
self._progressively_grow = False
#------------------------- TRAIN DISCRIMINATOR ----------------------------
# loss_train_disc = None
for disc_iter in range( num_disc_iters ):
self.disc_model.zero_grad()
# Sample latent vector z:
if self.cond_gen:
# Class-conditioning in the latent space:
# TODO: Implement embedding-style conditioning from "Which Training Methods for
# GANs do actually Converge" & discriminator conditioning.
gen_labels = torch.randint( 0, self.num_classes, ( self.batch_size, 1, ), dtype = torch.int64, device = self.config.dev )
zb = gen_rand_latent_vars( num_samples = self.batch_size, length = self.config.len_latent,
distribution = self.latent_distribution, device = self.config.dev )
self.labels_one_hot_disc.zero_()
self.labels_one_hot_disc.scatter_( 1, gen_labels, 1 )
if self.ac: gen_labels.squeeze_()
zb = torch.cat( ( zb, self.labels_one_hot_disc, ), dim = 1 )
else:
zb = gen_rand_latent_vars( num_samples = self.batch_size, length = self.config.len_latent,
distribution = self.latent_distribution, device = self.config.dev )
with torch.no_grad(): zbv = zb # makes sure to totally freeze the generator when training discriminator
_xgenb = self.gen_model( zbv ).detach()
# Sample real data x:
batch = next( self.train_dataiter, None )
if batch is None:
self.curr_epoch_num += 1
pbar.close()
print( f'\n\nEPOCH # {self.curr_epoch_num - 1} COMPLETE. BEGIN EPOCH #', \
f'{self.curr_epoch_num}\n' )
print( ('\n' + '%9s' * 8 ) % ( 'Epoch', 'Res', 'Phase', 'D LR', 'G LR', 'D Loss', 'G Loss', 'Itr' ) )
pbar = tqdm( total = self.dataset_sz // self.batch_size * self.batch_size, unit = ' imgs' )
self.train_dataiter = iter( train_dl )
batch = next( self.train_dataiter )
# xb = ( batch[0] ).to( self.config.dev )
xb = batch[0]
# Fade in the real images the same way the generated images are being faded in:
if self.gen_model.fade_in_phase:
with torch.no_grad():
if self.config.bit_exact_resampling:
xb_low_res = xb.clone().mul( _ds_std_unsq ).add( _ds_mean_unsq )
for sample_idx in range( len( xb_low_res ) ):
xb_low_res[ sample_idx ] = self.ds_sc_resizer( xb_low_res[ sample_idx ] )
xb = torch.add( xb_low_res.mul( 1. - self.gen_model.alpha ), xb.mul( self.gen_model.alpha ) )
else:
xb = self.ds_sc_upsampler( self.ds_sc_downsampler( xb ) ) * ( 1. - self.gen_model.alpha ) + \
xb * ( self.gen_model.alpha )
xb = xb.to( self.config.dev )
if self.ac: real_labels = ( batch[1] ).to( self.config.dev )
# Forward prop:
if self.ac:
discriminative_gen, gen_preds = self.disc_model( _xgenb )
discriminative_real, real_preds = self.disc_model( xb )
else:
discriminative_gen = self.disc_model( _xgenb )
discriminative_real = self.disc_model( xb )
if self.loss == 'wgan':
loss_train_disc = ( discriminative_gen - discriminative_real ).mean()
elif self.loss in ( 'nonsaturating', 'minimax' ):
loss_train_disc = \
F.binary_cross_entropy_with_logits( input = discriminative_gen,
target = self._dummy_target_gen,
reduction = 'mean' ) + \
F.binary_cross_entropy_with_logits( input = discriminative_real,
target = self._dummy_target_real,
reduction = 'mean' )
if self.ac:
loss_train_disc += \
( self.loss_func_aux( gen_preds, gen_labels ) + \
self.loss_func_aux( real_preds, real_labels )
).mean() * self.config.ac_disc_scale
if self.gradient_penalty is not None:
loss_train_disc += self.calc_gp( _xgenb, xb )
if self.eps:
loss_train_disc += ( discriminative_real**2 ).mean() * self.config.eps_drift
# Backprop:
loss_train_disc.backward() # compute the gradients
self.opt_disc.step() # update the parameters you specified to the optimizer with backprop
# self.opt_disc.zero_grad()
# Compute metrics for discriminator (validation metrics should be for entire validation set):
metrics_vals = []
_valid_title = []
if z_valid_dl is not None and valid_dl is not None and self.config.disc_metrics:
if ( disc_iter == num_disc_iters - 1 ) and ( ( itr + 1 ) % self.config.num_iters_valid == 0 or itr == 0 ):
if itr != 0:
end = timer(); print( f'\n\nTime since last Validation Set: {end - start} seconds.' )
metrics_vals = self.compute_metrics(
metrics = self.config.disc_metrics, metrics_type = 'Discriminator',
z_valid_dl = z_valid_dl, valid_dl = valid_dl
)
_valid_title = [ '|\n', 'Discriminator Validation Metrics:\n' ]
print( *_valid_title, *metrics_vals )
tqdm_loss_disc = '%9.4g' % loss_train_disc.item()
if itr:
tqdm_desc = '%9s' % f'{self.curr_epoch_num}/{self.tot_num_epochs}'
tqdm_desc += '%9s' % f'{self.gen_model.curr_res}X{self.gen_model.curr_res}'
tqdm_desc += '%9s' % ( 'Fade In' if self.gen_model.fade_in_phase else 'Stab.' )
tqdm_desc += tqdm_lr
tqdm_desc += tqdm_loss_disc + tqdm_loss_gen
tqdm_desc += '%9s' % itr
tqdm_desc += ' Img'
pbar.set_description( tqdm_desc )
pbar.update( xb.shape[0] )
self.curr_dataset_batch_num += 1
self.curr_img_num += self.batch_size
#------------------------ TRAIN GENERATOR --------------------------
# these are set to `False` for the Generator because you don't need
# the Discriminator's parameters' gradients when chain-ruling back to the generator
for p in self.disc_model.parameters(): p.requires_grad_( False )
# loss_train_gen = None
for gen_iter in range( num_gen_iters ):
self.gen_model.zero_grad()
# Sample latent vector z:
if self.cond_gen:
# Class-conditioning in the latent space:
# TODO: Implement embedding-style conditioning from "Which Training Methods for
# GANs do actually Converge" & discriminator conditioning.
gen_labels = torch.randint( 0, self.num_classes, ( self.batch_size * self.config.gen_bs_mult, 1, ), dtype = torch.int64, device = self.config.dev )
zb = gen_rand_latent_vars( num_samples = self.batch_size * self.config.gen_bs_mult, length = self.config.len_latent,
distribution = self.latent_distribution, device = self.config.dev )
self.labels_one_hot_gen.zero_()
self.labels_one_hot_gen.scatter_( 1, gen_labels, 1 )
if self.ac: gen_labels.squeeze_()
zb = torch.cat( ( zb, self.labels_one_hot_gen, ), dim = 1 )
else:
zb = gen_rand_latent_vars( num_samples = self.batch_size * self.config.gen_bs_mult, length = self.config.len_latent,
distribution = self.latent_distribution, device = self.config.dev )
zb.requires_grad_( True )
# Forward prop:
if self.ac:
loss_train_gen, gen_preds = self.disc_model( self.gen_model( zb ) )
else:
loss_train_gen = self.disc_model( self.gen_model( zb ) )
if self.loss == 'wgan':
loss_train_gen = -loss_train_gen.mean()
elif self.loss == 'nonsaturating':
loss_train_gen = F.binary_cross_entropy_with_logits(
input = loss_train_gen,
target = self._dummy_target_real,
reduction = 'mean'
)
elif self.loss == 'minimax':
loss_train_gen = -F.binary_cross_entropy_with_logits(
input = loss_train_gen,
target = self._dummy_target_gen,
reduction = 'mean'
)
if self.ac:
loss_train_gen += self.loss_func_aux( gen_preds, gen_labels ).mean() * self.config.ac_gen_scale
# Backprop:
loss_train_gen.backward() # compute the gradients
# loss_train_gen = -loss_train_gen
self.opt_gen.step() # update the parameters you specified to the optimizer with backprop
# self.opt_gen.zero_grad()
# Calculate validation EWMA smoothing of generator weights & biases:
# TODO: Should this not be applied to the biases (i.e. just the weights)?
if self.config.use_ewma_gen:
with torch.no_grad():
for name, param in self.gen_model.named_parameters():
if self.beta:
self.lagged_params[ name ] = param * ( 1. - self.beta ) + \
self.lagged_params[ name ] * ( self.beta )
else:
self.lagged_params[ name ] = param
# Compute metrics for generator (validation metrics should be for entire validation set):
metrics_vals = []
_valid_title = []
if z_valid_dl is not None and self.config.gen_metrics:
if ( gen_iter == num_gen_iters - 1 ) and ( ( itr + 1 ) % self.config.num_iters_valid == 0 or itr == 0 ):
metrics_vals = self.compute_metrics(
metrics = self.config.gen_metrics, metrics_type = 'Generator',
z_valid_dl = z_valid_dl, valid_dl = None
)
_valid_title = [ '|\n', 'Generator Validation Metrics:\n' ]
print( *_valid_title, *metrics_vals )
if itr:
print( ('\n' + '%9s' * 8 ) % ( 'Epoch', 'Res', 'Phase', 'D LR', 'G LR', 'D Loss', 'G Loss', 'Itr' ) )
start = timer()
tqdm_loss_gen = '%9.4g' % loss_train_gen.item()
if itr:
tqdm_desc = '%9s' % f'{self.curr_epoch_num}/{self.tot_num_epochs}'
tqdm_desc += '%9s' % f'{self.gen_model.curr_res}X{self.gen_model.curr_res}'
tqdm_desc += '%9s' % ( 'Fade In' if self.gen_model.fade_in_phase else 'Stab.' )
tqdm_desc += tqdm_lr
tqdm_desc += tqdm_loss_disc + tqdm_loss_gen
tqdm_desc += '%9s' % itr
tqdm_desc += ' Img'
pbar.set_description( tqdm_desc )
if not itr:
print( ('\n' + '%9s' * 8 ) % ( 'Epoch', 'Res', 'Phase', 'D LR', 'G LR', 'D Loss', 'G Loss', 'Itr' ) )
# pbar.set_postfix( tqdm_desc )
# tqdm.write( tqdm_desc )
# Update fading-in paramater alpha:
if self.gen_model.fade_in_phase:
self.gen_model.alpha += self.delta_alpha # this affects both networks
if self.sched_bool:
self.scheduler_gen.step()
self.scheduler_disc.step()
if self.not_trained_yet:
self.not_trained_yet = False
# Save model every self.config.num_iters_save_model iterations:
if ( itr + 1 ) % self.config.num_iters_save_model == 0:
self._set_optimizer( ) # for niche case when training ends right when alpha becomes 1
# update time-averaged generator
with torch.no_grad():
self.gen_model.to( 'cpu' )
if self.config.use_ewma_gen:
self._update_gen_lagged( )
self.gen_model_lagged.to( 'cpu' )
self.gen_model_lagged.eval()
self.gen_model.eval()
self.disc_model.to( 'cpu' )
self.disc_model.eval()
self.save_model( self.config.save_model_dir/( self.model.casefold().replace( " ", "" ) + '_model.tar' ) )
self.gen_model.to( self.config.dev )
self.gen_model.train()
self.disc_model.to( self.config.dev )
self.disc_model.train()
if self.config.use_ewma_gen:
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.train()
except KeyboardInterrupt:
pbar.close()
self._set_optimizer( ) # for niche case when training ends right when alpha becomes 1
# update time-averaged generator
with torch.no_grad():
self.gen_model.to( 'cpu' )
if self.config.use_ewma_gen:
self._update_gen_lagged( )
self.gen_model_lagged.to( 'cpu' )
self.gen_model_lagged.eval()
self.gen_model.eval()
self.disc_model.to( 'cpu' )
self.disc_model.eval()
self.save_model( self.config.save_model_dir/( self.model.casefold().replace( " ", "" ) + '_model.tar' ) )
print( f'\nTraining interrupted. Saved latest checkpoint into "{self.config.save_model_dir}/".\n' )
try:
sys.exit( 0 )
except SystemExit:
os._exit( 0 )
pbar.close()
self._set_optimizer( ) # for niche case when training ends right when alpha becomes 1
# update time-averaged generator
with torch.no_grad():
self.gen_model.to( 'cpu' )
if self.config.use_ewma_gen:
self._update_gen_lagged( )
self.gen_model_lagged.to( 'cpu' )
self.gen_model_lagged.eval()
self.gen_model.eval()
self.disc_model.to( 'cpu' )
self.disc_model.eval()
# .......................................................................... #
def _set_scheduler( self ):
if self._lr_sched == 'resolution dependent':
self.scheduler_fn = lambda _: self.config.lr_fctr_dict[ self.gen_model.curr_res ]
# self.tot_num_epochs = ( self.batch_size * num_main_iters * num_disc_iters * 1. ) / self.dataset_sz
elif self._lr_sched == 'linear decay':
# self.scheduler_fn = lambda epoch: 1. - ( epoch + self.sched_stop_step ) * ( 1. / ( self.tot_num_epochs//1 ) )
self.scheduler_fn = lambda main_iter: 1. - ( main_iter + self.sched_stop_step ) * ( 1. / self.num_main_iters )
elif self._lr_sched == 'custom':
self.scheduler_fn = eval( self.config.lr_sched_custom )
# TODO: add more types of LR scheduling
else:
raise ValueError( "config does not support this LR scheduler.\n" + \
"Currently supported LR Schedulers are: [ 'resolution dependent', 'linear decay', 'custom' ]" )
self.scheduler_gen = \
torch.optim.lr_scheduler.LambdaLR( self.opt_gen, self.scheduler_fn, last_epoch = -1 )
self.scheduler_disc = \
torch.optim.lr_scheduler.LambdaLR( self.opt_disc, self.scheduler_fn, last_epoch = -1 )
with warnings.catch_warnings():
warnings.simplefilter( 'ignore', UserWarning )
self._scheduler_gen_state_dict = self.scheduler_gen.state_dict()
self._scheduler_disc_state_dict = self.scheduler_disc.state_dict()
if self.pretrained_model and not self._sched_state_dict_set:
self.scheduler_gen.load_state_dict( self._scheduler_gen_state_dict )
self.scheduler_disc.load_state_dict( self._scheduler_disc_state_dict )
self._scheduler_gen_state_dict = self.scheduler_gen.state_dict()
self._scheduler_disc_state_dict = self.scheduler_disc.state_dict()
self._sched_state_dict_set = True
def _set_optimizer( self ):
if self._optimizer == 'adam':
adam_gan = configure_adam_for_gan(
lr_base = self.config.lr_base,
betas = ( self.config.beta1, self.config.beta2 ),
eps = self.config.eps,
wd = self.config.wd
)
if self.gen_model.fade_in_phase:
self.opt_gen = adam_gan( params = self.gen_model.parameters() )
self.opt_disc = adam_gan( params = self.disc_model.parameters() )
else:
# don't need `prev_torgb` and `prev_fromrgb` during stabilization
self.opt_gen = adam_gan(
params = self.gen_model.most_parameters(
excluded_params = [ 'prev_torgb.conv2d.weight', 'prev_torgb.conv2d.bias' ]
)
)
self.opt_disc = adam_gan(
params = self.disc_model.most_parameters(
excluded_params = [ 'prev_fromrgb.0.conv2d.weight', 'prev_fromrgb.0.conv2d.bias' ]
)
)
elif self._optimizer == 'rmsprop':
raise NotImplementedError( 'RMSprop optimizer not yet implemented.' ) # TODO:
elif self._optimizer == 'momentum':
raise NotImplementedError( 'Momentum optimizer not yet implemented.' ) # TODO:
elif self._optimizer == 'sgd':
raise NotImplementedError( 'SGD optimizer not yet implemented.' ) # TODO:
else:
raise ValueError( "config does not support this optimizer.\n" + \
"Supported Optimizers are: [ 'adam', 'rmsprop', 'momentum', 'sgd' ]" )
# .......................................................................... #
def increase_real_data_res( self, transforms_lst:list ):
if not self._is_data_configed:
self._update_data_config( raise_exception = True )
_num_rsz = 0
for n, transform in enumerate( transforms_lst ):
if isinstance( transform, transforms.Resize ):
_num_rsz += 1
if _num_rsz < 2:
transforms_lst[n] = transforms.Resize( size = ( self.gen_model.curr_res, self.gen_model.curr_res, ),
interpolation = self.data_config.dataset_downsample_type )
else:
raise RuntimeWarning( 'Warning: More than 1 `Resize` transform found; only resized the first `Resize` in transforms list.' )
return transforms_lst
def get_real_data_skip_connection_transforms(self, ds_downsampler_type, nrmlz_transform ):
return transforms.Compose( [ transforms.ToPILImage(),
transforms.Resize( size = ( self.disc_model.prev_res, self.disc_model.prev_res, ),
interpolation = ds_downsampler_type ),
transforms.Resize( size = ( self.disc_model.curr_res, self.disc_model.curr_res, ),
interpolation = Image.NEAREST ),
transforms.ToTensor(),
nrmlz_transform
] )
def get_smoothing_ewma_beta( self, half_life ):
assert isinstance( half_life, float )
return .5 ** ( ( self.batch_size * self.config.gen_bs_mult ) / ( half_life * 1000. ) ) if half_life > 0. else 0.
# .......................................................................... #
# def reset_progan_state( self ):
# self.gen_model.cls_base.reset_state( ) # this applies to both networks simultaneously
@property
def progressively_grow( self ):
return self._progressively_grow
@progressively_grow.setter
def progressively_grow( self, new_progressively_grow ):
assert isinstance( new_progressively_grow, bool )
self._progressively_grow = new_progressively_grow
# TODO:
raise NotImplementedError( 'Setter self.progressively_grow not yet fully implemented.' )
# .......................................................................... #
@torch.no_grad()
def plot_sample( self, z_test, label = None, time_average = True ):
"""Plots and shows 1 sample from input latent code."""
if self.ds_mean is None or self.ds_std is None:
raise ValueError( "This model does not hold any information about your dataset's mean and/or std.\n" + \
"Please provide these (either from your current data configuration or from your pretrained model)." )
if z_test.dim() == 2:
if z_test.shape[0] != 1:
raise IndexError( 'This method only permits plotting 1 generated sample at a time.' )
elif z_test.dim() != 1:
raise IndexError( 'Incorrect dimensions of input latent vector. Must be either `dim == 1` or `dim == 2`.' )
if not self.cond_gen:
if z_test.shape[-1] != self.config.len_latent:
message = f"Input latent vector must be of size {self.config.len_latent}."
raise IndexError( message )
else:
if z_test.shape[-1] != self.config.len_latent + self.num_classes_gen:
message = f"This is a generator class-conditioned model. So please make sure to append a one-hot encoded vector\n" + \
f"of size {self.num_classes_gen} that indicates the to-be generated sample's class to a latent vector of\n" + \
f"size {self.config.len_latent}. Total input size must therefore be {self.config.len_latent + self.num_classes_gen}."
raise IndexError( message )
# z_test = z_test.to( self.config.dev )
x_test = self.gen_model_lagged( z_test ).squeeze() if time_average else self.gen_model( z_test ).squeeze()
if label is not None:
print( f'Label Index for Generated Image: {label}' )
logger = logging.getLogger()
_old_level = logger.level
logger.setLevel( 100 ) # ignores potential "clipping input data" warning
plt.imshow( ( ( ( x_test ) \
.cpu().detach() * self.ds_std ) + self.ds_mean ) \
.numpy().transpose( 1, 2, 0 ), interpolation = 'none'
)
logger.setLevel( _old_level )
plt.show()
@torch.no_grad()
def make_image_grid( self, zs, labels = None, time_average = True, save_path = None ):
"""Generates grid of images from input latent codes, whose size is `np.sqrt( len( zs ) )`."""
if self.ds_mean is None or self.ds_std is None:
raise ValueError( "This model does not hold any information about your dataset's mean and/or std.\n" + \
"Please provide these (either from your current data configuration or from your pretrained model)." )
if not zs.dim() == 2:
raise IndexError( 'Incorrect dimensions of input latent vector. Must be `dim == 2`.' )
if not self.cond_gen:
if zs.shape[1] != self.config.len_latent:
message = f"Input latent vector must be of size {self.config.len_latent}."
raise IndexError( message )
else:
if zs.shape[1] != self.config.len_latent + self.num_classes_gen:
message = f"This is a generator class-conditioned model. So please make sure to append a one-hot encoded vector\n" + \
f"of size {self.num_classes_gen} that indicates the to-be generated sample's class to a latent vector of\n" + \
f"size {self.config.len_latent}. Total input size must therefore be {self.config.len_latent + self.num_classes_gen}."
raise IndexError( message )
if np.sqrt( len( zs ) ) % 1 != 0:
raise ValueError( 'Argument `zs` must be a perfect square-length in order to make image grid.' )
sz = int( np.sqrt( len( zs ) ) )
fig = plt.figure( figsize = ( 8, 8 if labels is None else 9, ) )
axs = [ fig.add_subplot( sz, sz, i + 1 ) for i in range( sz**2 ) ]
logger = logging.getLogger()
_old_level = logger.level
logger.setLevel( 100 ) # ignores potential "clipping input data" warning
for n, ax in enumerate( axs ):
x = self.gen_model_lagged( zs[ n ] ).squeeze() if time_average else self.gen_model( zs[ n ] ).squeeze()
ax.imshow(
( ( x.cpu().detach() * self.ds_std ) + self.ds_mean ).numpy().transpose( 1, 2, 0 ),
interpolation = 'none'
)
if labels is not None:
ax.set_title( str( labels[ n ].item() ) )
ax.axis( 'off' )
ax.set_aspect( 'equal' )
logger.setLevel( _old_level )
fig.subplots_adjust( left = 0, right = 1, bottom = 0, top = 1, wspace = 0, hspace = 0 )
# maintain resolution of images and save
if save_path is not None:
bbox = axs[-1].get_window_extent().transformed( fig.dpi_scale_trans.inverted() )
dpi = ( self.gen_model_lagged.curr_res if time_average else self.gen_model.curr_res ) / bbox.height
fig.savefig( save_path, dpi = dpi, pad_inches = 0 )
return ( fig, axs, )
# .......................................................................... #
def save_model( self, save_path:Path ):
if self.not_trained_yet:
raise Exception( 'Please train your model for atleast 1 iteration before saving.' )
warnings.filterwarnings( 'ignore', category = UserWarning )
self.gen_model_metadata = { 'gen_model_upsampler': self.gen_model_upsampler,
'num_classes_gen': self.num_classes_gen }
self.disc_model_metadata = { 'disc_model_downsampler': self.disc_model_downsampler,
'num_classes_disc': self.num_classes_disc }
scheduler_state_dict_save_args = {
'scheduler_gen_state_dict': self.scheduler_gen.state_dict() if self.sched_bool else None,
'scheduler_disc_state_dict': self.scheduler_disc.state_dict() if self.sched_bool else None
}
save_path = Path( save_path )
save_path.parents[0].mkdir( parents = True, exist_ok = True )
torch.save( {
'config': self.config,
'curr_res': self.gen_model.curr_res,
'alpha': self.gen_model.alpha,
'gen_model_metadata': self.gen_model_metadata,
'gen_model_state_dict': self.gen_model.state_dict(),
'gen_model_lagged_state_dict': self.gen_model_lagged.state_dict() if self.config.use_ewma_gen else None,
'disc_model_metadata': self.disc_model_metadata,
'disc_model_state_dict': self.disc_model.state_dict(),
'nl': self.nl,
'sched_stop_step': self.sched_stop_step,
'lr_sched': self.lr_sched,
**scheduler_state_dict_save_args,
'optimizer': self.optimizer,
'opt_gen_state_dict': self.opt_gen.state_dict(),
'opt_disc_state_dict': self.opt_disc.state_dict(),
'loss': self.loss,
'gradient_penalty': self.gradient_penalty,
'batch_size': self.batch_size,
'curr_dataset_batch_num': self.curr_dataset_batch_num,
'curr_epoch_num': self.curr_epoch_num,
'tot_num_epochs': self.tot_num_epochs,
'dataset_sz': self.dataset_sz,
'ac': self.ac,
'cond_gen': self.cond_gen,
'cond_disc': self.cond_disc,
'valid_z': self.valid_z.to( 'cpu' ),
'valid_label': self.valid_label,
'grid_inputs_constructed': self.grid_inputs_constructed,
'rand_idxs': self.rand_idxs,
'gen_metrics_num': self.gen_metrics_num,
'disc_metrics_num': self.disc_metrics_num,
'curr_img_num': self.curr_img_num,
'nimg_transition_lst': self.nimg_transition_lst,
'not_trained_yet': self.not_trained_yet,
'ds_mean': self.ds_mean,
'ds_std': self.ds_std,
'latent_distribution': self.latent_distribution,
'curr_phase_num': self.curr_phase_num,
'lagged_params': self.lagged_params,
'progressively_grow': self.progressively_grow }, save_path
)
def load_model( self, load_path, dev_of_saved_model = 'cpu' ):
dev_of_saved_model = dev_of_saved_model.casefold()
assert ( dev_of_saved_model == 'cpu' or dev_of_saved_model == 'cuda' )
_map_location = lambda storage,loc: storage if dev_of_saved_model == 'cpu' else None
checkpoint = torch.load( load_path, map_location = _map_location )
self.config = checkpoint[ 'config' ]
self.nl = checkpoint[ 'nl' ]
# Load pretrained neural networks:
global ProGAN, ProGenerator, ProDiscriminator
ProGAN = type( 'ProGAN', ( nn.Module, ABC, ), dict( ProGAN.__dict__ ) )
ProGAN.reset_state( )
ProGenerator = type( 'ProGenerator', ( ProGAN, ), dict( ProGenerator.__dict__ ) )
self.gen_model_metadata = checkpoint[ 'gen_model_metadata' ]
self.gen_model_upsampler = self.gen_model_metadata[ 'gen_model_upsampler' ]
self.num_classes_gen = self.gen_model_metadata[ 'num_classes_gen' ]
self.gen_model = ProGenerator(
final_res = self.config.res_samples,
len_latent = self.config.len_latent,
upsampler = self.gen_model_upsampler,
blur_type = self.config.blur_type,
nl = self.nl,
num_classes = self.num_classes_gen,
equalized_lr = self.config.use_equalized_lr,
normalize_z = self.config.normalize_z,
use_pixelnorm = self.config.use_pixelnorm
)
ProDiscriminator = type( 'ProDiscriminator', ( ProGAN, ), dict( ProDiscriminator.__dict__ ) )
self.disc_model_metadata = checkpoint[ 'disc_model_metadata' ]
self.disc_model_downsampler = self.disc_model_metadata[ 'disc_model_downsampler' ]
self.num_classes_disc = self.disc_model_metadata[ 'num_classes_disc' ]
self.disc_model = ProDiscriminator(
final_res = self.config.res_samples,
pooler = self.disc_model_downsampler,
blur_type = self.config.blur_type,
nl = self.nl,
num_classes = self.num_classes_disc,
equalized_lr = self.config.use_equalized_lr,
mbstd_group_size = self.config.mbstd_group_size
)
assert self.config.init_res <= self.config.res_samples
# If pretrained model started at a higher resolution than 4:
_curr_res = checkpoint[ 'curr_res' ]
if _curr_res > 4:
_init_res_log2 = int( np.log2( _curr_res ) )
if float( _curr_res ) != 2**_init_res_log2:
raise ValueError( 'Only resolutions that are powers of 2 are supported.' )
num_scale_inc = _init_res_log2 - 2
for _ in range( num_scale_inc ):
self.gen_model.increase_scale()
self.disc_model.increase_scale()
# this applies to both networks simultaneously and takes care of fade_in_phase
self.gen_model.alpha = checkpoint[ 'alpha' ]
# Generator and Discriminator state data must match:
assert self.gen_model.cls_base.__dict__ == \
self.disc_model.cls_base.__dict__
# Initialize EWMA Generator Model:
self.gen_model_lagged = None
if self.config.use_ewma_gen:
_orig_mode = self.gen_model.training
self.gen_model.train()
self.gen_model.to( 'cpu' )
with torch.no_grad():
self.gen_model_lagged = copy.deepcopy( self.gen_model ) # for memory efficiency in GPU
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.train( mode = _orig_mode )
self.gen_model.train( mode = _orig_mode )
self.gen_model.to( self.config.dev )
self.gen_model.load_state_dict( checkpoint[ 'gen_model_state_dict' ] )
self.gen_model.zero_grad()
if self.config.use_ewma_gen:
self.gen_model_lagged.load_state_dict( checkpoint[ 'gen_model_lagged_state_dict' ] )
self.gen_model_lagged.to( self.config.metrics_dev )
self.gen_model_lagged.zero_grad()
self.disc_model.to( self.config.dev )
self.disc_model.load_state_dict( checkpoint[ 'disc_model_state_dict' ] )
self.disc_model.zero_grad()
self.sched_stop_step = checkpoint[ 'sched_stop_step' ]
self.lr_sched = checkpoint[ 'lr_sched' ]
self._scheduler_gen_state_dict = checkpoint['scheduler_gen_state_dict']
self._scheduler_disc_state_dict = checkpoint['scheduler_disc_state_dict']
self._sched_state_dict_set = False
self.optimizer = checkpoint['optimizer']
self.opt_gen.load_state_dict( checkpoint['opt_gen_state_dict'] )
self.opt_disc.load_state_dict( checkpoint['opt_disc_state_dict'] )
self.batch_size = checkpoint['batch_size']
self.loss = checkpoint['loss']
self.gradient_penalty = checkpoint['gradient_penalty']
self.curr_dataset_batch_num = checkpoint['curr_dataset_batch_num']
self.curr_epoch_num = checkpoint['curr_epoch_num']
self.tot_num_epochs = checkpoint['tot_num_epochs']
self.dataset_sz = checkpoint['dataset_sz']
self.ac = checkpoint['ac']
self.cond_gen = checkpoint['cond_gen']
self._tensor = torch.FloatTensor
if self.config.dev == torch.device( 'cuda' ):
self._tensor = torch.cuda.FloatTensor
if self.cond_gen:
self.labels_one_hot_disc = self._tensor( self.batch_size, self.num_classes )
self.labels_one_hot_gen = self._tensor( self.batch_size * self.config.gen_bs_mult, self.num_classes )
self.cond_disc = checkpoint['cond_disc']
self.valid_z = checkpoint['valid_z'].to( self.config.metrics_dev )
self.valid_label = checkpoint['valid_label']
if self.valid_label is not None:
self.valid_label = self.valid_label.to( 'cpu' )
self.grid_inputs_constructed = checkpoint['grid_inputs_constructed']
self.rand_idxs = checkpoint['rand_idxs']
self.gen_metrics_num = checkpoint['gen_metrics_num']
self.disc_metrics_num = checkpoint['disc_metrics_num']
self.curr_img_num = checkpoint[ 'curr_img_num' ]
self.nimg_transition_lst = checkpoint[ 'nimg_transition_lst' ]
self.not_trained_yet = checkpoint['not_trained_yet']
# By default, use the pretrained model's statistics (you can change this by changing ds_mean and ds_std manually)
self.ds_mean = checkpoint['ds_mean']
self.ds_std = checkpoint['ds_std']
if self._is_data_configed:
self.data_config.ds_mean = self.ds_mean.squeeze().tolist()
self.data_config.ds_std = self.ds_std.squeeze().tolist()
self.latent_distribution = checkpoint[ 'latent_distribution' ]
self.curr_phase_num = checkpoint[ 'curr_phase_num' ]
self.eps = False
if self.config.eps_drift > 0:
self.eps = True
self.lagged_params = checkpoint[ 'lagged_params' ]
self._progressively_grow = checkpoint[ 'progressively_grow' ]
self.pretrained_model = True
# Print configuration:
print( '---------- Loaded Model Configuration -----------' )
print( self.config )
print( '-------------------------------------------------' )
print( "\n If you would like to change any of the above configurations,\n" + \
" please do so via setting the attributes of your instantiated ProGANLearner().config object.\n" ) | [
"torch.cat",
"matplotlib.pyplot.figure",
"pathlib.Path",
"torch.optim.lr_scheduler.LambdaLR",
"torch.device",
"torch.no_grad",
"torch.ones",
"warnings.simplefilter",
"torch.nn.functional.avg_pool2d",
"torch.load",
"torchvision.transforms.ToPILImage",
"torch.nn.functional.binary_cross_entropy_w... | [((2174, 2225), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (2193, 2225), True, 'import matplotlib.pyplot as plt\n'), ((9260, 9275), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9273, 9275), False, 'import torch\n'), ((9875, 9890), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9888, 9890), False, 'import torch\n'), ((53432, 53447), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (53445, 53447), False, 'import torch\n'), ((55506, 55521), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (55519, 55521), False, 'import torch\n'), ((9338, 9367), 'copy.deepcopy', 'copy.deepcopy', (['self.gen_model'], {}), '(self.gen_model)\n', (9351, 9367), False, 'import copy\n'), ((10190, 10252), '_int.get_current_configuration', 'get_current_configuration', (['"""data_config"""'], {'raise_exception': '(True)'}), "('data_config', raise_exception=True)\n", (10215, 10252), False, 'from _int import get_current_configuration, LearnerConfigCopy\n'), ((24640, 24718), 'tqdm.tqdm', 'tqdm', ([], {'total': '(self.dataset_sz // self.batch_size * self.batch_size)', 'unit': '""" imgs"""'}), "(total=self.dataset_sz // self.batch_size * self.batch_size, unit=' imgs')\n", (24644, 24718), False, 'from tqdm import tqdm\n'), ((48697, 48782), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['self.opt_gen', 'self.scheduler_fn'], {'last_epoch': '(-1)'}), '(self.opt_gen, self.scheduler_fn,\n last_epoch=-1)\n', (48730, 48782), False, 'import torch\n'), ((48817, 48903), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['self.opt_disc', 'self.scheduler_fn'], {'last_epoch': '(-1)'}), '(self.opt_disc, self.scheduler_fn,\n last_epoch=-1)\n', (48850, 48903), False, 'import torch\n'), ((55129, 55148), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (55146, 55148), False, 'import logging\n'), ((55491, 55501), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55499, 55501), True, 'import matplotlib.pyplot as plt\n'), ((57011, 57062), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8 if labels is None else 9)'}), '(figsize=(8, 8 if labels is None else 9))\n', (57021, 57062), True, 'import matplotlib.pyplot as plt\n'), ((57154, 57173), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (57171, 57173), False, 'import logging\n'), ((58436, 58491), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (58459, 58491), False, 'import warnings\n'), ((59073, 59088), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (59077, 59088), False, 'from pathlib import Path\n'), ((61696, 61745), 'torch.load', 'torch.load', (['load_path'], {'map_location': '_map_location'}), '(load_path, map_location=_map_location)\n', (61706, 61745), False, 'import torch\n'), ((4119, 4227), '_int.LearnerConfigCopy', 'LearnerConfigCopy', (['config', 'self.__class__.__name__', 'NONREDEFINABLE_ATTRS', 'REDEFINABLE_FROM_LEARNER_ATTRS'], {}), '(config, self.__class__.__name__, NONREDEFINABLE_ATTRS,\n REDEFINABLE_FROM_LEARNER_ATTRS)\n', (4136, 4227), False, 'from _int import get_current_configuration, LearnerConfigCopy\n'), ((7661, 7784), 'functools.partial', 'partial', (['calc_gp'], {'gp_type': 'self._gradient_penalty', 'nn_disc': 'self.disc_model', 'lda': 'self.config.lda', 'gamma': 'self.config.gamma'}), '(calc_gp, gp_type=self._gradient_penalty, nn_disc=self.disc_model,\n lda=self.config.lda, gamma=self.config.gamma)\n', (7668, 7784), False, 'from functools import partial\n'), ((11753, 11856), 'torch.zeros', 'torch.zeros', (['self.batch_size', '_len_z_valid_dl'], {'device': 'self.config.metrics_dev', 'dtype': 'torch.float32'}), '(self.batch_size, _len_z_valid_dl, device=self.config.\n metrics_dev, dtype=torch.float32)\n', (11764, 11856), False, 'import torch\n'), ((11963, 12025), 'tqdm.autonotebook.tqdm', 'tqdma', ([], {'total': '_len_z_valid_ds', 'unit': '""" imgs"""', 'dynamic_ncols': '(True)'}), "(total=_len_z_valid_ds, unit=' imgs', dynamic_ncols=True)\n", (11968, 12025), True, 'from tqdm.autonotebook import tqdm as tqdma\n'), ((47397, 47412), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (47410, 47412), False, 'import torch\n'), ((48914, 48939), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (48937, 48939), False, 'import warnings\n'), ((48947, 48991), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (48968, 48991), False, 'import warnings\n'), ((49633, 49775), 'utils.backprop_utils.configure_adam_for_gan', 'configure_adam_for_gan', ([], {'lr_base': 'self.config.lr_base', 'betas': '(self.config.beta1, self.config.beta2)', 'eps': 'self.config.eps', 'wd': 'self.config.wd'}), '(lr_base=self.config.lr_base, betas=(self.config.\n beta1, self.config.beta2), eps=self.config.eps, wd=self.config.wd)\n', (49655, 49775), False, 'from utils.backprop_utils import calc_gp, configure_adam_for_gan\n'), ((66245, 66265), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (66257, 66265), False, 'import torch\n'), ((51957, 51980), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (51978, 51980), False, 'from torchvision import transforms\n'), ((52015, 52130), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(self.disc_model.prev_res, self.disc_model.prev_res)', 'interpolation': 'ds_downsampler_type'}), '(size=(self.disc_model.prev_res, self.disc_model.prev_res),\n interpolation=ds_downsampler_type)\n', (52032, 52130), False, 'from torchvision import transforms\n'), ((52222, 52331), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(self.disc_model.curr_res, self.disc_model.curr_res)', 'interpolation': 'Image.NEAREST'}), '(size=(self.disc_model.curr_res, self.disc_model.curr_res),\n interpolation=Image.NEAREST)\n', (52239, 52331), False, 'from torchvision import transforms\n'), ((52432, 52453), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (52451, 52453), False, 'from torchvision import transforms\n'), ((63627, 63645), 'numpy.log2', 'np.log2', (['_curr_res'], {}), '(_curr_res)\n', (63634, 63645), True, 'import numpy as np\n'), ((64451, 64466), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (64464, 64466), False, 'import torch\n'), ((64500, 64529), 'copy.deepcopy', 'copy.deepcopy', (['self.gen_model'], {}), '(self.gen_model)\n', (64513, 64529), False, 'import copy\n'), ((6012, 6041), 'numpy.log2', 'np.log2', (['self.config.init_res'], {}), '(self.config.init_res)\n', (6019, 6041), True, 'import numpy as np\n'), ((6853, 6868), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6866, 6868), False, 'import torch\n'), ((6904, 6933), 'copy.deepcopy', 'copy.deepcopy', (['self.gen_model'], {}), '(self.gen_model)\n', (6917, 6933), False, 'import copy\n'), ((11504, 11566), 'torch.ones', 'torch.ones', (['_len_z_valid_ds'], {'dtype': 'torch.float32', 'device': '"""cpu"""'}), "(_len_z_valid_ds, dtype=torch.float32, device='cpu')\n", (11514, 11566), False, 'import torch\n'), ((21304, 21319), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21317, 21319), False, 'import torch\n'), ((23287, 23336), 'torch.nn.functional.interpolate', 'F.interpolate', (['xb'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(xb, scale_factor=2, mode='nearest')\n", (23300, 23336), True, 'import torch.nn.functional as F\n'), ((46637, 46652), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (46650, 46652), False, 'import torch\n'), ((47178, 47189), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (47186, 47189), False, 'import sys\n'), ((51466, 51600), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': '(self.gen_model.curr_res, self.gen_model.curr_res)', 'interpolation': 'self.data_config.dataset_downsample_type'}), '(size=(self.gen_model.curr_res, self.gen_model.curr_res),\n interpolation=self.data_config.dataset_downsample_type)\n', (51483, 51600), False, 'from torchvision import transforms\n'), ((23520, 23561), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['xb'], {'kernel_size': '(2)', 'stride': '(2)'}), '(xb, kernel_size=2, stride=2)\n', (23532, 23561), True, 'import torch.nn.functional as F\n'), ((24904, 24929), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (24927, 24929), False, 'import warnings\n'), ((24943, 24974), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (24964, 24974), False, 'import warnings\n'), ((34334, 34437), 'torch.randint', 'torch.randint', (['(0)', 'self.num_classes', '(self.batch_size, 1)'], {'dtype': 'torch.int64', 'device': 'self.config.dev'}), '(0, self.num_classes, (self.batch_size, 1), dtype=torch.int64,\n device=self.config.dev)\n', (34347, 34437), False, 'import torch\n'), ((34460, 34608), 'utils.latent_utils.gen_rand_latent_vars', 'gen_rand_latent_vars', ([], {'num_samples': 'self.batch_size', 'length': 'self.config.len_latent', 'distribution': 'self.latent_distribution', 'device': 'self.config.dev'}), '(num_samples=self.batch_size, length=self.config.\n len_latent, distribution=self.latent_distribution, device=self.config.dev)\n', (34480, 34608), False, 'from utils.latent_utils import gen_rand_latent_vars\n'), ((34826, 34874), 'torch.cat', 'torch.cat', (['(zb, self.labels_one_hot_disc)'], {'dim': '(1)'}), '((zb, self.labels_one_hot_disc), dim=1)\n', (34835, 34874), False, 'import torch\n'), ((34915, 35063), 'utils.latent_utils.gen_rand_latent_vars', 'gen_rand_latent_vars', ([], {'num_samples': 'self.batch_size', 'length': 'self.config.len_latent', 'distribution': 'self.latent_distribution', 'device': 'self.config.dev'}), '(num_samples=self.batch_size, length=self.config.\n len_latent, distribution=self.latent_distribution, device=self.config.dev)\n', (34935, 35063), False, 'from utils.latent_utils import gen_rand_latent_vars\n'), ((35122, 35137), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (35135, 35137), False, 'import torch\n'), ((35714, 35792), 'tqdm.tqdm', 'tqdm', ([], {'total': '(self.dataset_sz // self.batch_size * self.batch_size)', 'unit': '""" imgs"""'}), "(total=self.dataset_sz // self.batch_size * self.batch_size, unit=' imgs')\n", (35718, 35792), False, 'from tqdm import tqdm\n'), ((40799, 40929), 'torch.randint', 'torch.randint', (['(0)', 'self.num_classes', '(self.batch_size * self.config.gen_bs_mult, 1)'], {'dtype': 'torch.int64', 'device': 'self.config.dev'}), '(0, self.num_classes, (self.batch_size * self.config.\n gen_bs_mult, 1), dtype=torch.int64, device=self.config.dev)\n', (40812, 40929), False, 'import torch\n'), ((40951, 41128), 'utils.latent_utils.gen_rand_latent_vars', 'gen_rand_latent_vars', ([], {'num_samples': '(self.batch_size * self.config.gen_bs_mult)', 'length': 'self.config.len_latent', 'distribution': 'self.latent_distribution', 'device': 'self.config.dev'}), '(num_samples=self.batch_size * self.config.gen_bs_mult,\n length=self.config.len_latent, distribution=self.latent_distribution,\n device=self.config.dev)\n', (40971, 41128), False, 'from utils.latent_utils import gen_rand_latent_vars\n'), ((41341, 41388), 'torch.cat', 'torch.cat', (['(zb, self.labels_one_hot_gen)'], {'dim': '(1)'}), '((zb, self.labels_one_hot_gen), dim=1)\n', (41350, 41388), False, 'import torch\n'), ((41429, 41606), 'utils.latent_utils.gen_rand_latent_vars', 'gen_rand_latent_vars', ([], {'num_samples': '(self.batch_size * self.config.gen_bs_mult)', 'length': 'self.config.len_latent', 'distribution': 'self.latent_distribution', 'device': 'self.config.dev'}), '(num_samples=self.batch_size * self.config.gen_bs_mult,\n length=self.config.len_latent, distribution=self.latent_distribution,\n device=self.config.dev)\n', (41449, 41606), False, 'from utils.latent_utils import gen_rand_latent_vars\n'), ((45676, 45691), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (45689, 45691), False, 'import torch\n'), ((47225, 47236), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (47233, 47236), False, 'import os\n'), ((23686, 23737), 'torch.nn.functional.interpolate', 'F.interpolate', (['xb'], {'scale_factor': '(0.5)', 'mode': '"""nearest"""'}), "(xb, scale_factor=0.5, mode='nearest')\n", (23699, 23737), True, 'import torch.nn.functional as F\n'), ((36125, 36140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (36138, 36140), False, 'import torch\n'), ((42058, 42169), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'loss_train_gen', 'target': 'self._dummy_target_real', 'reduction': '"""mean"""'}), "(input=loss_train_gen, target=self.\n _dummy_target_real, reduction='mean')\n", (42092, 42169), True, 'import torch.nn.functional as F\n'), ((43087, 43102), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (43100, 43102), False, 'import torch\n'), ((44275, 44282), 'timeit.default_timer', 'timer', ([], {}), '()\n', (44280, 44282), True, 'from timeit import default_timer as timer\n'), ((37392, 37506), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'discriminative_gen', 'target': 'self._dummy_target_gen', 'reduction': '"""mean"""'}), "(input=discriminative_gen, target=self.\n _dummy_target_gen, reduction='mean')\n", (37426, 37506), True, 'import torch.nn.functional as F\n'), ((37628, 37744), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'discriminative_real', 'target': 'self._dummy_target_real', 'reduction': '"""mean"""'}), "(input=discriminative_real, target=self.\n _dummy_target_real, reduction='mean')\n", (37662, 37744), True, 'import torch.nn.functional as F\n'), ((38951, 38958), 'timeit.default_timer', 'timer', ([], {}), '()\n', (38956, 38958), True, 'from timeit import default_timer as timer\n'), ((13335, 13395), 'torch.FloatTensor', 'torch.FloatTensor', (['(self.config.img_grid_sz ** 2)', 'zb.shape[1]'], {}), '(self.config.img_grid_sz ** 2, zb.shape[1])\n', (13352, 13395), False, 'import torch\n'), ((28861, 28910), 'torch.nn.functional.interpolate', 'F.interpolate', (['xb'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(xb, scale_factor=2, mode='nearest')\n", (28874, 28910), True, 'import torch.nn.functional as F\n'), ((30767, 30782), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (30780, 30782), False, 'import torch\n'), ((42296, 42406), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', ([], {'input': 'loss_train_gen', 'target': 'self._dummy_target_gen', 'reduction': '"""mean"""'}), "(input=loss_train_gen, target=self.\n _dummy_target_gen, reduction='mean')\n", (42330, 42406), True, 'import torch.nn.functional as F\n'), ((13568, 13614), 'torch.LongTensor', 'torch.LongTensor', (['(self.config.img_grid_sz ** 2)'], {}), '(self.config.img_grid_sz ** 2)\n', (13584, 13614), False, 'import torch\n'), ((29118, 29159), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['xb'], {'kernel_size': '(2)', 'stride': '(2)'}), '(xb, kernel_size=2, stride=2)\n', (29130, 29159), True, 'import torch.nn.functional as F\n'), ((29300, 29351), 'torch.nn.functional.interpolate', 'F.interpolate', (['xb'], {'scale_factor': '(0.5)', 'mode': '"""nearest"""'}), "(xb, scale_factor=0.5, mode='nearest')\n", (29313, 29351), True, 'import torch.nn.functional as F\n')] |
import numpy as np
import networkx as nx
import itertools as it
import random
from delegationForest import forest
import scipy.stats as sts
from utils import network_utils as nu
import math
class Election:
def __init__(self, n, num_samples=1000):
self.n = n
self.network = nx.Graph()
self.network_is_set = False
self.competencies = []
self.competencies_is_set = False
self.delegations = forest(n)
self.delegations_is_set = False
self.guru_chance = 1
self.sortition_amount = 1
self.num_samples = num_samples
def set_network(self, network_type, args=()):
"""
Create and/or set the network that voters will exist upon.
:param network_type: A string detailing the type of graph to be used, or an already instantiated nx.Graph object
:param args: a tuple containing any args required for the Graph generator
:return:
"""
if isinstance(network_type, nx.Graph):
self.network = network_type
self.n = len(self.network.nodes)
elif isinstance(network_type, str):
self.network = nu.generate_single_network(network_type=network_type,
num_nodes=self.n,
args=args)
self.network = nu.validate_network(self.network)
self.network_is_set = True
def set_competencies(self, dist, args=[]):
"""
Create and/or set the competencies that voters have.
:param dist: A string naming the distribution of competencies, or a list of n competency values.
:param args: args required for the competency distribution
:return:
"""
if isinstance(dist, list):
self.competencies = dist
elif isinstance(dist, str):
if dist == "uniform":
self.competencies = np.random.uniform(low=args[0], high=args[1], size=self.n).tolist()
if dist == "gaussian":
lower = args[0]
upper = args[1]
mu = args[2]
sigma = args[3]
N = self.n
self.competencies = sts.truncnorm.rvs(
(lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma, size=N).tolist()
if dist == "exponential":
scale = args[0]
competencies = np.random.exponential(scale=scale, size=self.n).tolist()
for i in range(len(competencies)):
while competencies[i] > 1:
competencies[i] = 1
# could resample if over 1 but instead just truncate it to 1 to align with the distribution
# competencies[i] = np.random.exponential(scale)
self.competencies = competencies
self.competencies_is_set = True
def reset_election(self):
"""
Reset all parameters in order to start a new trial of an election.
For now, we reset network, delegations, and competencies. But we could keep some of that constant over several
trials if desired.
:return:
"""
self.competencies = []
self.competencies_is_set = False
self.network = nx.Graph()
self.network_is_set = False
self.delegations = forest(self.n)
self.delegations_is_set = False
def set_delegations(self, guru_chance, mechanism, args=[]):
"""
Set the delegations using the given mechanism.
# TODO: Separate each mechanism into a different method.
:param mechanism: Most likely a string saying what mechanism to use. Could also pass a delegation function itself
:param args: Any args required for the delegation mechanism
:return:
"""
self.delegations_is_set = True
self.guru_chance = guru_chance
if isinstance(mechanism, list):
raise NotImplementedError("Still need to implement functionality for custom pre-decided delegations.")
elif isinstance(mechanism, str):
for i in range(self.n):
if random.random() < self.guru_chance:
# this voter will "choose" to act as a guru
continue
if mechanism == "random":
# find random neighbour to delegate to, or try all and find they all lead to a cycle
neighbours = list(self.network.neighbors(i))
random.shuffle(neighbours)
for j in neighbours:
if self.delegations.add_delegation(i, j):
break
if mechanism == "max":
# delegate to most accurate neighbour that won't cause a cycle, including yourself as an option
neighbour_acc_pairs = [(self.competencies[n], n) for n in list(self.network.neighbors(i))]
neighbour_acc_pairs.append((self.competencies[i], i))
neighbour_acc_pairs.sort(key=lambda x: x[0], reverse=True) # sort from most to least accurate
for (c, j) in neighbour_acc_pairs:
if self.delegations.add_delegation(i, j):
break
if mechanism == "random_better":
# delegate to a randomly chosen neighbour that is more competent than yourself
my_competency = self.competencies[i]
better_neighbours = [(self.competencies[n], n) for n in list(self.network.neighbors(i))]
better_neighbours = list(filter(lambda c: c[0] > my_competency, better_neighbours))
if len(better_neighbours) > 0:
random.shuffle(better_neighbours)
for comp, j in better_neighbours:
if self.delegations.add_delegation(i, j):
break
def prob_liquid(self):
"""
Use dynamic programming to calculate the probability that the current delegations and competencies will select
the correct outcome.
:return:
"""
assert self.network_is_set and self.competencies_is_set and self.delegations_is_set
return self.delegations.accuracy_of_forest(self.competencies)
def prob_direct(self):
"""
Calculate the probability that voters with the current set of competencies will select the correct outcome.
If the number of voters is small, calculate this exactly. Otherwise, approximate it.
:return:
"""
assert self.network_is_set and self.competencies_is_set and self.delegations_is_set
SMALL = 15
if self.n < SMALL: # could move this threshold to a parameter in the configuration file
return self.prob_direct_exact()
else:
return self.prob_correct_approximate(self.competencies)
def prob_direct_exact(self):
"""
Calculate the exact probability that voters with the current set of competencies will select the correct outcome
For each possible number of correct votes in a majority and each set of voters of that size, calculate the
chance of exactly that set of voters being correct.
:return:
"""
success_chance = 0
voter_indices = list(range(self.n))
majority_threshold = self.n - self.n//2
for num_correct in range(majority_threshold, self.n+1):
for majority_members in it.combinations(voter_indices, num_correct):
marginal_chance = np.prod([self.competencies[i] if i in majority_members else 1-self.competencies[i] for i in voter_indices])
success_chance += marginal_chance
return success_chance
def prob_correct_approximate(self, voter_competencies, weights=None):
"""
Approximate the probability that voters with the current set of competencies will select the correct outcome.
Sample the predetermined number of outcomes and return the average chance that an election turns out correctly.
:return:
"""
if weights is None:
weights = [1 for _ in voter_competencies]
num_voters = len(voter_competencies)
# majority_threshold = num_voters - num_voters // 2
majority_threshold = sum(weights) - sum(weights) / 2
num_correct = 0
for i in range(self.num_samples):
votes = [random.random() < voter_competencies[v] for v in range(num_voters)]
correct_weight = [weights[i] if votes[i] else 0 for i in range(num_voters)]
if sum(correct_weight) >= majority_threshold:
num_correct += 1
success_chance = num_correct/self.num_samples
return success_chance
def prob_sortition(self, sortition_args, sortition_amount, sortition_method):
"""
Calculate the probability that voters chosen by some sortition mechanism will select the correct outcome
:return:
"""
assert self.network_is_set and self.competencies_is_set
# if type(sortition_args) is tuple:
# sortition_amount = sortition_args[0]
# elif type(sortition_args) is int or type(sortition_args) is float:
# sortition_amount = sortition_args
# else:
# raise ValueError("Must pass valid sortition arguments")
if 0 < sortition_amount < 1:
num_voters = int(sortition_amount * self.n)
elif sortition_amount >= 1:
num_voters = int(sortition_amount)
else:
raise ValueError("Must pass in a positive value for sortition amount.")
weights = None
if sortition_method == "random":
voter_competencies = random.sample(self.competencies, k=num_voters)
elif sortition_method == "weighted random":
voter_competencies = random.sample(self.competencies, k=num_voters)
weights = [math.log2(p/(1-p)) for p in voter_competencies]
weights = [float(i) / sum(weights) for i in weights]
elif sortition_method == "most accurate":
all_competencies = sorted(self.competencies, reverse=True)
voter_competencies = all_competencies[0:num_voters]
elif sortition_method == "weighted most accurate":
all_competencies = sorted(self.competencies, reverse=True)
voter_competencies = all_competencies[0:num_voters]
weights = [math.log2(p / (1 - p)) for p in voter_competencies]
weights = [float(i) / sum(weights) for i in weights]
elif sortition_method == "continuous":
q = sortition_args[0]
# 1. sort voters by competency
all_competencies = sorted(self.competencies, reverse=True)
# 2. From most to least competent, swap idx with a random index greater than idx with probability q
# for i in range(len(all_competencies)):
# since we are only swapping with voters less accurate (further in list) we can stop after giving first
# num_voter voters a chance to swap with someone less competent
for i in range(num_voters):
# print(f"At voter {i}")
# so there is a q% chance of swapping with a less accurate voter
# so higher q means approaching a uniformly random set of voters
if random.random() < q:
j = random.randint(i+1, len(all_competencies)-1)
swap_value = all_competencies[i]
all_competencies[i] = all_competencies[j]
all_competencies[j] = swap_value
# print(f"swapped {i} and {j} and this is a good spot for a breakpoint")
# 3. take first num_voters from resulting list
voter_competencies = all_competencies[0:num_voters]
else:
raise ValueError(f"Passed {sortition_method} as sortition_method which is not a supported value.")
return self.prob_correct_approximate(voter_competencies, weights)
# if __name__ == "__main__":
# e = Election(n=15, num_samples=1000)
# e.set_network(network_type="complete")
# e.set_competencies(dist="gaussian", args=[0, 1, 0.5, 0.3])
# e.set_delegations(guru_chance=0.8, mechanism="random_better")
#
# for num_samples in [100, 1000, 10000]:
# p_direct_exact = e.prob_direct_exact()
# p_direct_mc = e.prob_correct_approximate(e.competencies)
#
# print(p_direct_exact)
# print(p_direct_mc)
# print("---__------------")
#
#
#
# # p_liquid = e.prob_liquid() | [
"numpy.random.uniform",
"random.sample",
"random.shuffle",
"numpy.random.exponential",
"itertools.combinations",
"random.random",
"networkx.Graph",
"utils.network_utils.validate_network",
"scipy.stats.truncnorm.rvs",
"utils.network_utils.generate_single_network",
"math.log2",
"numpy.prod",
"... | [((299, 309), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (307, 309), True, 'import networkx as nx\n'), ((445, 454), 'delegationForest.forest', 'forest', (['n'], {}), '(n)\n', (451, 454), False, 'from delegationForest import forest\n'), ((1377, 1410), 'utils.network_utils.validate_network', 'nu.validate_network', (['self.network'], {}), '(self.network)\n', (1396, 1410), True, 'from utils import network_utils as nu\n'), ((3331, 3341), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3339, 3341), True, 'import networkx as nx\n'), ((3406, 3420), 'delegationForest.forest', 'forest', (['self.n'], {}), '(self.n)\n', (3412, 3420), False, 'from delegationForest import forest\n'), ((7638, 7681), 'itertools.combinations', 'it.combinations', (['voter_indices', 'num_correct'], {}), '(voter_indices, num_correct)\n', (7653, 7681), True, 'import itertools as it\n'), ((9911, 9957), 'random.sample', 'random.sample', (['self.competencies'], {'k': 'num_voters'}), '(self.competencies, k=num_voters)\n', (9924, 9957), False, 'import random\n'), ((1162, 1248), 'utils.network_utils.generate_single_network', 'nu.generate_single_network', ([], {'network_type': 'network_type', 'num_nodes': 'self.n', 'args': 'args'}), '(network_type=network_type, num_nodes=self.n,\n args=args)\n', (1188, 1248), True, 'from utils import network_utils as nu\n'), ((7718, 7834), 'numpy.prod', 'np.prod', (['[(self.competencies[i] if i in majority_members else 1 - self.competencies[\n i]) for i in voter_indices]'], {}), '([(self.competencies[i] if i in majority_members else 1 - self.\n competencies[i]) for i in voter_indices])\n', (7725, 7834), True, 'import numpy as np\n'), ((10044, 10090), 'random.sample', 'random.sample', (['self.competencies'], {'k': 'num_voters'}), '(self.competencies, k=num_voters)\n', (10057, 10090), False, 'import random\n'), ((8598, 8613), 'random.random', 'random.random', ([], {}), '()\n', (8611, 8613), False, 'import random\n'), ((10114, 10136), 'math.log2', 'math.log2', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (10123, 10136), False, 'import math\n'), ((4208, 4223), 'random.random', 'random.random', ([], {}), '()\n', (4221, 4223), False, 'import random\n'), ((4571, 4597), 'random.shuffle', 'random.shuffle', (['neighbours'], {}), '(neighbours)\n', (4585, 4597), False, 'import random\n'), ((1948, 2005), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'args[0]', 'high': 'args[1]', 'size': 'self.n'}), '(low=args[0], high=args[1], size=self.n)\n', (1965, 2005), True, 'import numpy as np\n'), ((2240, 2335), 'scipy.stats.truncnorm.rvs', 'sts.truncnorm.rvs', (['((lower - mu) / sigma)', '((upper - mu) / sigma)'], {'loc': 'mu', 'scale': 'sigma', 'size': 'N'}), '((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale\n =sigma, size=N)\n', (2257, 2335), True, 'import scipy.stats as sts\n'), ((2463, 2510), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'scale', 'size': 'self.n'}), '(scale=scale, size=self.n)\n', (2484, 2510), True, 'import numpy as np\n'), ((5850, 5883), 'random.shuffle', 'random.shuffle', (['better_neighbours'], {}), '(better_neighbours)\n', (5864, 5883), False, 'import random\n'), ((10631, 10653), 'math.log2', 'math.log2', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (10640, 10653), False, 'import math\n'), ((11568, 11583), 'random.random', 'random.random', ([], {}), '()\n', (11581, 11583), False, 'import random\n')] |
# coding: utf-8
# # Nengo Example: Matrix multiplication
#
# This example demonstrates how to perform general matrix multiplication using Nengo. The matrix can change during the computation, which makes it distinct from doing static matrix multiplication with neural connection weights (as done in all neural networks).
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
import nengo
# In[ ]:
N = 100
Amat = np.asarray([[.5, -.5]])
Bmat = np.asarray([[0.58, -1.,], [.7, 0.1]])
# Values should stay within the range (-radius,radius)
radius = 1
model = nengo.Network(label='Matrix Multiplication', seed=123)
with model:
# Make 2 EnsembleArrays to store the input
A = nengo.networks.EnsembleArray(N, Amat.size, radius=radius)
B = nengo.networks.EnsembleArray(N, Bmat.size, radius=radius)
# connect inputs to them so we can set their value
inputA = nengo.Node(Amat.ravel())
inputB = nengo.Node(Bmat.ravel())
nengo.Connection(inputA, A.input)
nengo.Connection(inputB, B.input)
A_probe = nengo.Probe(A.output, sample_every=0.01, synapse=0.01)
B_probe = nengo.Probe(B.output, sample_every=0.01, synapse=0.01)
# In[ ]:
with nengo.Simulator(model) as sim:
sim.run(1)
plt.subplot(1, 2, 1)
plt.title('A')
plt.plot(sim.trange(dt=0.01), sim.data[A_probe])
plt.subplot(1, 2, 2)
plt.title('B')
plt.plot(sim.trange(dt=0.01), sim.data[B_probe]);
# In[ ]:
from nengo.dists import Choice
with model:
# The C matix is composed of populations that each contain
# one element of A and one element of B.
# These elements will be multiplied together in the next step.
# The appropriate encoders make the multiplication more accurate
C = nengo.networks.EnsembleArray(N,
n_ensembles=Amat.size * Bmat.shape[1],
ens_dimensions=2,
radius=1.5 * radius,
encoders=Choice([[1, 1], [-1, 1], [1, -1], [-1, -1]]))
# Determine the transformation matrices to get the correct pairwise
# products computed. This looks a bit like black magic but if
# you manually try multiplying two matrices together, you can see
# the underlying pattern. Basically, we need to build up D1*D2*D3
# pairs of numbers in C to compute the product of. If i,j,k are the
# indexes into the D1*D2*D3 products, we want to compute the product
# of element (i,j) in A with the element (j,k) in B. The index in
# A of (i,j) is j+i*D2 and the index in B of (j,k) is k+j*D3.
# The index in C is j+k*D2+i*D2*D3, multiplied by 2 since there are
# two values per ensemble. We add 1 to the B index so it goes into
# the second value in the ensemble.
transformA = np.zeros((C.dimensions, Amat.size))
transformB = np.zeros((C.dimensions, Bmat.size))
for i in range(Amat.shape[0]):
for j in range(Amat.shape[1]):
for k in range(Bmat.shape[1]):
tmp = (j + k * Amat.shape[1] + i * Bmat.size)
transformA[tmp * 2][j + i * Amat.shape[1]] = 1
transformB[tmp * 2 + 1][k + j * Bmat.shape[1]] = 1
print("A->C")
print(transformA)
print("B->C")
print(transformB)
with model:
nengo.Connection(A.output, C.input, transform=transformA)
nengo.Connection(B.output, C.input, transform=transformB)
C_probe = nengo.Probe(C.output, sample_every=0.01, synapse=0.01)
# In[ ]:
# Look at C
with nengo.Simulator(model) as sim:
sim.run(1)
# In[ ]:
plt.plot(sim.trange(dt=0.01), sim.data[C_probe])
plt.title('C');
# In[ ]:
with model:
# Now compute the products and do the appropriate summing
D = nengo.networks.EnsembleArray(N,
n_ensembles=Amat.shape[0] * Bmat.shape[1],
radius=radius)
def product(x):
return x[0] * x[1]
# The mapping for this transformation is much easier, since we want to
# combine D2 pairs of elements (we sum D2 products together)
transformC = np.zeros((D.dimensions, Bmat.size))
for i in range(Bmat.size):
transformC[i // Bmat.shape[0]][i] = 1
print("C->D")
print(transformC)
with model:
prod = C.add_output("product", product)
nengo.Connection(prod, D.input, transform=transformC)
D_probe = nengo.Probe(D.output, sample_every=0.01, synapse=0.01)
# In[ ]:
with nengo.Simulator(model) as sim:
sim.run(1)
# In[ ]:
plt.plot(sim.trange(dt=0.01), sim.data[D_probe])
for d in np.dot(Amat, Bmat).flatten():
plt.axhline(d, color='k')
plt.title("D");
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axhline",
"nengo.dists.Choice",
"nengo.networks.EnsembleArray",
"numpy.asarray",
"numpy.zeros",
"nengo.Probe",
"nengo.Simulator",
"numpy.dot",
"nengo.Network",
"nengo.Connection"
] | [((427, 452), 'numpy.asarray', 'np.asarray', (['[[0.5, -0.5]]'], {}), '([[0.5, -0.5]])\n', (437, 452), True, 'import numpy as np\n'), ((458, 496), 'numpy.asarray', 'np.asarray', (['[[0.58, -1.0], [0.7, 0.1]]'], {}), '([[0.58, -1.0], [0.7, 0.1]])\n', (468, 496), True, 'import numpy as np\n'), ((572, 626), 'nengo.Network', 'nengo.Network', ([], {'label': '"""Matrix Multiplication"""', 'seed': '(123)'}), "(label='Matrix Multiplication', seed=123)\n", (585, 626), False, 'import nengo\n'), ((1231, 1251), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1242, 1251), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1266), 'matplotlib.pyplot.title', 'plt.title', (['"""A"""'], {}), "('A')\n", (1261, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1327, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1351), 'matplotlib.pyplot.title', 'plt.title', (['"""B"""'], {}), "('B')\n", (1346, 1351), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2779), 'numpy.zeros', 'np.zeros', (['(C.dimensions, Amat.size)'], {}), '((C.dimensions, Amat.size))\n', (2752, 2779), True, 'import numpy as np\n'), ((2793, 2828), 'numpy.zeros', 'np.zeros', (['(C.dimensions, Bmat.size)'], {}), '((C.dimensions, Bmat.size))\n', (2801, 2828), True, 'import numpy as np\n'), ((3522, 3536), 'matplotlib.pyplot.title', 'plt.title', (['"""C"""'], {}), "('C')\n", (3531, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3982, 4017), 'numpy.zeros', 'np.zeros', (['(D.dimensions, Bmat.size)'], {}), '((D.dimensions, Bmat.size))\n', (3990, 4017), True, 'import numpy as np\n'), ((4497, 4511), 'matplotlib.pyplot.title', 'plt.title', (['"""D"""'], {}), "('D')\n", (4506, 4511), True, 'import matplotlib.pyplot as plt\n'), ((694, 751), 'nengo.networks.EnsembleArray', 'nengo.networks.EnsembleArray', (['N', 'Amat.size'], {'radius': 'radius'}), '(N, Amat.size, radius=radius)\n', (722, 751), False, 'import nengo\n'), ((760, 817), 'nengo.networks.EnsembleArray', 'nengo.networks.EnsembleArray', (['N', 'Bmat.size'], {'radius': 'radius'}), '(N, Bmat.size, radius=radius)\n', (788, 817), False, 'import nengo\n'), ((958, 991), 'nengo.Connection', 'nengo.Connection', (['inputA', 'A.input'], {}), '(inputA, A.input)\n', (974, 991), False, 'import nengo\n'), ((996, 1029), 'nengo.Connection', 'nengo.Connection', (['inputB', 'B.input'], {}), '(inputB, B.input)\n', (1012, 1029), False, 'import nengo\n'), ((1044, 1098), 'nengo.Probe', 'nengo.Probe', (['A.output'], {'sample_every': '(0.01)', 'synapse': '(0.01)'}), '(A.output, sample_every=0.01, synapse=0.01)\n', (1055, 1098), False, 'import nengo\n'), ((1113, 1167), 'nengo.Probe', 'nengo.Probe', (['B.output'], {'sample_every': '(0.01)', 'synapse': '(0.01)'}), '(B.output, sample_every=0.01, synapse=0.01)\n', (1124, 1167), False, 'import nengo\n'), ((1185, 1207), 'nengo.Simulator', 'nengo.Simulator', (['model'], {}), '(model)\n', (1200, 1207), False, 'import nengo\n'), ((3197, 3254), 'nengo.Connection', 'nengo.Connection', (['A.output', 'C.input'], {'transform': 'transformA'}), '(A.output, C.input, transform=transformA)\n', (3213, 3254), False, 'import nengo\n'), ((3259, 3316), 'nengo.Connection', 'nengo.Connection', (['B.output', 'C.input'], {'transform': 'transformB'}), '(B.output, C.input, transform=transformB)\n', (3275, 3316), False, 'import nengo\n'), ((3331, 3385), 'nengo.Probe', 'nengo.Probe', (['C.output'], {'sample_every': '(0.01)', 'synapse': '(0.01)'}), '(C.output, sample_every=0.01, synapse=0.01)\n', (3342, 3385), False, 'import nengo\n'), ((3415, 3437), 'nengo.Simulator', 'nengo.Simulator', (['model'], {}), '(model)\n', (3430, 3437), False, 'import nengo\n'), ((3632, 3725), 'nengo.networks.EnsembleArray', 'nengo.networks.EnsembleArray', (['N'], {'n_ensembles': '(Amat.shape[0] * Bmat.shape[1])', 'radius': 'radius'}), '(N, n_ensembles=Amat.shape[0] * Bmat.shape[1],\n radius=radius)\n', (3660, 3725), False, 'import nengo\n'), ((4181, 4234), 'nengo.Connection', 'nengo.Connection', (['prod', 'D.input'], {'transform': 'transformC'}), '(prod, D.input, transform=transformC)\n', (4197, 4234), False, 'import nengo\n'), ((4249, 4303), 'nengo.Probe', 'nengo.Probe', (['D.output'], {'sample_every': '(0.01)', 'synapse': '(0.01)'}), '(D.output, sample_every=0.01, synapse=0.01)\n', (4260, 4303), False, 'import nengo\n'), ((4321, 4343), 'nengo.Simulator', 'nengo.Simulator', (['model'], {}), '(model)\n', (4336, 4343), False, 'import nengo\n'), ((4471, 4496), 'matplotlib.pyplot.axhline', 'plt.axhline', (['d'], {'color': '"""k"""'}), "(d, color='k')\n", (4482, 4496), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4455), 'numpy.dot', 'np.dot', (['Amat', 'Bmat'], {}), '(Amat, Bmat)\n', (4443, 4455), True, 'import numpy as np\n'), ((1981, 2025), 'nengo.dists.Choice', 'Choice', (['[[1, 1], [-1, 1], [1, -1], [-1, -1]]'], {}), '([[1, 1], [-1, 1], [1, -1], [-1, -1]])\n', (1987, 2025), False, 'from nengo.dists import Choice\n')] |
# > \brief \b SROTG
#
# =========== DOCUMENTATION ===========
#
# Online html documentation available at
# http://www.netlib.org/lapack/explore-html/
#
# Definition:
# ===========
#
# def SROTG(SA,SB,C,S)
#
# .. Scalar Arguments ..
# REAL C,S,SA,SB
# ..
#
#
# > \par Purpose:
# =============
# >
# > \verbatim
# >
# > SROTG construct givens plane rotation.
# > \endverbatim
#
# Arguments:
# ==========
#
# > \param[in] SA
# > \verbatim
# > SA is REAL
# > \endverbatim
# >
# > \param[in] SB
# > \verbatim
# > SB is REAL
# > \endverbatim
# >
# > \param[out] C
# > \verbatim
# > C is REAL
# > \endverbatim
# >
# > \param[out] S
# > \verbatim
# > S is REAL
# > \endverbatim
#
# Authors:
# ========
#
# > \author Univ. of Tennessee
# > \author Univ. of California Berkeley
# > \author Univ. of Colorado Denver
# > \author NAG Ltd.
#
# > \date November 2017
#
# > \ingroup single_blas_level1
#
# > \par Further Details:
# =====================
# >
# > \verbatim
# >
# > <NAME>, linpack, 3/11/78.
# > \endverbatim
# >
# =====================================================================
from math import sqrt
import numpy as np
def srotg(SA, SB):
#
# -- Reference BLAS level1 routine (version 3.8.0) --
# -- Reference BLAS is a software package provided by Univ. of Tennessee, --
# -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
# November 2017
#
# .. Scalar Arguments ..
# REAL C,S,SA,SB
# ..
#
# =====================================================================
#
# .. Local Scalars ..
# REAL R,ROE,SCALE,Z
# ..
# .. Intrinsic Functions ..
# INTRINSIC ABS,SIGN,SQRT
# ..
SCALE = abs(SA) + abs(SB)
if SCALE == 0.0:
C, S, R, Z = 1.0, 0.0, 0.0, 0.0
else:
ROE = SA if abs(SA) > abs(SB) else SB
R = np.sign(ROE) * SCALE * sqrt((SA / SCALE) ** 2 + (SB / SCALE) ** 2)
C = SA / R
S = SB / R
if abs(SA) > abs(SB):
Z = S
elif abs(SB) >= abs(SA) and C != 0.0:
Z = 1.0 / C
else:
Z = 1.0
return C, S, R, Z
| [
"math.sqrt",
"numpy.sign"
] | [((1979, 2022), 'math.sqrt', 'sqrt', (['((SA / SCALE) ** 2 + (SB / SCALE) ** 2)'], {}), '((SA / SCALE) ** 2 + (SB / SCALE) ** 2)\n', (1983, 2022), False, 'from math import sqrt\n'), ((1956, 1968), 'numpy.sign', 'np.sign', (['ROE'], {}), '(ROE)\n', (1963, 1968), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 <NAME>
"""Convert inputs and lables GLOBAL cmvns to a Numpy file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import struct
import numpy as np
def convert_cmvn_to_numpy(inputs_cmvn, labels_cmvn, save_dir):
"""Convert global binary ark cmvn to numpy format."""
print("Convert %s and %s to Numpy format" % (inputs_cmvn, labels_cmvn))
inputs_filename = inputs_cmvn
labels_filename = labels_cmvn
inputs = read_binary_file(inputs_filename, 0)
labels = read_binary_file(labels_filename, 0)
inputs_frame = inputs[0][-1]
labels_frame = labels[0][-1]
# assert inputs_frame == labels_frame
cmvn_inputs = np.hsplit(inputs, [inputs.shape[1] - 1])[0]
cmvn_labels = np.hsplit(labels, [labels.shape[1] - 1])[0]
mean_inputs = cmvn_inputs[0] / inputs_frame
stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)
mean_labels = cmvn_labels[0] / labels_frame
stddev_labels = np.sqrt(cmvn_labels[1] / labels_frame - mean_labels ** 2)
cmvn_name = os.path.join(save_dir, "train_cmvn.npz")
np.savez(cmvn_name,
mean_inputs=mean_inputs,
stddev_inputs=stddev_inputs,
mean_labels=mean_labels,
stddev_labels=stddev_labels)
print("Write to %s" % cmvn_name)
def read_binary_file(filename, offset=0):
"""Read data from matlab binary file (row, col and matrix).
Returns:
A numpy matrix containing data of the given binary file.
"""
read_buffer = open(filename, 'rb')
read_buffer.seek(int(offset), 0)
header = struct.unpack('<xcccc', read_buffer.read(5))
if header[0] != 'B':
print("Input .ark file is not binary")
sys.exit(-1)
if header[1] == 'C':
print("Input .ark file is compressed, exist now.")
sys.exit(-1)
rows = 0; cols= 0
_, rows = struct.unpack('<bi', read_buffer.read(5))
_, cols = struct.unpack('<bi', read_buffer.read(5))
if header[1] == "F":
tmp_mat = np.frombuffer(read_buffer.read(rows * cols * 4),
dtype=np.float32)
elif header[1] == "D":
tmp_mat = np.frombuffer(read_buffer.read(rows * cols * 8),
dtype=np.float64)
mat = np.reshape(tmp_mat, (rows, cols))
read_buffer.close()
return mat
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--inputs',
type=str,
default='data/train/inputs.cmvn',
help="Name of input CMVN file."
)
parser.add_argument(
'--labels',
type=str,
default='data/train/labels.cmvn',
help="Name of label CMVN file."
)
parser.add_argument(
'--save_dir',
required=True,
help="Directory to save Numpy format CMVN file."
)
FLAGS, unparsed = parser.parse_known_args()
convert_cmvn_to_numpy(FLAGS.inputs, FLAGS.labels, FLAGS.save_dir)
| [
"argparse.ArgumentParser",
"numpy.hsplit",
"numpy.reshape",
"numpy.savez",
"os.path.join",
"sys.exit",
"numpy.sqrt"
] | [((991, 1048), 'numpy.sqrt', 'np.sqrt', (['(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)'], {}), '(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)\n', (998, 1048), True, 'import numpy as np\n'), ((1117, 1174), 'numpy.sqrt', 'np.sqrt', (['(cmvn_labels[1] / labels_frame - mean_labels ** 2)'], {}), '(cmvn_labels[1] / labels_frame - mean_labels ** 2)\n', (1124, 1174), True, 'import numpy as np\n'), ((1192, 1232), 'os.path.join', 'os.path.join', (['save_dir', '"""train_cmvn.npz"""'], {}), "(save_dir, 'train_cmvn.npz')\n", (1204, 1232), False, 'import os\n'), ((1237, 1368), 'numpy.savez', 'np.savez', (['cmvn_name'], {'mean_inputs': 'mean_inputs', 'stddev_inputs': 'stddev_inputs', 'mean_labels': 'mean_labels', 'stddev_labels': 'stddev_labels'}), '(cmvn_name, mean_inputs=mean_inputs, stddev_inputs=stddev_inputs,\n mean_labels=mean_labels, stddev_labels=stddev_labels)\n', (1245, 1368), True, 'import numpy as np\n'), ((2414, 2447), 'numpy.reshape', 'np.reshape', (['tmp_mat', '(rows, cols)'], {}), '(tmp_mat, (rows, cols))\n', (2424, 2447), True, 'import numpy as np\n'), ((2530, 2555), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2553, 2555), False, 'import argparse\n'), ((816, 856), 'numpy.hsplit', 'np.hsplit', (['inputs', '[inputs.shape[1] - 1]'], {}), '(inputs, [inputs.shape[1] - 1])\n', (825, 856), True, 'import numpy as np\n'), ((878, 918), 'numpy.hsplit', 'np.hsplit', (['labels', '[labels.shape[1] - 1]'], {}), '(labels, [labels.shape[1] - 1])\n', (887, 918), True, 'import numpy as np\n'), ((1864, 1876), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1872, 1876), False, 'import sys\n'), ((1969, 1981), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1977, 1981), False, 'import sys\n')] |
import argparse
import os
from pathlib import Path
import numpy as np
from lib.GANDCTAnalysis.src.dataset import image_paths
from lib.GANDCTAnalysis.src.image_np import load_image
from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, \
remove_high_frequencies
SIZE = [128, 128, 3]
def save_image(manipulated, name, output_path, greyscale=False):
# Save reconstructed PNG image for both Frank and Yu et al.
restored_image = restore_from_dct(manipulated, greyscale)
restored_image.save(f"{output_path}/{name}.png")
def remove_fingerprint_and_save(paths, fingerprint, factor, output, proportional=False):
for i, path in enumerate(paths):
# load and manipulate image one after another
# this will keep the memory requirement low
image = load_image(path)
image = dct(image)
if not proportional: # absolute fingerprint of mean spectra
manipulated = subtract_fingerprint(image, factor, fingerprint)
else: # proportional fingerprint of regression weights
manipulated = reduce_fingerprint_proportionally(image, factor, fingerprint)
save_image(manipulated, Path(path).stem, output)
print(f"\rRemoved fingerprint from {i+1:6d} of {len(paths)} images", end="")
print()
def remove_bar(paths, output, width):
for i, path in enumerate(paths):
image = load_image(path)
image = dct(image)
image = remove_high_frequencies(image, width)
# convert back and save
save_image(image, Path(path).stem, output)
print(f"\rRemoved bar from {i + 1:6d} of {len(paths)} images", end="")
print()
def main(args):
# Create output folder, if it doesn't already exist
if args.output is not None:
output = args.output
else:
path = Path(args.GAN_IMAGES)
output = f"{path.parent}/{path.stem}_{args.mode}"
os.makedirs(output, exist_ok=True)
paths = image_paths(args.GAN_IMAGES)
if args.mode == "mean":
fingerprint = np.load(args.FINGERPRINT)
print(f"Remove fingerprint directly from {args.GAN_IMAGES}")
remove_fingerprint_and_save(paths, fingerprint, args.factor, output, proportional=False)
elif args.mode == "peak":
fingerprint = np.load(args.FINGERPRINT)
print(f"Remove fingerprint proportionally from {args.GAN_IMAGES}")
if args.threshold is not None:
assert 1 >= args.threshold >= 0
fingerprint[fingerprint < args.threshold] = 0
remove_fingerprint_and_save(paths, fingerprint, args.factor, output, proportional=True)
elif args.mode == "regression":
fingerprint = np.load(args.FINGERPRINT)
print(f"Remove fingerprint proportionally from {args.GAN_IMAGES}")
remove_fingerprint_and_save(paths, fingerprint, args.factor, output, proportional=True)
elif args.mode == "bar":
print(f"Remove bars {args.GAN_IMAGES}")
remove_bar(paths, output, args.width)
else:
raise NotImplementedError("Specified non valid mode!")
def parse_args():
parser = argparse.ArgumentParser()
commands = parser.add_subparsers(help="Mode {mean|peak|regression|bar}.", dest="mode")
mean = commands.add_parser("mean")
mean.add_argument("GAN_IMAGES", help="Folder of GAN image dataset to be manipulated.", type=str)
mean.add_argument("FINGERPRINT", help=f".npy file which contains the precalculated fingerprint", type=str, default=1)
mean.add_argument("--output", "-o",help=f"Output folder.", type=str)
mean.add_argument("--factor", help=f"Factor by which to scale the fingerprint before removal", type=float, default=1)
peak = commands.add_parser("peak")
peak.add_argument("GAN_IMAGES", help="Folder of GAN image dataset to be manipulated.", type=str)
peak.add_argument("FINGERPRINT", help=f".npy file which contains the precalculated fingerprint", type=str, default=1)
peak.add_argument("--output", "-o",help=f"Output folder.", type=str)
peak.add_argument("--factor", help=f"Factor by which to scale the fingerprint before removal", type=float, default=1)
peak.add_argument("--threshold", help=f"Threshold, which to apply to fingerprint before removal", type=float)
regression = commands.add_parser("regression")
regression.add_argument("GAN_IMAGES", help="Folder of GAN image dataset to be manipulated.", type=str)
regression.add_argument("FINGERPRINT", help=f".npy file which contains the precalculated fingerprint", type=str, default=1)
regression.add_argument("--output", "-o",help=f"Output folder.", type=str)
regression.add_argument("--factor", help=f"Factor by which to scale the fingerprint before removal", type=float, default=1)
bar = commands.add_parser("bar")
bar.add_argument("GAN_IMAGES", help="Folder of GAN image dataset to be manipulated.", type=str)
bar.add_argument("--width", help=f"Width of bar to remove. Default is 10.", type=int, default=10)
bar.add_argument("--output", "-o",help=f"Output folder.", type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
main(parse_args())
| [
"src.resources.calculations.remove_high_frequencies",
"numpy.load",
"src.resources.calculations.reduce_fingerprint_proportionally",
"os.makedirs",
"argparse.ArgumentParser",
"src.resources.calculations.restore_from_dct",
"lib.GANDCTAnalysis.src.dataset.image_paths",
"src.resources.calculations.dct",
... | [((503, 543), 'src.resources.calculations.restore_from_dct', 'restore_from_dct', (['manipulated', 'greyscale'], {}), '(manipulated, greyscale)\n', (519, 543), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((1953, 1987), 'os.makedirs', 'os.makedirs', (['output'], {'exist_ok': '(True)'}), '(output, exist_ok=True)\n', (1964, 1987), False, 'import os\n'), ((2001, 2029), 'lib.GANDCTAnalysis.src.dataset.image_paths', 'image_paths', (['args.GAN_IMAGES'], {}), '(args.GAN_IMAGES)\n', (2012, 2029), False, 'from lib.GANDCTAnalysis.src.dataset import image_paths\n'), ((3147, 3172), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3170, 3172), False, 'import argparse\n'), ((847, 863), 'lib.GANDCTAnalysis.src.image_np.load_image', 'load_image', (['path'], {}), '(path)\n', (857, 863), False, 'from lib.GANDCTAnalysis.src.image_np import load_image\n'), ((880, 890), 'src.resources.calculations.dct', 'dct', (['image'], {}), '(image)\n', (883, 890), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((1435, 1451), 'lib.GANDCTAnalysis.src.image_np.load_image', 'load_image', (['path'], {}), '(path)\n', (1445, 1451), False, 'from lib.GANDCTAnalysis.src.image_np import load_image\n'), ((1468, 1478), 'src.resources.calculations.dct', 'dct', (['image'], {}), '(image)\n', (1471, 1478), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((1496, 1533), 'src.resources.calculations.remove_high_frequencies', 'remove_high_frequencies', (['image', 'width'], {}), '(image, width)\n', (1519, 1533), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((1869, 1890), 'pathlib.Path', 'Path', (['args.GAN_IMAGES'], {}), '(args.GAN_IMAGES)\n', (1873, 1890), False, 'from pathlib import Path\n'), ((2081, 2106), 'numpy.load', 'np.load', (['args.FINGERPRINT'], {}), '(args.FINGERPRINT)\n', (2088, 2106), True, 'import numpy as np\n'), ((987, 1035), 'src.resources.calculations.subtract_fingerprint', 'subtract_fingerprint', (['image', 'factor', 'fingerprint'], {}), '(image, factor, fingerprint)\n', (1007, 1035), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((1126, 1187), 'src.resources.calculations.reduce_fingerprint_proportionally', 'reduce_fingerprint_proportionally', (['image', 'factor', 'fingerprint'], {}), '(image, factor, fingerprint)\n', (1159, 1187), False, 'from src.resources.calculations import subtract_fingerprint, reduce_fingerprint_proportionally, dct, restore_from_dct, remove_high_frequencies\n'), ((2325, 2350), 'numpy.load', 'np.load', (['args.FINGERPRINT'], {}), '(args.FINGERPRINT)\n', (2332, 2350), True, 'import numpy as np\n'), ((1220, 1230), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1224, 1230), False, 'from pathlib import Path\n'), ((1593, 1603), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1597, 1603), False, 'from pathlib import Path\n'), ((2721, 2746), 'numpy.load', 'np.load', (['args.FINGERPRINT'], {}), '(args.FINGERPRINT)\n', (2728, 2746), True, 'import numpy as np\n')] |
#! /usr/bin/env python3.6
import argparse
import json
from rospkg import RosPack
import os
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
from typing import Union, List
import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('run_name', default='empty_4x4', nargs='?', type=str,
help='Name of the configuration to be loaded')
parser.add_argument('-s', '--show', action='store_true')
parser.add_argument('-c', '--color', default='tab:blue', type=str,
help='Color of the plots')
args = parser.parse_args()
pkg_path = RosPack().get_path('multi_agent_sac')
run_dir = os.path.join(pkg_path, 'runs', args.run_name)
# load config
with open(os.path.join(pkg_path, 'params', args.run_name+'.yaml')) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# load training data
summaries = []
for run in glob(os.path.join(run_dir, 'run_*')):
summary_f = os.path.join(run, 'logs', 'summary.json')
if not os.path.isfile(summary_f):
continue
with open(summary_f) as f:
data = json.load(f)
summaries.append(data)
if len(summaries) == 0:
print('No summaries found')
quit()
print(f'Found {len(summaries)} runs')
# strip path name from keys
for summary in summaries:
keys = list(summary.keys())
for k in keys:
new_k = k.split('/logs/')[1]
summary[new_k] = summary.pop(k)
plt.rcParams.update({'font.size': 22})
## Calculate, show and save plot for a given value
#
# @param name Name or list of names for the plotted values
# @param save Save the figure to runs/<run_name>/figures/<name>.png
# @param log_scale Use log scale when plotting
def draw_plot(name: Union[str, List[str]], *,
log_scale: bool=False) -> type(None):
data = []
for summary in summaries:
if type(name) == str:
run_data = summary[name]
data.append(run_data)
elif type(name) == list:
for n in name:
run_data = summary[n]
data.append(run_data)
data = np.array(data)
ep = data[0, :, 1]*config['n_threads']/1000
vals = data[:, :, 2]
avg = np.average(data[:, :, 2], axis=0)
plt.grid()
ax = plt.gca()
ax.fill_between(ep, np.min(vals, axis=0), np.max(vals, axis=0), alpha=.2, color=args.color)
if log_scale:
ax.set_yscale('log')
plt.plot(ep, avg, color=args.color)
y_range = np.max(avg) - np.min(avg)
y_top = np.max(avg) + y_range*0.2
y_bot = np.min(avg) - y_range*0.2
plt.ylim(bottom=y_bot, top=y_top)
plt.xlabel('1000 episodes')
plt.tight_layout()
os.makedirs(os.path.join(run_dir, 'figures'), exist_ok=True)
if type(name) == str:
n = name
else:
n = name[0]
plt.savefig(os.path.join(run_dir, 'figures', n.replace('/', '_')))
if args.show:
fig = plt.gcf()
if type(name) == str:
n = name
else:
n = name[0]
fig.canvas.set_window_title(n)
plt.show()
draw_plot('evaluation/episode_reward_average/episode_reward_average')
draw_plot('evaluation/collision_average/collision_average')
draw_plot('evaluation/reached_goal_average/reached_goal_average')
draw_plot(['loss/critic/critic_1', 'loss/critic/critic_2'])
draw_plot('loss/entropy/entropy')
draw_plot('loss/policy/policy')
draw_plot('evaluation/collision_reward/collision_reward')
draw_plot('evaluation/alpha/alpha', log_scale=True)
| [
"yaml.load",
"argparse.ArgumentParser",
"os.path.isfile",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.max",
"matplotlib.pyplot.rcParams.update",
"numpy.average",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.min",
"matplotlib.pyplot.gcf",
... | [((251, 276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (274, 276), False, 'import argparse\n'), ((684, 729), 'os.path.join', 'os.path.join', (['pkg_path', '"""runs"""', 'args.run_name'], {}), "(pkg_path, 'runs', args.run_name)\n", (696, 729), False, 'import os\n'), ((1537, 1575), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (1556, 1575), True, 'import matplotlib.pyplot as plt\n'), ((843, 879), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (852, 879), False, 'import yaml\n'), ((945, 975), 'os.path.join', 'os.path.join', (['run_dir', '"""run_*"""'], {}), "(run_dir, 'run_*')\n", (957, 975), False, 'import os\n'), ((998, 1039), 'os.path.join', 'os.path.join', (['run', '"""logs"""', '"""summary.json"""'], {}), "(run, 'logs', 'summary.json')\n", (1010, 1039), False, 'import os\n'), ((2261, 2275), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2269, 2275), True, 'import numpy as np\n'), ((2374, 2407), 'numpy.average', 'np.average', (['data[:, :, 2]'], {'axis': '(0)'}), '(data[:, :, 2], axis=0)\n', (2384, 2407), True, 'import numpy as np\n'), ((2417, 2427), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2425, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2450), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2448, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2649), 'matplotlib.pyplot.plot', 'plt.plot', (['ep', 'avg'], {'color': 'args.color'}), '(ep, avg, color=args.color)\n', (2622, 2649), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2819), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': 'y_bot', 'top': 'y_top'}), '(bottom=y_bot, top=y_top)\n', (2794, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2828, 2855), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""1000 episodes"""'], {}), "('1000 episodes')\n", (2838, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2882), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2880, 2882), True, 'import matplotlib.pyplot as plt\n'), ((628, 637), 'rospkg.RosPack', 'RosPack', ([], {}), '()\n', (635, 637), False, 'from rospkg import RosPack\n'), ((763, 820), 'os.path.join', 'os.path.join', (['pkg_path', '"""params"""', "(args.run_name + '.yaml')"], {}), "(pkg_path, 'params', args.run_name + '.yaml')\n", (775, 820), False, 'import os\n'), ((1055, 1080), 'os.path.isfile', 'os.path.isfile', (['summary_f'], {}), '(summary_f)\n', (1069, 1080), False, 'import os\n'), ((1158, 1170), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1167, 1170), False, 'import json\n'), ((2479, 2499), 'numpy.min', 'np.min', (['vals'], {'axis': '(0)'}), '(vals, axis=0)\n', (2485, 2499), True, 'import numpy as np\n'), ((2501, 2521), 'numpy.max', 'np.max', (['vals'], {'axis': '(0)'}), '(vals, axis=0)\n', (2507, 2521), True, 'import numpy as np\n'), ((2668, 2679), 'numpy.max', 'np.max', (['avg'], {}), '(avg)\n', (2674, 2679), True, 'import numpy as np\n'), ((2682, 2693), 'numpy.min', 'np.min', (['avg'], {}), '(avg)\n', (2688, 2693), True, 'import numpy as np\n'), ((2710, 2721), 'numpy.max', 'np.max', (['avg'], {}), '(avg)\n', (2716, 2721), True, 'import numpy as np\n'), ((2752, 2763), 'numpy.min', 'np.min', (['avg'], {}), '(avg)\n', (2758, 2763), True, 'import numpy as np\n'), ((2904, 2936), 'os.path.join', 'os.path.join', (['run_dir', '"""figures"""'], {}), "(run_dir, 'figures')\n", (2916, 2936), False, 'import os\n'), ((3158, 3167), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3165, 3167), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3336, 3338), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from torch.utils.data import Dataset, DataLoader
class MNIST_Dataset(Dataset):
def __init__(self, image):
super(MNIST_Dataset).__init__()
self.image = image
def __len__(self):
return self.image.shape[0]
def __getitem__(self, idx):
return np.random.binomial(1, self.image[idx, :]).astype('float32')
class OMNIGLOT_Dataset(Dataset):
def __init__(self, image):
super(OMNIGLOT_Dataset).__init__()
self.image = image
def __len__(self):
return self.image.shape[0]
def __getitem__(self, idx):
return np.random.binomial(1, self.image[idx, :]).astype('float32')
| [
"numpy.random.binomial"
] | [((306, 347), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.image[idx, :]'], {}), '(1, self.image[idx, :])\n', (324, 347), True, 'import numpy as np\n'), ((606, 647), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.image[idx, :]'], {}), '(1, self.image[idx, :])\n', (624, 647), True, 'import numpy as np\n')] |
# Copyright 2020 Graphcore Ltd.
import os
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python import ipu
tf.disable_v2_behavior()
SIZE = 5
def add_op(x, y):
outputs = {
"output_types": [tf.float32],
"output_shapes": [tf.TensorShape([SIZE])],
}
base_path = os.path.realpath(os.path.dirname(__file__))
lib_path = os.path.join(base_path, "libcustom_op.so")
gp_path = os.path.join(base_path, "custom_codelet.gp")
return ipu.custom_ops.precompiled_user_op([x, y],
lib_path,
gp_path,
outs=outputs)
if __name__ == '__main__':
cfg = ipu.utils.create_ipu_config()
cfg = ipu.utils.auto_select_ipus(cfg, 1)
ipu.utils.configure_ipu_system(cfg)
with tf.device("cpu"):
x_data = tf.placeholder(np.float32, [SIZE])
y_data = tf.placeholder(np.float32, [SIZE])
with ipu.scopes.ipu_scope("/device:IPU:0"):
xla_result = ipu.ipu_compiler.compile(add_op, [x_data, y_data])
with tf.Session() as sess:
a = np.random.rand(SIZE)
b = np.random.rand(SIZE)
result = sess.run(xla_result, feed_dict={x_data: a, y_data: b})
# Show result from the IPU:
print("IPU:", result[0])
# Same calculation on host for comparison:
print("numpy:", a + b)
| [
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.python.ipu.scopes.ipu_scope",
"os.path.join",
"tensorflow.python.ipu.custom_ops.precompiled_user_op",
"tensorflow.compat.v1.placeholder",
"os.path.dirname",
"tensorflow.compat.v1.TensorShape",
"tensorflow.python.ipu.ipu_compiler.compile",... | [((131, 155), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (153, 155), True, 'import tensorflow.compat.v1 as tf\n'), ((373, 415), 'os.path.join', 'os.path.join', (['base_path', '"""libcustom_op.so"""'], {}), "(base_path, 'libcustom_op.so')\n", (385, 415), False, 'import os\n'), ((430, 474), 'os.path.join', 'os.path.join', (['base_path', '"""custom_codelet.gp"""'], {}), "(base_path, 'custom_codelet.gp')\n", (442, 474), False, 'import os\n'), ((487, 562), 'tensorflow.python.ipu.custom_ops.precompiled_user_op', 'ipu.custom_ops.precompiled_user_op', (['[x, y]', 'lib_path', 'gp_path'], {'outs': 'outputs'}), '([x, y], lib_path, gp_path, outs=outputs)\n', (521, 562), False, 'from tensorflow.python import ipu\n'), ((740, 769), 'tensorflow.python.ipu.utils.create_ipu_config', 'ipu.utils.create_ipu_config', ([], {}), '()\n', (767, 769), False, 'from tensorflow.python import ipu\n'), ((780, 814), 'tensorflow.python.ipu.utils.auto_select_ipus', 'ipu.utils.auto_select_ipus', (['cfg', '(1)'], {}), '(cfg, 1)\n', (806, 814), False, 'from tensorflow.python import ipu\n'), ((819, 854), 'tensorflow.python.ipu.utils.configure_ipu_system', 'ipu.utils.configure_ipu_system', (['cfg'], {}), '(cfg)\n', (849, 854), False, 'from tensorflow.python import ipu\n'), ((331, 356), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (346, 356), False, 'import os\n'), ((865, 881), 'tensorflow.compat.v1.device', 'tf.device', (['"""cpu"""'], {}), "('cpu')\n", (874, 881), True, 'import tensorflow.compat.v1 as tf\n'), ((900, 934), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['np.float32', '[SIZE]'], {}), '(np.float32, [SIZE])\n', (914, 934), True, 'import tensorflow.compat.v1 as tf\n'), ((952, 986), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['np.float32', '[SIZE]'], {}), '(np.float32, [SIZE])\n', (966, 986), True, 'import tensorflow.compat.v1 as tf\n'), ((997, 1034), 'tensorflow.python.ipu.scopes.ipu_scope', 'ipu.scopes.ipu_scope', (['"""/device:IPU:0"""'], {}), "('/device:IPU:0')\n", (1017, 1034), False, 'from tensorflow.python import ipu\n'), ((1057, 1107), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu.ipu_compiler.compile', (['add_op', '[x_data, y_data]'], {}), '(add_op, [x_data, y_data])\n', (1081, 1107), False, 'from tensorflow.python import ipu\n'), ((1118, 1130), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (1128, 1130), True, 'import tensorflow.compat.v1 as tf\n'), ((1152, 1172), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (1166, 1172), True, 'import numpy as np\n'), ((1185, 1205), 'numpy.random.rand', 'np.random.rand', (['SIZE'], {}), '(SIZE)\n', (1199, 1205), True, 'import numpy as np\n'), ((266, 288), 'tensorflow.compat.v1.TensorShape', 'tf.TensorShape', (['[SIZE]'], {}), '([SIZE])\n', (280, 288), True, 'import tensorflow.compat.v1 as tf\n')] |
import numpy as np
import sys
import shapely.geometry
import shapely.ops
import ujson as json
from descartes.patch import PolygonPatch
import matplotlib
#matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
from collections import defaultdict
import os
class TilespecVisualizer(object):
def __init__(self, scale=0.1):
self._scale = 0.1
def _get_pts_polygons(self, all_tiles_pts):
polygons = [
shapely.geometry.Polygon([
t_pts[0],
t_pts[1],
t_pts[2],
t_pts[3],
t_pts[0]])
for t_pts in all_tiles_pts]
return polygons
def _get_pts_unified_polygon(self, all_tiles_pts):
polygons = self._get_pts_polygons(all_tiles_pts)
return shapely.ops.cascaded_union(polygons)
def _plot_polygons(self, polygons):
fig = plt.figure()
plt.gca().invert_yaxis()
ax = fig.add_subplot(111)
#multi = shapely.geometry.MultiPolygon([[p, []] for p in polygons])
multi = shapely.geometry.MultiPolygon(polygons)
for p in multi:
# plot coordinates system
x, y = p.exterior.xy
ax.plot(x, y, 'o', color='#999999', zorder=1)
#patch = PolygonPatch(p, facecolor='#6699cc', edgecolor='#6699cc', alpha=0.5, zorder=2)
patch = PolygonPatch(p, edgecolor='#6699cc', alpha=0.5, zorder=2)
ax.add_patch(patch)
#polygons = [for ts in tilespecs]
return fig, ax
def _find_center(self, all_tiles_pts):
# receives a list of a (tile) points list
# find the minimial and maximal x and y values, and return the center of the mfov
min_x = min([np.min(tile_pts[:, 0]) for tile_pts in all_tiles_pts])
min_y = min([np.min(tile_pts[:, 1]) for tile_pts in all_tiles_pts])
max_x = max([np.max(tile_pts[:, 0]) for tile_pts in all_tiles_pts])
max_y = max([np.max(tile_pts[:, 1]) for tile_pts in all_tiles_pts])
return np.array([(min_x + max_x) / 2.0, (min_y + max_y) / 2.0]).astype(np.int)
def visualize_tilespecs(self, tilespecs, title=None):
# get the per-mfov transformation
mfovs = set([ts["mfov"] for ts in tilespecs])
mfovs = sorted(list(mfovs))
# Get the tilespecs boundaries for each mfov
mfovs_tiles_orig = defaultdict(list)
for tile_ts in tilespecs:
tile_mfov = tile_ts["mfov"]
orig_pts = np.array([
[tile_ts["bbox"][0], tile_ts["bbox"][2]],
[tile_ts["bbox"][1], tile_ts["bbox"][2]],
[tile_ts["bbox"][1], tile_ts["bbox"][3]],
[tile_ts["bbox"][0], tile_ts["bbox"][3]]
], dtype=np.float64)
mfovs_tiles_orig[tile_mfov].append(orig_pts)
for cur_mfov in mfovs:
for idx in range(len(mfovs_tiles_orig[cur_mfov])):
mfovs_tiles_orig[cur_mfov][idx] *= self._scale
# Create the polygons for each of the mfovs
polygons_orig = [self._get_pts_unified_polygon(mfovs_tiles_orig[cur_mfov]) for cur_mfov in mfovs]
# Create the figures for both the original and the projected outlines
fig_orig, ax_orig = self._plot_polygons(polygons_orig)
# Add mfov indices to the center of each relevant mfov
# first, find normalized centers
mfov_centers_orig = {cur_mfov: self._find_center(mfovs_tiles_orig[cur_mfov]) for cur_mfov in mfovs}
for cur_mfov in mfovs:
ax_orig.text(mfov_centers_orig[cur_mfov][0], mfov_centers_orig[cur_mfov][1], '{}'.format(cur_mfov), color='red')
if title is not None:
ax_orig.set_title(title)
return fig_orig
def visualize_ts_file(self, ts_fname):
"""
Given a tilespec file name, opens, and outlines the mfovs boundaries as polygons,
"""
with open(ts_fname, 'rt') as ts_f:
tilespecs = json.load(ts_f)
title = os.path.basename(ts_fname)
return self.visualize_tilespecs(tilespecs, title)
if __name__ == '__main__':
in_files = sys.argv[1:]
visualizer = TilespecVisualizer()
for in_file in in_files:
fig_orig = visualizer.visualize_ts_file(in_file)
plt.show()
| [
"descartes.patch.PolygonPatch",
"matplotlib.pyplot.show",
"os.path.basename",
"ujson.load",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.array",
"numpy.max",
"matplotlib.pyplot.gca"
] | [((880, 892), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (890, 892), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2389), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2383, 2389), False, 'from collections import defaultdict\n'), ((4048, 4074), 'os.path.basename', 'os.path.basename', (['ts_fname'], {}), '(ts_fname)\n', (4064, 4074), False, 'import os\n'), ((4324, 4334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4332, 4334), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1424), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['p'], {'edgecolor': '"""#6699cc"""', 'alpha': '(0.5)', 'zorder': '(2)'}), "(p, edgecolor='#6699cc', alpha=0.5, zorder=2)\n", (1379, 1424), False, 'from descartes.patch import PolygonPatch\n'), ((2489, 2694), 'numpy.array', 'np.array', (["[[tile_ts['bbox'][0], tile_ts['bbox'][2]], [tile_ts['bbox'][1], tile_ts[\n 'bbox'][2]], [tile_ts['bbox'][1], tile_ts['bbox'][3]], [tile_ts['bbox']\n [0], tile_ts['bbox'][3]]]"], {'dtype': 'np.float64'}), "([[tile_ts['bbox'][0], tile_ts['bbox'][2]], [tile_ts['bbox'][1],\n tile_ts['bbox'][2]], [tile_ts['bbox'][1], tile_ts['bbox'][3]], [tile_ts\n ['bbox'][0], tile_ts['bbox'][3]]], dtype=np.float64)\n", (2497, 2694), True, 'import numpy as np\n'), ((4015, 4030), 'ujson.load', 'json.load', (['ts_f'], {}), '(ts_f)\n', (4024, 4030), True, 'import ujson as json\n'), ((901, 910), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (908, 910), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1750), 'numpy.min', 'np.min', (['tile_pts[:, 0]'], {}), '(tile_pts[:, 0])\n', (1734, 1750), True, 'import numpy as np\n'), ((1804, 1826), 'numpy.min', 'np.min', (['tile_pts[:, 1]'], {}), '(tile_pts[:, 1])\n', (1810, 1826), True, 'import numpy as np\n'), ((1880, 1902), 'numpy.max', 'np.max', (['tile_pts[:, 0]'], {}), '(tile_pts[:, 0])\n', (1886, 1902), True, 'import numpy as np\n'), ((1956, 1978), 'numpy.max', 'np.max', (['tile_pts[:, 1]'], {}), '(tile_pts[:, 1])\n', (1962, 1978), True, 'import numpy as np\n'), ((2027, 2083), 'numpy.array', 'np.array', (['[(min_x + max_x) / 2.0, (min_y + max_y) / 2.0]'], {}), '([(min_x + max_x) / 2.0, (min_y + max_y) / 2.0])\n', (2035, 2083), True, 'import numpy as np\n')] |
"""
Code by <NAME>(@graykode)
https://en.wikipedia.org/wiki/Bernoulli_distribution
"""
import random
import numpy as np
from matplotlib import pyplot as plt
def bernoulli(p, k):
return p if k else 1 - p
n_experiment = 100
p = 0.6
x = np.arange(n_experiment)
y = []
for _ in range(n_experiment):
pick = bernoulli(p, k=bool(random.getrandbits(1)))
y.append(pick)
u, s = np.mean(y), np.std(y)
plt.scatter(x, y, label=r'$\mu=%.2f,\ \sigma=%.2f$' % (u, s))
plt.legend()
plt.savefig('graph/bernoulli.png')
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.std",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.mean",
"numpy.arange",
"random.getrandbits",
"matplotlib.pyplot.savefig"
] | [((248, 271), 'numpy.arange', 'np.arange', (['n_experiment'], {}), '(n_experiment)\n', (257, 271), True, 'import numpy as np\n'), ((413, 476), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'label': "('$\\\\mu=%.2f,\\\\ \\\\sigma=%.2f$' % (u, s))"}), "(x, y, label='$\\\\mu=%.2f,\\\\ \\\\sigma=%.2f$' % (u, s))\n", (424, 476), True, 'from matplotlib import pyplot as plt\n'), ((475, 487), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (485, 487), True, 'from matplotlib import pyplot as plt\n'), ((488, 522), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""graph/bernoulli.png"""'], {}), "('graph/bernoulli.png')\n", (499, 522), True, 'from matplotlib import pyplot as plt\n'), ((523, 533), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (531, 533), True, 'from matplotlib import pyplot as plt\n'), ((391, 401), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (398, 401), True, 'import numpy as np\n'), ((403, 412), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (409, 412), True, 'import numpy as np\n'), ((340, 361), 'random.getrandbits', 'random.getrandbits', (['(1)'], {}), '(1)\n', (358, 361), False, 'import random\n')] |
"""
Download from https://raw.githubusercontent.com/EricYizhenWang/robust_nn_icml/master/robust_1nn.py
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier
from scipy.spatial.distance import cdist
from .robust_nn.eps_separation import find_eps_separated_set
from ..variables import auto_var
from .approx_ap import approx_ap
def find_confident_label(X, Y, k, Delta):
thres = 2*Delta
neigh = NearestNeighbors(k)
neigh.fit(X)
nn = neigh.kneighbors(X, k, return_distance=False)
Y_hat = np.array([[Y[j] for j in i] for i in nn])
Y_hat = np.sum(Y_hat, axis=1)/k
Y_hat = [0 if (abs(Y_hat[i]) < thres) else np.sign(Y_hat[i])
for i in range(X.shape[0])]
Y_hat = np.array(Y_hat)
return Y_hat
def find_red_points(X, Y, Y_hat, eps, ord):
n = X.shape[0]
d = cdist(X, X, 'minkowski', ord)
is_close = (d < eps)
is_red = np.ones((n, 1))
for i in range(n):
for j in range(n):
if (is_close[i, j]) and (Y_hat[i] != Y_hat[j]):
is_red[i] = 0
if Y_hat[i] != Y[i]:
is_red[i] = 0
red_pts = [np.array([X[i] for i in range(n) if is_red[i]]),
np.array([Y[i] for i in range(n) if is_red[i]])]
other_pts = [np.array([X[i] for i in range(n) if not is_red[i]]),
np.array([Y[i] for i in range(n) if not is_red[i]])]
[X_red, Y_red] = [red_pts[0], red_pts[1]]
[X_other, Y_other] = [other_pts[0], other_pts[1]]
return X_red, Y_red, X_other, Y_other
def get_aug_v2(X, Y, Delta, delta, eps, ord):
k = min(int(3*np.log(X.shape[0]/delta)/(np.log(2)*(Delta**2))), len(X))
Y_hat = find_confident_label(X, Y, k, Delta)
X_red, Y_red, X_other, Y_other = find_red_points(
X, Y, eps=eps, Y_hat=Y_hat, ord=ord)
print("X_red: ", X_red.shape)
[X, Y] = [X_other, Y_other]
print("X_other: ", X.shape)
X, Y = find_eps_separated_set(X, eps/2, Y, ord=ord)
if X_red.shape[0] > 0:
X_train = np.concatenate([X, X_red])
Y_train = np.concatenate([Y, Y_red])
else:
X_train = X
Y_train = Y
return X_train, Y_train
def get_aug_data(model, X, y, eps, sep_measure=None):
"""Augment the data for defense, returns the augmented data
Arguments:
model {Classifier} -- The original classifier model, model.train_type defines the way to augment the data.
model.train_type == 'adv': adversarial training
model.train_type == 'robustv2': adversarial pruning
model.train_type == 'advPruning': Wang's defense for 1-NN
model.train_type is None: Do nothing returns the original data
X {ndarray, dim=2} -- feature vectors
y {ndarray, dim=1} -- labels
eps {float} -- defense strength
Returns:
augX {ndarray, dim=2} -- augmented feature vectors
augy {ndarray, dim=1} -- augmented labels
"""
if model.train_type in ['adv', 'advPruning', 'advPruningmin', 'robustv2', 'approxAP']:
if eps is None and model.eps is None:
raise ValueError("eps should not be None with train type %s" % model.train_type)
elif eps is None:
eps = model.eps
print(f"augmenting data with eps: {eps}")
if sep_measure is None:
sep_measure = model.sep_measure if model.sep_measure else model.ord
if model.train_type == 'adv':
advX = model.attack_model.perturb(X, y=y, eps=eps)
ind = np.where(np.linalg.norm(advX, axis=1) != 0)
augX = np.vstack((X, X[ind]+advX[ind]))
augy = np.concatenate((y, y[ind]))
elif model.train_type == 'adv2':
print(auto_var.var_value)
model_name = auto_var.get_variable_name("model")
aug_model_name = "_".join(model_name.split("_")[1:])
print(aug_model_name)
for _ in range(5):
if "decision_tree" in model_name:
auto_var.get_var_with_argument("model", "decision_tree_d5").fit(X, y=y)
elif "rf" in model_name:
auto_var.get_var_with_argument("model", "random_forest_100_d5").fit(X, y=y)
elif "nn" in model_name:
auto_var.get_var_with_argument("model", "_".join(model_name.split("_")[1:3])).fit(X, y=y)
else:
raise ValueError()
advX = auto_var.get_var("attack").perturb(X, y=y, eps=eps)
ind = np.where(np.linalg.norm(advX, axis=1) != 0)
X = np.vstack((X, X[ind]+advX[ind]))
y = np.concatenate((y, y[ind]))
auto_var.set_intermidiate_variable("trnX", X)
auto_var.set_intermidiate_variable("trny", y)
augX, augy = X, y
elif model.train_type == 'advPruningmin':
if len(np.unique(y)) != 2:
raise ValueError("Can only deal with number of classes = 2"
"got %d", len(np.unique(y)))
y = y.astype(int)*2-1
augX, augy = find_eps_separated_set(X, eps/2, y, 'min_measure')
augy = (augy+1)//2
elif model.train_type == 'approxAP':
augX, augy = approx_ap(X, y, eps, sep_measure)
elif model.train_type == 'advPruning':
if len(np.unique(y)) != 2:
raise ValueError("Can only deal with number of classes = 2"
"got %d", len(np.unique(y)))
y = y.astype(int)*2-1
augX, augy = find_eps_separated_set(X, eps/2, y, ord=sep_measure)
augy = (augy+1)//2
elif model.train_type == 'robustv2':
if len(np.unique(y)) != 2:
raise ValueError("Can only deal with number of classes = 2"
"got %d", len(np.unique(y)))
y = y.astype(int)*2-1
Delta = model.Delta
delta = model.delta
augX, augy = get_aug_v2(X, y, Delta, delta, eps, sep_measure)
augy = (augy+1)//2
elif model.train_type is None:
augX, augy = X, y
elif model.train_type == 'robust':
augX, augy = X, y
else:
raise ValueError("Not supported training type %s", model.train_type)
return augX, augy
| [
"scipy.spatial.distance.cdist",
"numpy.sum",
"numpy.log",
"numpy.unique",
"numpy.ones",
"numpy.array",
"sklearn.neighbors.NearestNeighbors",
"numpy.linalg.norm",
"numpy.sign",
"numpy.vstack",
"numpy.concatenate"
] | [((472, 491), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', (['k'], {}), '(k)\n', (488, 491), False, 'from sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\n'), ((576, 617), 'numpy.array', 'np.array', (['[[Y[j] for j in i] for i in nn]'], {}), '([[Y[j] for j in i] for i in nn])\n', (584, 617), True, 'import numpy as np\n'), ((772, 787), 'numpy.array', 'np.array', (['Y_hat'], {}), '(Y_hat)\n', (780, 787), True, 'import numpy as np\n'), ((877, 906), 'scipy.spatial.distance.cdist', 'cdist', (['X', 'X', '"""minkowski"""', 'ord'], {}), "(X, X, 'minkowski', ord)\n", (882, 906), False, 'from scipy.spatial.distance import cdist\n'), ((946, 961), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (953, 961), True, 'import numpy as np\n'), ((630, 651), 'numpy.sum', 'np.sum', (['Y_hat'], {'axis': '(1)'}), '(Y_hat, axis=1)\n', (636, 651), True, 'import numpy as np\n'), ((2054, 2080), 'numpy.concatenate', 'np.concatenate', (['[X, X_red]'], {}), '([X, X_red])\n', (2068, 2080), True, 'import numpy as np\n'), ((2099, 2125), 'numpy.concatenate', 'np.concatenate', (['[Y, Y_red]'], {}), '([Y, Y_red])\n', (2113, 2125), True, 'import numpy as np\n'), ((3581, 3615), 'numpy.vstack', 'np.vstack', (['(X, X[ind] + advX[ind])'], {}), '((X, X[ind] + advX[ind]))\n', (3590, 3615), True, 'import numpy as np\n'), ((3629, 3656), 'numpy.concatenate', 'np.concatenate', (['(y, y[ind])'], {}), '((y, y[ind]))\n', (3643, 3656), True, 'import numpy as np\n'), ((701, 718), 'numpy.sign', 'np.sign', (['Y_hat[i]'], {}), '(Y_hat[i])\n', (708, 718), True, 'import numpy as np\n'), ((3530, 3558), 'numpy.linalg.norm', 'np.linalg.norm', (['advX'], {'axis': '(1)'}), '(advX, axis=1)\n', (3544, 3558), True, 'import numpy as np\n'), ((4512, 4546), 'numpy.vstack', 'np.vstack', (['(X, X[ind] + advX[ind])'], {}), '((X, X[ind] + advX[ind]))\n', (4521, 4546), True, 'import numpy as np\n'), ((4561, 4588), 'numpy.concatenate', 'np.concatenate', (['(y, y[ind])'], {}), '((y, y[ind]))\n', (4575, 4588), True, 'import numpy as np\n'), ((1642, 1668), 'numpy.log', 'np.log', (['(X.shape[0] / delta)'], {}), '(X.shape[0] / delta)\n', (1648, 1668), True, 'import numpy as np\n'), ((1668, 1677), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1674, 1677), True, 'import numpy as np\n'), ((4461, 4489), 'numpy.linalg.norm', 'np.linalg.norm', (['advX'], {'axis': '(1)'}), '(advX, axis=1)\n', (4475, 4489), True, 'import numpy as np\n'), ((4793, 4805), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4802, 4805), True, 'import numpy as np\n'), ((4928, 4940), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4937, 4940), True, 'import numpy as np\n'), ((5228, 5240), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5237, 5240), True, 'import numpy as np\n'), ((5363, 5375), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5372, 5375), True, 'import numpy as np\n'), ((5566, 5578), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5575, 5578), True, 'import numpy as np\n'), ((5701, 5713), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (5710, 5713), True, 'import numpy as np\n')] |
from mesa import Agent
import numpy as np
'''
This module describes the main agents used in CityModel:
- CarAgent
- BuildingAgent
- TrafficLightAgent
- IntersectionAgent
'''
class CarAgent(Agent):
'''
Creates a car agent inside the model grid.
Arguments:
- unique_id: car identifier
- path: random optimal path calculated at birth
- max_velocity: model level maximum velocity
- tolerance: model level congestion tolerance
Aditionally the folllowing parameters will be modified during execution:
- congestion: congestion level calculated as velocity_sum/max_velocity_sum at each step
- haste: 0 if not hasty, 1 if hasty
- steps: amount of step an agent resides within the grid
'''
def __init__(self, model, unique_id, path, max_velocity, tolerance):
super().__init__(unique_id, model)
self.path = path
self.pos_i = 0
self.pos = path[self.pos_i]
self.max_velocity = max_velocity
self.velocity = max_velocity
self.velocity_sum = 0
self.max_velocity_sum = 0
self.congestion = self.velocity/self.max_velocity
self.haste = 0
self.steps = 0
self.tolerance = tolerance
self.type = 0 # self.update_type()
def accelerate(self, amount):
self.velocity += int(amount)
def decelerate(self, distance):
if distance > 0:
self.velocity = int(np.ceil(distance / 2))
else:
self.velocity = 0
def destroy(self):
self.model.grid.remove_agent(self)
self.model.schedule.remove(self)
self.model.num_car_agents -= 1
def step(self):
'''
At each step the car agent updates its congestion level, checks if it is to become hasty
and retrieves its next position in the path.
Aditionally, the agent checks if there is a red traffic light agent, if True the agent stops; if False, the agent
accelerates.
Next the agent checks its distance relative to surrounding cars, if it is closer to a neighbouring car
than it's velocity, the agent will decelarate.
Finally, if the agent has not been stopped, it moves to its next position.
'''
self.update_congestion()
self.update_haste()
next_path = self.path[self.pos_i + 1:self.pos_i + self.max_velocity + 1]
content: [TrafficLightAgent,
CarAgent] = self.model.grid.get_cell_list_contents(next_path)
current = self.model.grid.get_cell_list_contents(self.pos)
traffic_light = False
# check if traffic light on current cell
if isinstance(current[0], TrafficLightAgent):
if current[0].state != 0:
self.velocity = 0
return
else:
self.accelerate(int(np.ceil(self.max_velocity - self.velocity)/2))
# if object on next_path, act accordingly
if content:
next_obj = content[0]
distance_to_next = next_path.index(next_obj.pos)
if isinstance(next_obj, TrafficLightAgent):
if len(content) > 1:
next_car = content[1]
if next_car.pos == next_obj.pos:
distance_to_next -= 1
traffic_light = True
if self.velocity > 0 and distance_to_next <= self.velocity:
if traffic_light:
distance_to_next += 1
self.decelerate(distance_to_next)
elif self.velocity < self.max_velocity:
if traffic_light:
distance_to_next += 1
self.accelerate(np.ceil((self.max_velocity - self.velocity) / 2))
if self.velocity > distance_to_next:
self.velocity = distance_to_next
elif self.velocity > self.max_velocity:
self.velocity = self.max_velocity
else:
pass
self.move(next_path)
def move(self, next_path):
"""
Moves agent velocity amount of steps, if end of grid reached, remove agent
"""
if self.pos_i + self.velocity >= len(self.path):
self.destroy()
elif self.velocity > 0:
self.model.grid.move_agent(self, next_path[self.velocity-1])
self.pos_i += self.velocity
else:
pass
def update_congestion(self):
"""
Update congestion parameter for data collection
"""
self.velocity_sum += self.velocity
self.max_velocity_sum += self.max_velocity
self.congestion = self.velocity_sum/self.max_velocity_sum
self.steps += 1
def update_haste(self):
"""
Update haste parameter of agent
"""
haste_probability = (self.velocity_sum/self.steps)/self.max_velocity
if self.steps > 10:
if self.congestion < self.tolerance and np.random.uniform() < haste_probability:
# agent is hasty, increase max velocity
self.haste = 1
self.max_velocity = self.max_velocity + int(np.ceil(self.max_velocity * 0.25))
if self.velocity > self.max_velocity:
self.velocity = self.max_velocity
else:
if self.haste != 0:
# agent is "normal" again, decrease velocity
self.haste = 0
self.max_velocity = 5
if self.velocity > self.max_velocity:
self.velocity = self.max_velocity
def update_type(self):
"""
Update type of agent
"""
if np.random.uniform() < 0.10:
if np.random.uniform() < 0.3:
# patient
self.max_velocity = self.max_velocity - 1
self.tolerance_1 = self.tolerance_1 + 0.1
self.tolerance_2 = self.tolerance_2 + 0.15
if self.velocity > self.max_velocity:
self.velocity = self.max_velocity
return 1
else:
# inpatient
self.max_velocity = self.max_velocity + 2
self.tolerance_1 = self.tolerance_1 - 0.1
self.tolerance_2 = self.tolerance_2 - 0.15
if self.velocity > self.max_velocity:
self.velocity = self.max_velocity
return 2
else:
return 0
class BuildingAgent(Agent):
'''
Creates a building agent whose only attributes are a unique_id and its position:
- pos: (x,y) coordinates in the model grid
'''
def __init__(self, unique_id, model, pos):
super().__init__(unique_id, model)
self.pos = pos
class IntersectionAgent(Agent):
'''
Creates an intersection agent, where the traffic lights live.
Arguments:
- unique_id: agents' identifier
- pos: (x,y) coordinates in the model grid
- green_light_duration: duration of green/red light for a given TrafficLightAgent inside the IntersectionAgent
'''
def __init__(self, unique_id, model, pos, green_light_duration):
super().__init__(unique_id, model)
self.model = model
self.unique_id = unique_id
self.pos = pos
self.counter = 0
traffic_light_positions = [(pos[0] - 1, pos[1]),
(pos[0] + 1, pos[1] - 1),
(pos[0] + 2, pos[1] + 1),
(pos[0], pos[1] + 2)]
self.traffic_lights = []
for i in range(2):
tlight1 = TrafficLightAgent(self.model.get_new_unique_id(
), self.model, traffic_light_positions[2*i], state=2)
tlight2 = TrafficLightAgent(self.model.get_new_unique_id(
), self.model, traffic_light_positions[2*i+1], state=0)
self.traffic_lights.append(tlight1)
self.traffic_lights.append(tlight2)
self.green_duration = green_light_duration
self.yellow_duration = 2
def step(self):
if self.yellow_duration > 0:
if self.counter == self.green_duration:
for tl in self.traffic_lights:
if tl.state == 0:
tl.switch()
elif self.counter == self.green_duration + self.yellow_duration:
for tl in self.traffic_lights:
tl.switch()
self.counter = 0
else:
if self.counter == self.green_duration:
for tl in self.traffic_lights:
tl.switch(include_yellow=False)
self.counter = 0
self.counter += 1
class TrafficLightAgent(Agent):
'''
Creates a traffic light inside the model grid.
Arguments:
- unique_id: agents' identifier
- pos: (x,y) coordinates in the model grid
- state: 0 if green, 1 if yellow, 2 if red
'''
def __init__(self, unique_id, model, pos, state):
super().__init__(unique_id, model)
self.colors = {0: 'green', 1: 'yellow', 2: 'red'}
self.state = state
self.pos = pos
def switch(self, include_yellow=True):
if include_yellow:
if self.state == 2:
self.state = 0
else:
self.state += 1
else:
if self.state == 2:
self.state = 0
else:
self.state = 2 | [
"numpy.random.uniform",
"numpy.ceil"
] | [((5730, 5749), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5747, 5749), True, 'import numpy as np\n'), ((1457, 1478), 'numpy.ceil', 'np.ceil', (['(distance / 2)'], {}), '(distance / 2)\n', (1464, 1478), True, 'import numpy as np\n'), ((5773, 5792), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5790, 5792), True, 'import numpy as np\n'), ((4995, 5014), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5012, 5014), True, 'import numpy as np\n'), ((3713, 3761), 'numpy.ceil', 'np.ceil', (['((self.max_velocity - self.velocity) / 2)'], {}), '((self.max_velocity - self.velocity) / 2)\n', (3720, 3761), True, 'import numpy as np\n'), ((5183, 5216), 'numpy.ceil', 'np.ceil', (['(self.max_velocity * 0.25)'], {}), '(self.max_velocity * 0.25)\n', (5190, 5216), True, 'import numpy as np\n'), ((2870, 2912), 'numpy.ceil', 'np.ceil', (['(self.max_velocity - self.velocity)'], {}), '(self.max_velocity - self.velocity)\n', (2877, 2912), True, 'import numpy as np\n')] |
# /usr/bin/python
from __future__ import print_function, division
import click
import glob
from lxml import etree
import numpy as np
import os
import pandas as pd
from PIL import Image
import shutil
import subprocess
import tensorflow as tf
from tqdm import tqdm
from object_detection.kitti_to_voc import kitti_to_voc
from object_detection.create_pascal_tf_record import dict_to_tf_example
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
label_map_dict = label_map_util.get_label_map_dict('data/kitti_map.pbtxt')
DEFAULT_DATA_DIR = 'kitti_data'
VOC_TRAIN_DIR = 'voc_kitti'
VOC_VALID_DIR = 'voc_kitti_valid'
TRAIN_RECORD_PATH = 'data/train.tfrecord'
VALID_RECORD_PATH = 'data/valid.tfrecord'
def get_fun_paths(base_voc_dir):
annotations_dir = '{}/VOC2012/Annotations/'.format(base_voc_dir)
examples_path = '{}/VOC2012/ImageSets/Main/trainval.txt'.format(base_voc_dir)
return annotations_dir, examples_path
def strip_leading_zeroes(path):
'''training/image_2/00074.jpg -> training/image_2/74.jpg'''
end = path[-4:]
new_basename = '{}{}'.format(int(os.path.basename(path)[:-4]), end)
new_path = os.path.join(os.path.dirname(path), new_basename)
if not os.path.exists(new_path):
shutil.move(path, new_path)
return new_path
def convert_to_jpg_and_save(png_path):
# TODO(SS): faster version?
im = Image.open(png_path)
rgb_im = im.convert('RGB')
new_path = '{}.jpg'.format(png_path[:-4])
rgb_im.save(new_path)
os.remove(png_path)
return new_path
def get_id(path):
return os.path.basename(path)[:-4]
def make_directory_if_not_there(path):
'''makes a directory if not there'''
if not os.path.exists(path):
os.makedirs(path)
def get_labels_path(id, data_dir=DEFAULT_DATA_DIR):
return os.path.join(data_dir, 'training', 'label_2', '{}.txt'.format(id))
def get_image_path(id, data_dir):
return os.path.join(data_dir, 'training', 'image_2', '{}.jpg'.format(id))
def split_validation_images(data_dir, pct_train=0.9, num_consider='all'):
'''make valid.txt and train.txt and create valid subtree'''
label_paths = glob.glob(os.path.join(data_dir, 'training', 'label_2', '*.txt'))
if isinstance(num_consider, int):
label_paths = label_paths[:num_consider]
else:
num_consider = len(label_paths)
assert len(label_paths) > 0
num_train = int(np.floor(num_consider * pct_train))
valid_label_dir = os.path.join(data_dir, 'valid', 'label_2')
valid_image_dir = os.path.join(data_dir, 'valid', 'image_2')
make_directory_if_not_there(valid_image_dir)
make_directory_if_not_there(valid_label_dir)
train_paths = np.random.choice(label_paths, num_train, replace=False)
train_ids = []
valid_ids = []
for label_path in label_paths:
id = get_id(label_path)
image_path = get_image_path(id, data_dir)
if not os.path.exists(image_path):
print('no path {}'.format(image_path))
continue
if label_path in train_paths:
train_ids.append(id)
else:
valid_ids.append(id)
shutil.move(label_path, valid_label_dir)
shutil.move(image_path, valid_image_dir)
assert len(valid_ids) > 0
make_directory_if_not_there(os.path.join(data_dir, 'valid', 'label_2'))
train_file_contents = ','.join(train_ids)
valid_file_contents = ','.join(valid_ids)
with open('kitti_data/train.txt', 'w+') as f:
f.write(train_file_contents)
with open('kitti_data/valid.txt', 'w+') as f:
f.write(valid_file_contents)
def strip_zeroes_and_convert_to_jpg(data_dir):
'''convert images to jpg, strip leading zeroes and write train.txt file'''
data_dir = os.path.expanduser(data_dir)
image_paths = glob.glob(os.path.join(data_dir, 'training', 'image_2', '*.png'))
label_paths = glob.glob(os.path.join(data_dir, 'training', 'label_2', '*.txt'))
for path in tqdm(image_paths):
print(path)
stripped_path = strip_leading_zeroes(path)
convert_to_jpg_and_save(stripped_path)
for path in label_paths:
strip_leading_zeroes(path)
def xml_to_dict(path):
with tf.gfile.GFile(path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
return dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
def create_records(data_dir, to_path='data/train.tfrecord'):
annotations_dir, examples_path = get_fun_paths(data_dir)
writer = tf.python_io.TFRecordWriter(to_path)
labels = {}
examples_list = dataset_util.read_examples_list(examples_path)
assert len(examples_list) > 0, examples_path
for i, example in enumerate(examples_list):
path = os.path.join(annotations_dir, example + '.xml')
data = xml_to_dict(path)
assert 'object' in data, data['filename']
labels[i] = [k['name'] for k in data['object']]
try:
tf_example = dict_to_tf_example(data, data_dir, label_map_dict)
except Exception as e: #TODO(SS): remove me
print(e)
import pdb; pdb.set_trace()
writer.write(tf_example.SerializeToString())
writer.close()
return labels # to inspect a bit
def glob_base(pat):
return list(map(os.path.basename, glob.glob(pat)))
def assert_non_overlap_and_len():
valid_ids = glob_base(VOC_VALID_DIR + '/VOC2012/JPEGImages/*.jpg')
train_ids = glob_base(VOC_TRAIN_DIR+ '/VOC2012/JPEGImages/*.jpg')
assert len(pd.Index(valid_ids).intersection(train_ids)) == 0
@click.command()
@click.option('--to-path', default=TRAIN_RECORD_PATH)
@click.option('--data-dir', default=DEFAULT_DATA_DIR)
def do_kitti_ingest(to_path, data_dir):
# strip_zeroes_and_convert_to_jpg(data_dir)
assert os.path.exists('vod_converter'), 'Must git clone vod-converter'
split_validation_images(data_dir)
assert not os.path.exists(VOC_TRAIN_DIR)
assert not os.path.exists(VOC_VALID_DIR)
kitti_to_voc(os.path.join(data_dir, 'training'),
VOC_TRAIN_DIR, os.path.join(data_dir, 'train.txt'))
kitti_to_voc(os.path.join(data_dir, 'valid'),
VOC_VALID_DIR, os.path.join(data_dir, 'valid.txt'))
create_records(VOC_TRAIN_DIR, to_path=to_path)
create_records(VOC_VALID_DIR, to_path=VALID_RECORD_PATH)
assert_non_overlap_and_len()
print('succesfully wrote {} and {}'.format(to_path, VALID_RECORD_PATH))
if __name__ == '__main__':
do_kitti_ingest()
| [
"os.remove",
"numpy.floor",
"click.option",
"object_detection.utils.label_map_util.get_label_map_dict",
"glob.glob",
"os.path.join",
"os.path.dirname",
"os.path.exists",
"click.command",
"numpy.random.choice",
"tqdm.tqdm",
"os.path.basename",
"pandas.Index",
"tensorflow.gfile.GFile",
"ob... | [((509, 566), 'object_detection.utils.label_map_util.get_label_map_dict', 'label_map_util.get_label_map_dict', (['"""data/kitti_map.pbtxt"""'], {}), "('data/kitti_map.pbtxt')\n", (542, 566), False, 'from object_detection.utils import label_map_util\n'), ((5585, 5600), 'click.command', 'click.command', ([], {}), '()\n', (5598, 5600), False, 'import click\n'), ((5602, 5654), 'click.option', 'click.option', (['"""--to-path"""'], {'default': 'TRAIN_RECORD_PATH'}), "('--to-path', default=TRAIN_RECORD_PATH)\n", (5614, 5654), False, 'import click\n'), ((5656, 5708), 'click.option', 'click.option', (['"""--data-dir"""'], {'default': 'DEFAULT_DATA_DIR'}), "('--data-dir', default=DEFAULT_DATA_DIR)\n", (5668, 5708), False, 'import click\n'), ((1402, 1422), 'PIL.Image.open', 'Image.open', (['png_path'], {}), '(png_path)\n', (1412, 1422), False, 'from PIL import Image\n'), ((1530, 1549), 'os.remove', 'os.remove', (['png_path'], {}), '(png_path)\n', (1539, 1549), False, 'import os\n'), ((2486, 2528), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""', '"""label_2"""'], {}), "(data_dir, 'valid', 'label_2')\n", (2498, 2528), False, 'import os\n'), ((2551, 2593), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""', '"""image_2"""'], {}), "(data_dir, 'valid', 'image_2')\n", (2563, 2593), False, 'import os\n'), ((2711, 2766), 'numpy.random.choice', 'np.random.choice', (['label_paths', 'num_train'], {'replace': '(False)'}), '(label_paths, num_train, replace=False)\n', (2727, 2766), True, 'import numpy as np\n'), ((3778, 3806), 'os.path.expanduser', 'os.path.expanduser', (['data_dir'], {}), '(data_dir)\n', (3796, 3806), False, 'import os\n'), ((3991, 4008), 'tqdm.tqdm', 'tqdm', (['image_paths'], {}), '(image_paths)\n', (3995, 4008), False, 'from tqdm import tqdm\n'), ((4299, 4324), 'lxml.etree.fromstring', 'etree.fromstring', (['xml_str'], {}), '(xml_str)\n', (4315, 4324), False, 'from lxml import etree\n'), ((4533, 4569), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['to_path'], {}), '(to_path)\n', (4560, 4569), True, 'import tensorflow as tf\n'), ((4606, 4652), 'object_detection.utils.dataset_util.read_examples_list', 'dataset_util.read_examples_list', (['examples_path'], {}), '(examples_path)\n', (4637, 4652), False, 'from object_detection.utils import dataset_util\n'), ((5808, 5839), 'os.path.exists', 'os.path.exists', (['"""vod_converter"""'], {}), "('vod_converter')\n", (5822, 5839), False, 'import os\n'), ((1190, 1211), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1205, 1211), False, 'import os\n'), ((1238, 1262), 'os.path.exists', 'os.path.exists', (['new_path'], {}), '(new_path)\n', (1252, 1262), False, 'import os\n'), ((1272, 1299), 'shutil.move', 'shutil.move', (['path', 'new_path'], {}), '(path, new_path)\n', (1283, 1299), False, 'import shutil\n'), ((1600, 1622), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1616, 1622), False, 'import os\n'), ((1721, 1741), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1735, 1741), False, 'import os\n'), ((1751, 1768), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1762, 1768), False, 'import os\n'), ((2182, 2236), 'os.path.join', 'os.path.join', (['data_dir', '"""training"""', '"""label_2"""', '"""*.txt"""'], {}), "(data_dir, 'training', 'label_2', '*.txt')\n", (2194, 2236), False, 'import os\n'), ((2427, 2461), 'numpy.floor', 'np.floor', (['(num_consider * pct_train)'], {}), '(num_consider * pct_train)\n', (2435, 2461), True, 'import numpy as np\n'), ((3325, 3367), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""', '"""label_2"""'], {}), "(data_dir, 'valid', 'label_2')\n", (3337, 3367), False, 'import os\n'), ((3835, 3889), 'os.path.join', 'os.path.join', (['data_dir', '"""training"""', '"""image_2"""', '"""*.png"""'], {}), "(data_dir, 'training', 'image_2', '*.png')\n", (3847, 3889), False, 'import os\n'), ((3919, 3973), 'os.path.join', 'os.path.join', (['data_dir', '"""training"""', '"""label_2"""', '"""*.txt"""'], {}), "(data_dir, 'training', 'label_2', '*.txt')\n", (3931, 3973), False, 'import os\n'), ((4226, 4251), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (4240, 4251), True, 'import tensorflow as tf\n'), ((4336, 4381), 'object_detection.utils.dataset_util.recursive_parse_xml_to_dict', 'dataset_util.recursive_parse_xml_to_dict', (['xml'], {}), '(xml)\n', (4376, 4381), False, 'from object_detection.utils import dataset_util\n'), ((4765, 4812), 'os.path.join', 'os.path.join', (['annotations_dir', "(example + '.xml')"], {}), "(annotations_dir, example + '.xml')\n", (4777, 4812), False, 'import os\n'), ((5925, 5954), 'os.path.exists', 'os.path.exists', (['VOC_TRAIN_DIR'], {}), '(VOC_TRAIN_DIR)\n', (5939, 5954), False, 'import os\n'), ((5970, 5999), 'os.path.exists', 'os.path.exists', (['VOC_VALID_DIR'], {}), '(VOC_VALID_DIR)\n', (5984, 5999), False, 'import os\n'), ((6017, 6051), 'os.path.join', 'os.path.join', (['data_dir', '"""training"""'], {}), "(data_dir, 'training')\n", (6029, 6051), False, 'import os\n'), ((6085, 6120), 'os.path.join', 'os.path.join', (['data_dir', '"""train.txt"""'], {}), "(data_dir, 'train.txt')\n", (6097, 6120), False, 'import os\n'), ((6139, 6170), 'os.path.join', 'os.path.join', (['data_dir', '"""valid"""'], {}), "(data_dir, 'valid')\n", (6151, 6170), False, 'import os\n'), ((6204, 6239), 'os.path.join', 'os.path.join', (['data_dir', '"""valid.txt"""'], {}), "(data_dir, 'valid.txt')\n", (6216, 6239), False, 'import os\n'), ((2937, 2963), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (2951, 2963), False, 'import os\n'), ((3168, 3208), 'shutil.move', 'shutil.move', (['label_path', 'valid_label_dir'], {}), '(label_path, valid_label_dir)\n', (3179, 3208), False, 'import shutil\n'), ((3221, 3261), 'shutil.move', 'shutil.move', (['image_path', 'valid_image_dir'], {}), '(image_path, valid_image_dir)\n', (3232, 3261), False, 'import shutil\n'), ((4990, 5040), 'object_detection.create_pascal_tf_record.dict_to_tf_example', 'dict_to_tf_example', (['data', 'data_dir', 'label_map_dict'], {}), '(data, data_dir, label_map_dict)\n', (5008, 5040), False, 'from object_detection.create_pascal_tf_record import dict_to_tf_example\n'), ((5323, 5337), 'glob.glob', 'glob.glob', (['pat'], {}), '(pat)\n', (5332, 5337), False, 'import glob\n'), ((1127, 1149), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1143, 1149), False, 'import os\n'), ((5138, 5153), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5151, 5153), False, 'import pdb\n'), ((5532, 5551), 'pandas.Index', 'pd.Index', (['valid_ids'], {}), '(valid_ids)\n', (5540, 5551), True, 'import pandas as pd\n')] |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import numpy.testing as nt
import pytest
from xml.etree import ElementTree as etree
from . import assert_xml_trees_equal
from .. import imageset, enums, stringify_xml_doc, write_xml_doc
def test_basic_xml():
expected_str = '''
<ImageSet MSRCommunityId="0" MSRComponentId="0" Permission="0"
BandPass="Gamma" BaseDegreesPerTile="0.1" BaseTileLevel="1"
BottomsUp="True" CenterX="1.234" CenterY="-0.31415"
DataSetType="Planet" ElevationModel="False" FileType=".PNG" Generic="False"
MeanRadius="0.0" Name="<NAME>"
OffsetX="100.1" OffsetY="100.2" Projection="SkyImage"
Rotation="5.4321" Sparse="False" StockSet="False" TileLevels="4"
Url="http://example.org/{0}" WidthFactor="2">
<Credits>Escaping & Entities</Credits>
<CreditsUrl>https://example.org/credits</CreditsUrl>
<Description>Escaping <entities></Description>
<ThumbnailUrl>https://example.org/thumbnail.jpg</ThumbnailUrl>
</ImageSet>
'''
expected_xml = etree.fromstring(expected_str)
imgset = imageset.ImageSet()
imgset.data_set_type = enums.DataSetType.PLANET
imgset.name = '<NAME>'
imgset.url = 'http://example.org/{0}'
imgset.width_factor = 2
imgset.base_tile_level = 1
imgset.tile_levels = 4
imgset.base_degrees_per_tile = 0.1
imgset.file_type = '.PNG'
imgset.bottoms_up = True
imgset.projection = enums.ProjectionType.SKY_IMAGE
imgset.center_x = 1.234
imgset.center_y = -0.31415
imgset.offset_x = 100.1
imgset.offset_y = 100.2
imgset.rotation_deg = 5.4321
imgset.band_pass = enums.Bandpass.GAMMA
imgset.sparse = False
imgset.credits = 'Escaping & Entities'
imgset.credits_url = 'https://example.org/credits'
imgset.thumbnail_url = 'https://example.org/thumbnail.jpg'
imgset.description = 'Escaping <entities>'
observed_xml = imgset.to_xml()
assert_xml_trees_equal(expected_xml, observed_xml)
def test_wcs_1():
expected_str = '''
<ImageSet MSRCommunityId="0" MSRComponentId="0" Permission="0"
BandPass="Visible" BaseDegreesPerTile="4.870732233333334e-05"
BaseTileLevel="0" BottomsUp="False" CenterX="83.633083" CenterY="22.0145"
DataSetType="Sky" ElevationModel="False" FileType=".png" Generic="False"
MeanRadius="0.0" OffsetX="1502.8507831457316" OffsetY="1478.8005935660037"
Projection="SkyImage" Rotation="-0.29036478519000003" Sparse="True"
StockSet="False" TileLevels="0" WidthFactor="2">
</ImageSet>
'''
expected_xml = etree.fromstring(expected_str)
wcs_keywords = {
'CTYPE1': 'RA---TAN',
'CTYPE2': 'DEC--TAN',
'CRVAL1': 83.633083,
'CRVAL2': 22.0145,
'PC1_1': 0.9999871586199364,
'PC1_2': 0.005067799840785529,
'PC2_1': -0.005067799840785529,
'PC2_2': 0.9999871586199364,
'CRPIX1': 1503.8507831457316,
'CRPIX2': 1479.8005935660037,
'CDELT1': -4.870732233333334e-05,
'CDELT2': 4.870732233333334e-05,
}
imgset = imageset.ImageSet()
imgset.set_position_from_wcs(wcs_keywords, 3000, 3000)
observed_xml = imgset.to_xml()
assert_xml_trees_equal(expected_xml, observed_xml)
wcs_roundtrip = imgset.wcs_headers_from_position()
for kw in wcs_roundtrip.keys():
expected = wcs_keywords[kw]
observed = wcs_roundtrip[kw]
if kw in ('CTYPE1', 'CTYPE2'):
assert expected == observed
else:
nt.assert_almost_equal(expected, observed)
def test_misc_ser():
expected_str = '''
<ImageSet BandPass="Visible" BaseDegreesPerTile="0.0" BaseTileLevel="0"
BottomsUp="False" CenterX="0.0" CenterY="0.0" DataSetType="Sky" ElevationModel="False"
FileType=".png" Generic="False" MeanRadius="0.0" MSRCommunityId="0" MSRComponentId="0"
OffsetX="0.0" OffsetY="0.0" Permission="0" Projection="SkyImage"
Rotation="0.0" Sparse="True" StockSet="False" TileLevels="0"
Url="http://example.com/unspecified" WidthFactor="2" />
'''
expected_xml = etree.fromstring(expected_str)
imgset = imageset.ImageSet()
imgset.url = 'http://example.com/unspecified'
observed_xml = imgset.to_xml()
assert_xml_trees_equal(expected_xml, observed_xml)
expected_text = stringify_xml_doc(expected_xml)
observed_text = imgset.to_xml_string()
assert observed_text == expected_text
from io import StringIO, BytesIO
expected_strio = StringIO()
observed_strio = StringIO()
write_xml_doc(expected_xml, dest_stream=expected_strio, indent=False)
imgset.write_xml(observed_strio, indent=False)
assert observed_strio.getvalue() == expected_strio.getvalue()
expected_bio = BytesIO()
observed_bio = BytesIO()
write_xml_doc(expected_xml, dest_stream=expected_bio, dest_wants_bytes=True)
imgset.write_xml(observed_bio, dest_wants_bytes=True)
assert observed_bio.getvalue() == expected_bio.getvalue()
| [
"io.BytesIO",
"io.StringIO",
"numpy.testing.assert_almost_equal",
"xml.etree.ElementTree.fromstring"
] | [((1182, 1212), 'xml.etree.ElementTree.fromstring', 'etree.fromstring', (['expected_str'], {}), '(expected_str)\n', (1198, 1212), True, 'from xml.etree import ElementTree as etree\n'), ((2726, 2756), 'xml.etree.ElementTree.fromstring', 'etree.fromstring', (['expected_str'], {}), '(expected_str)\n', (2742, 2756), True, 'from xml.etree import ElementTree as etree\n'), ((4259, 4289), 'xml.etree.ElementTree.fromstring', 'etree.fromstring', (['expected_str'], {}), '(expected_str)\n', (4275, 4289), True, 'from xml.etree import ElementTree as etree\n'), ((4663, 4673), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4671, 4673), False, 'from io import StringIO, BytesIO\n'), ((4695, 4705), 'io.StringIO', 'StringIO', ([], {}), '()\n', (4703, 4705), False, 'from io import StringIO, BytesIO\n'), ((4917, 4926), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4924, 4926), False, 'from io import StringIO, BytesIO\n'), ((4946, 4955), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4953, 4955), False, 'from io import StringIO, BytesIO\n'), ((3669, 3711), 'numpy.testing.assert_almost_equal', 'nt.assert_almost_equal', (['expected', 'observed'], {}), '(expected, observed)\n', (3691, 3711), True, 'import numpy.testing as nt\n')] |
# This file contains helper functions that are related to mnist sequence classification, such as loading data
import numpy as np
import tensorflow as tf
supported_data_sets = ["SequentialMNIST", "P-MNIST"]
def binary_mask(numbers, mask_size=10):
"""
This function binary masks the passed list of numbers.
Input:
numbers: list, a list of numbers, for example, [1, 2, 3];
mask_size: int, total numbers possible in this mask, for example, for MIDI numbers – 10.
Output:
masks: list, a list of masks (numbers masked in a binary mask).
Example:
Input:
numbers = [1, 2, 3]
mask_size = 4
Output:
masks = [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
"""
masks = []
# Go through each of the numbers and put a one in number'th place in the mask array
for number in numbers:
# Create an array of length [mask_size], filled with zeros
mask = np.zeros(mask_size)
mask[number] = 1
masks.append(mask)
return masks
# Loads the data set with the passed name
def load_data(name):
"""
This function loads and returns the requested data set.
Input:
name: string, the name of the data set.
Output:
x_train: list, input sequences of words (represented by integers) for training;
y_train: list, output sequences of one-hot encoded labels for training;
x_test: list, input sequences of words (represented by integers) for testing;
y_test: list, output sequences of one-hot encoded labels for testing;
maxlen: int, the maximum length of sequences.
"""
print("Started loading data...")
# Check if data set is supported
if name not in supported_data_sets:
raise Exception("This code doesn't support the following data set!")
seed = 0
tf.compat.v1.set_random_seed(seed)
np.random.seed(seed)
rng = np.random.RandomState(seed)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784, 1))
x_test = np.reshape(x_test, (-1, 784, 1))
y_train = binary_mask(y_train)
y_test = binary_mask(y_test)
x_train = x_train / 255
x_test = x_test / 255
if name == "P-MNIST":
perm = rng.permutation(x_train.shape[1])
x_train = x_train[:, perm]
x_test = x_test[:, perm]
# Reserve 10,000 samples for validation.
x_valid = x_train[-10000:]
y_valid = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
return x_train, y_train, x_valid, y_valid, x_test, y_test
if __name__ == '__main__': # Main function
# To load a specific data set use:
X_TRAIN, Y_TRAIN, X_VALID, Y_VALID, X_TEST, Y_TEST = load_data("SequentialMNIST")
| [
"numpy.random.seed",
"numpy.zeros",
"numpy.random.RandomState",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.compat.v1.set_random_seed",
"numpy.reshape"
] | [((1952, 1986), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['seed'], {}), '(seed)\n', (1980, 1986), True, 'import tensorflow as tf\n'), ((1991, 2011), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2005, 2011), True, 'import numpy as np\n'), ((2022, 2049), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2043, 2049), True, 'import numpy as np\n'), ((2094, 2129), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (2127, 2129), True, 'import tensorflow as tf\n'), ((2145, 2178), 'numpy.reshape', 'np.reshape', (['x_train', '(-1, 784, 1)'], {}), '(x_train, (-1, 784, 1))\n', (2155, 2178), True, 'import numpy as np\n'), ((2192, 2224), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 784, 1)'], {}), '(x_test, (-1, 784, 1))\n', (2202, 2224), True, 'import numpy as np\n'), ((1013, 1032), 'numpy.zeros', 'np.zeros', (['mask_size'], {}), '(mask_size)\n', (1021, 1032), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import time
import PyDAQmx
from PyDAQmx import Task
import numpy as np
#value = 1.3
#
#task = Task()
#task.CreateAOVoltageChan("/Dev1/ao0","",-10.0,10.0,PyDAQmx.DAQmx_Val_Volts,None)
#task.StartTask()
#task.WriteAnalogScalarF64(1,10.0,value,None)
#time.sleep(5)
#task.StopTask()
#
#
class Stim():
def __init__(self):
self.pulse = np.zeros(1, dtype=np.uint8)
self.task = Task()
def connect(self):
self.task.CreateDOChan("/Dev1/port0/line3","",PyDAQmx.DAQmx_Val_ChanForAllLines)
self.task.StartTask()
def disconnect(self):
self.task.StopTask()
def stim_on(self):
self.pulse[0]=1
self.task.WriteDigitalLines(1, 1, 5.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.pulse, None, None)
def stim_off(self):
self.pulse[0]=0
self.task.WriteDigitalLines(1, 1, 5.0, PyDAQmx.DAQmx_Val_GroupByChannel, self.pulse , None, None)
# def freq(self):
# ctr_ini_delay = 0 # sec
# ctr_period = 0.1 # sec
# ctr_duty_cycle = 0.01
# self.task.CreateCOPulseChanFreq("/Dev1/port0/line3", "", PyDAQmx.DAQmx_Val_Hz, PyDAQmx.DAQmx_Val_Low, ctr_ini_delay, 1/float(ctr_period), ctr_duty_cycle)
# self.task.CfgImplicitTiming(PyDAQmx.DAQmx_Val_ContSamps, 1000)
if __name__ == "__main__":
laser = Stim()
laser.connect()
# laser.freq()
laser.stim_on()
time.sleep(0.5)
laser.stim_off()
laser.disconnect()
#
#
#ctr_ini_delay = 0 # sec
#ctr_period = 0.1 # sec
#ctr_duty_cycle = 0.01
#task = PyDAQmx.Task()
#task.CreateCOPulseChanFreq("Dev1/ao0", "", PyDAQmx.DAQmx_Val_Hz, PyDAQmx.DAQmx_Val_Low, ctr_ini_delay, 1/float(ctr_period), ctr_duty_cycle)
#task.CfgImplicitTiming(PyDAQmx.DAQmx_Val_ContSamps, 1000)
#task.StartTask()
#sleep(5)
#task.StopTask()
#task.ClearTask()
| [
"numpy.zeros",
"PyDAQmx.Task",
"time.sleep"
] | [((1299, 1314), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1309, 1314), False, 'import time\n'), ((365, 392), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.uint8'}), '(1, dtype=np.uint8)\n', (373, 392), True, 'import numpy as np\n'), ((407, 413), 'PyDAQmx.Task', 'Task', ([], {}), '()\n', (411, 413), False, 'from PyDAQmx import Task\n')] |
import numpy as np
from pybullet_planning.utils import PI, MAX_DISTANCE
from pybullet_planning.motion_planners import birrt
from pybullet_planning.interfaces.robots.collision import get_collision_fn
from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end
#####################################
def get_closest_angle_fn(body, joints, linear_weight=1., angular_weight=1., reversible=True):
from pybullet_planning.interfaces.env_manager.pose_transformation import get_angle
assert len(joints) == 3
linear_extend_fn = get_distance_fn(body, joints[:2], weights=linear_weight*np.ones(2))
angular_extend_fn = get_distance_fn(body, joints[2:], weights=[angular_weight])
def closest_fn(q1, q2):
angle_and_distance = []
for direction in [0, PI] if reversible else [PI]:
angle = get_angle(q1[:2], q2[:2]) + direction
distance = angular_extend_fn(q1[2:], [angle]) \
+ linear_extend_fn(q1[:2], q2[:2]) \
+ angular_extend_fn([angle], q2[2:])
angle_and_distance.append((angle, distance))
return min(angle_and_distance, key=lambda pair: pair[1])
return closest_fn
def get_nonholonomic_distance_fn(body, joints, weights=None, **kwargs):
assert weights is None
closest_angle_fn = get_closest_angle_fn(body, joints, **kwargs)
def distance_fn(q1, q2):
_, distance = closest_angle_fn(q1, q2)
return distance
return distance_fn
def get_nonholonomic_extend_fn(body, joints, resolutions=None, **kwargs):
assert resolutions is None
assert len(joints) == 3
linear_extend_fn = get_extend_fn(body, joints[:2])
angular_extend_fn = get_extend_fn(body, joints[2:])
closest_angle_fn = get_closest_angle_fn(body, joints, **kwargs)
def extend_fn(q1, q2):
angle, _ = closest_angle_fn(q1, q2)
path = []
for aq in angular_extend_fn(q1[2:], [angle]):
path.append(np.append(q1[:2], aq))
for lq in linear_extend_fn(q1[:2], q2[:2]):
path.append(np.append(lq, [angle]))
for aq in angular_extend_fn([angle], q2[2:]):
path.append(np.append(q2[:2], aq))
return path
return extend_fn
def plan_nonholonomic_motion(body, joints, end_conf, obstacles=[], attachments=[],
self_collisions=True, disabled_collisions=set(),
weights=None, resolutions=None, reversible=True,
max_distance=MAX_DISTANCE, custom_limits={}, **kwargs):
from pybullet_planning.interfaces.robots.joint import get_joint_positions
assert len(joints) == len(end_conf)
sample_fn = get_sample_fn(body, joints, custom_limits=custom_limits)
distance_fn = get_nonholonomic_distance_fn(body, joints, weights=weights, reversible=reversible)
extend_fn = get_nonholonomic_extend_fn(body, joints, resolutions=resolutions, reversible=reversible)
collision_fn = get_collision_fn(body, joints, obstacles, attachments,
self_collisions, disabled_collisions,
custom_limits=custom_limits, max_distance=max_distance)
start_conf = get_joint_positions(body, joints)
if not check_initial_end(start_conf, end_conf, collision_fn):
return None
return birrt(start_conf, end_conf, distance_fn, sample_fn, extend_fn, collision_fn, **kwargs)
| [
"pybullet_planning.interfaces.planner_interface.joint_motion_planning.check_initial_end",
"pybullet_planning.interfaces.robots.collision.get_collision_fn",
"numpy.ones",
"pybullet_planning.interfaces.planner_interface.joint_motion_planning.get_sample_fn",
"numpy.append",
"pybullet_planning.interfaces.plan... | [((712, 771), 'pybullet_planning.interfaces.planner_interface.joint_motion_planning.get_distance_fn', 'get_distance_fn', (['body', 'joints[2:]'], {'weights': '[angular_weight]'}), '(body, joints[2:], weights=[angular_weight])\n', (727, 771), False, 'from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end\n'), ((1722, 1753), 'pybullet_planning.interfaces.planner_interface.joint_motion_planning.get_extend_fn', 'get_extend_fn', (['body', 'joints[:2]'], {}), '(body, joints[:2])\n', (1735, 1753), False, 'from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end\n'), ((1778, 1809), 'pybullet_planning.interfaces.planner_interface.joint_motion_planning.get_extend_fn', 'get_extend_fn', (['body', 'joints[2:]'], {}), '(body, joints[2:])\n', (1791, 1809), False, 'from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end\n'), ((2771, 2827), 'pybullet_planning.interfaces.planner_interface.joint_motion_planning.get_sample_fn', 'get_sample_fn', (['body', 'joints'], {'custom_limits': 'custom_limits'}), '(body, joints, custom_limits=custom_limits)\n', (2784, 2827), False, 'from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end\n'), ((3053, 3210), 'pybullet_planning.interfaces.robots.collision.get_collision_fn', 'get_collision_fn', (['body', 'joints', 'obstacles', 'attachments', 'self_collisions', 'disabled_collisions'], {'custom_limits': 'custom_limits', 'max_distance': 'max_distance'}), '(body, joints, obstacles, attachments, self_collisions,\n disabled_collisions, custom_limits=custom_limits, max_distance=max_distance\n )\n', (3069, 3210), False, 'from pybullet_planning.interfaces.robots.collision import get_collision_fn\n'), ((3292, 3325), 'pybullet_planning.interfaces.robots.joint.get_joint_positions', 'get_joint_positions', (['body', 'joints'], {}), '(body, joints)\n', (3311, 3325), False, 'from pybullet_planning.interfaces.robots.joint import get_joint_positions\n'), ((3423, 3513), 'pybullet_planning.motion_planners.birrt', 'birrt', (['start_conf', 'end_conf', 'distance_fn', 'sample_fn', 'extend_fn', 'collision_fn'], {}), '(start_conf, end_conf, distance_fn, sample_fn, extend_fn, collision_fn,\n **kwargs)\n', (3428, 3513), False, 'from pybullet_planning.motion_planners import birrt\n'), ((3337, 3390), 'pybullet_planning.interfaces.planner_interface.joint_motion_planning.check_initial_end', 'check_initial_end', (['start_conf', 'end_conf', 'collision_fn'], {}), '(start_conf, end_conf, collision_fn)\n', (3354, 3390), False, 'from pybullet_planning.interfaces.planner_interface.joint_motion_planning import get_distance_fn, get_extend_fn, get_sample_fn, check_initial_end\n'), ((676, 686), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (683, 686), True, 'import numpy as np\n'), ((911, 936), 'pybullet_planning.interfaces.env_manager.pose_transformation.get_angle', 'get_angle', (['q1[:2]', 'q2[:2]'], {}), '(q1[:2], q2[:2])\n', (920, 936), False, 'from pybullet_planning.interfaces.env_manager.pose_transformation import get_angle\n'), ((2046, 2067), 'numpy.append', 'np.append', (['q1[:2]', 'aq'], {}), '(q1[:2], aq)\n', (2055, 2067), True, 'import numpy as np\n'), ((2145, 2167), 'numpy.append', 'np.append', (['lq', '[angle]'], {}), '(lq, [angle])\n', (2154, 2167), True, 'import numpy as np\n'), ((2247, 2268), 'numpy.append', 'np.append', (['q2[:2]', 'aq'], {}), '(q2[:2], aq)\n', (2256, 2268), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""plotutil.py: module is dedicated to plottting."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import datetime as dt
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import SubplotDivider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
import matplotlib.pyplot as plt
from pylab import gca, gcf
import numpy as np
from matplotlib.transforms import Affine2D, Transform
import mpl_toolkits.axisartist.floating_axes as floating_axes
from matplotlib.projections import polar
from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
from types import MethodType
import glob
import pandas as pd
from dateutil import tz
from scipy.io import loadmat
import copy
from scipy.stats import skewnorm
from PyIF import te_compute as te
from sklearn.feature_selection import mutual_info_regression as MIR
from SALib.sample import saltelli
from SALib.analyze import sobol
from SALib.analyze import rbd_fast
import itertools
from math import pi
from matplotlib.legend_handler import HandlerPatch
class HandlerCircle(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = plt.Circle(xy=center, radius=orig_handle.radius)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
import utils
def textHighlighted(xy, text, ax=None, color="k", fontsize=None, xytext=(0,0),
zorder=None, text_alignment=(0,0), xycoords="data",
textcoords="offset points", **kwargs):
"""
Plot highlighted annotation (with a white lining)
Parameters
----------
xy : position of point to annotate
text : str text to show
ax : Optional[ ]
color : Optional[char]
text color; deafult is "k"
fontsize : Optional [ ] text font size; default is None
xytext : Optional[ ] text position; default is (0, 0)
zorder : text zorder; default is None
text_alignment : Optional[ ]
xycoords : Optional[ ] xy coordinate[1]; default is "data"
textcoords : Optional[ ] text coordinate[2]; default is "offset points"
**kwargs :
Notes
-----
Belongs to class rbspFp.
References
----------
[1] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
[2] see `matplotlib.pyplot.annotate
<http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate>`)
"""
if ax is None: ax = gca()
text_path = mp.text.TextPath((0, 0), text, size=fontsize, **kwargs)
p1 = matplotlib.patches.PathPatch(text_path, ec="w", lw=4, fc="w", alpha=0.7,
zorder=zorder, transform=mp.transforms.IdentityTransform())
p2 = matplotlib.patches.PathPatch(text_path, ec="none", fc=color, zorder=zorder,
transform=mp.transforms.IdentityTransform())
offsetbox2 = matplotlib.offsetbox.AuxTransformBox(mp.transforms.IdentityTransform())
offsetbox2.add_artist(p1)
offsetbox2.add_artist(p2)
ab = mp.offsetbox.AnnotationBbox(offsetbox2, xy, xybox=xytext, xycoords=xycoords,
boxcoords=textcoords, box_alignment=text_alignment, frameon=False)
ab.set_zorder(zorder)
ax.add_artist(ab)
return
def addColorbar(mappable, ax):
"""
Append colorbar to axes
Parameters
----------
mappable : a mappable object
ax : an axes object
Returns
-------
cbax : colorbar axes object
Notes
-----
This is mostly useful for axes created with :func:`curvedEarthAxes`.
"""
fig1 = ax.get_figure()
divider = SubplotDivider(fig1, *ax.get_geometry(), aspect=True)
# axes for colorbar
cbax = Axes(fig1, divider.get_position())
h = [Size.AxesX(ax), # main axes
Size.Fixed(0.1), # padding
Size.Fixed(0.2)] # colorbar
v = [Size.AxesY(ax)]
_ = divider.set_horizontal(h)
_ = divider.set_vertical(v)
_ = ax.set_axes_locator(divider.new_locator(nx=0, ny=0))
_ = cbax.set_axes_locator(divider.new_locator(nx=2, ny=0))
_ = fig1.add_axes(cbax)
_ = cbax.axis["left"].toggle(all=False)
_ = cbax.axis["top"].toggle(all=False)
_ = cbax.axis["bottom"].toggle(all=False)
_ = cbax.axis["right"].toggle(ticklabels=True, label=True)
_ = plt.colorbar(mappable, cax=cbax, shrink=0.7)
return cbax
def curvedEarthAxes(rect=111, fig=None, minground=0., maxground=2000, minalt=0,
maxalt=500, Re=6371., nyticks=5, nxticks=4):
"""
Create curved axes in ground-range and altitude
Parameters
----------
rect : Optional[int] subplot spcification
fig : Optional[pylab.figure object] (default to gcf)
minground : Optional[float]
maxground : Optional[int] maximum ground range [km]
minalt : Optional[int] lowest altitude limit [km]
maxalt : Optional[int] highest altitude limit [km]
Re : Optional[float] Earth radius in kilometers
nyticks : Optional[int] Number of y axis tick marks; default is 5
nxticks : Optional[int] Number of x axis tick marks; deafult is 4
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes objec containing data
"""
ang = maxground / Re
minang = minground / Re
angran = ang - minang
angle_ticks = [(0, "{:.0f}".format(minground))]
while angle_ticks[-1][0] < angran:
tang = angle_ticks[-1][0] + 1./nxticks*angran
angle_ticks.append((tang, "{:.0f}".format((tang-minang)*Re)))
grid_locator1 = FixedLocator([v for v, s in angle_ticks])
tick_formatter1 = DictFormatter(dict(angle_ticks))
altran = float(maxalt - minalt)
alt_ticks = [(minalt+Re, "{:.0f}".format(minalt))]
while alt_ticks[-1][0] < Re+maxalt:
alt_ticks.append((altran / float(nyticks) + alt_ticks[-1][0],
"{:.0f}".format(altran / float(nyticks) +
alt_ticks[-1][0] - Re)))
_ = alt_ticks.pop()
grid_locator2 = FixedLocator([v for v, s in alt_ticks])
tick_formatter2 = DictFormatter(dict(alt_ticks))
tr_rotate = Affine2D().rotate(np.pi/2-ang/2)
tr_shift = Affine2D().translate(0, Re)
tr = polar.PolarTransform() + tr_rotate
grid_helper = floating_axes.GridHelperCurveLinear(tr, extremes=(0, angran, Re+minalt, Re+maxalt),
grid_locator1=grid_locator1, grid_locator2=grid_locator2, tick_formatter1=tick_formatter1,
tick_formatter2=tick_formatter2,)
if not fig: fig = plt.figure(figsize=(10,6))
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
# adjust axis
ax1.axis["left"].label.set_text(r"Alt. [km]")
ax1.axis["bottom"].label.set_text(r"Ground range [km]")
ax1.invert_xaxis()
ax1.minground = minground
ax1.maxground = maxground
ax1.minalt = minalt
ax1.maxalt = maxalt
ax1.Re = Re
fig.add_subplot(ax1, transform=tr)
# create a parasite axes whose transData in RA, cz
aux_ax = ax1.get_aux_axes(tr)
# for aux_ax to have a clip path as in ax
aux_ax.patch = ax1.patch
# but this has a side effect that the patch is drawn twice, and possibly
# over some other artists. So, we decrease the zorder a bit to prevent this.
ax1.patch.zorder=0.9
return ax1, aux_ax
def plot_edens(time, beam=None, maxground=2000, maxalt=500,
nel_cmap="jet", nel_lim=[10, 12], title=False,
fig=None, rect=111, ax=None, aax=None,plot_colorbar=True,
nel_rasterize=False):
"""
Plot electron density profile
Parameters
----------
time : datetime.datetime time of profile
beam : Optional[ ] beam number
maxground : Optional[int]
maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
nel_cmap : Optional[str] color map name for electron density index coloring
nel_lim : Optional[list, int] electron density index plotting limits
title : Optional[bool] Show default title
fig : Optional[pylab.figure] object (default to gcf)
rect : Optional[int] subplot spcification
ax : Optional[ ] Existing main axes
aax : Optional[ ] Existing auxialary axes
plot_colorbar : Optional[bool] Plot a colorbar
nel_rasterize : Optional[bool] Rasterize the electron density plot
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
return
def get_polar(d, Re=6371.):
""" Convert to polar coordinates """
th = d.grange / Re
r = d.height + Re
dop, sth, dth = d.dop, d.sth, d.dth
return th, r, dop, sth, dth
def plot_rays(dic, time, ti, beam, case, txt, maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.0, .1],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
"""
Plot ray paths
Parameters
----------
dic: str location of the data files
time: datetime.datetime time of rays
ti: int time index
beam: beam number
maxground : Optional[int] maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
step : Optional[int] step between each plotted ray (in number of ray steps)
showrefract : Optional[bool] show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str] color map name for refractive index coloring
nr_lim : Optional[list, float] refractive index plotting limits
raycolor : Optional[float] color of ray paths
title : Optional[bool] Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure] object (default to gcf)
rect : Optional[int] subplot spcification
ax : Optional[ ] Existing main axes
aax : Optional[ ] Existing auxialary axes
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "ti({ti}).bm({bm}).elv(*).{case}.csv".format(ti=ti, bm=beam, case=case))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")
ax.set_title( stitle )
ax.text(1.05, 0.5, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, rotation=90)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.ti({ti}).bm({bm}).{case}.png".format(ti=ti, bm=beam, case=case), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_exp_rays(dic, time, beam, cat="bgc", maxground=2000, maxalt=300, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[0.8, 1.],
raycolor="0.3", title=False, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None):
""" Plot ray paths (previous method) """
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "exp.{cat}.bm({bm}).elv(*).csv".format(cat=cat, bm=beam))
files.sort()
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( v )
_ = aax.add_collection( lcol )
if title:
stitle = ""
ax.set_title( stitle )
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ f")
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
fig.savefig(dic + "rt.exp.{cat}.bm({bm}).png".format(cat=cat, bm=beam), bbox_inches="tight")
plt.close()
return ax, aax, cbax
def plot_radstn(p,f,pz,fz,fname,lat,lon,t,zone="America/New_York"):
""" Plot radar vertical dataset """
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.set_ylabel("Alt. [km]")
ax.set_xlabel(r"EDens [$cm^{-3}$]")
ax.semilogx(p, pz, "r")
ax.semilogx(f, fz, "r--")
ax.set_ylim(50, 130)
ax.set_xlim(1e2, 1e7)
sza = utils.calculate_sza(t, lat, lon, alt=300)
l = t.replace(tzinfo=tz.gettz("UTC")).astimezone(tz.gettz("America/New_York"))
ax.set_title(r"UT-%s"%(t.strftime("%Y-%m-%d %H:%M")))
ax.text(1.05, 0.5, "Loc:(%.1f,%.1f), $\chi$-%.1f, LT-%s"%(lat, lon, sza, l.strftime("%H:%M")),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, rotation=90)
fig.savefig(fname,bbox_inches="tight")
plt.close()
return
def plot_velocity_ts(dn, rad, bmnum):
""" Plot velocity TS data """
fig = plt.figure(figsize=(6,6), dpi=150)
axs = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
mkeys = ["vn", "vh", "vt"]
fmt = matplotlib.dates.DateFormatter("%H:%M")
fname = "data/sim/{dn}/{rad}/velocity.ts.csv".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
sdat = pd.read_csv(fname, parse_dates=["dn"])
axs[0].set_title("%s UT, Radar - %s, Beam - %d"%(dn.strftime("%Y.%m.%d.%H.%M"), rad, bmnum))
cols = ["r", "b", "k"]
labs = [r"$V_{d\eta}$", r"$V_{dh}$", r"$V_{t}$"]
I = 0
fname = "data/sim/{dn}/{rad}/sd_data.csv.gz".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
dat = utils.get_sd_data(fname, 15).dropna()
dat = dat.groupby("time").mean().reset_index()
for ax, mkey, col, lab in zip(axs, mkeys, cols, labs):
ax.set_ylabel("Velocity [m/s]")
ax.set_xlabel("Time [UT]")
ax.xaxis.set_major_formatter(fmt)
yerr = np.array([(mn, mx) for mn, mx in zip(sdat[mkey+"_min"], sdat[mkey+"_max"])]).T
ax.errorbar(sdat.dn, sdat[mkey], yerr=yerr,
mec=col, mfc=col, fmt="r^", ms=1.5, ls="None", ecolor=col,
capsize=1, capthick=.4, elinewidth=0.4,
alpha=0.5, label=lab)
if I == 2:
ax.plot(dat.time, dat.v, color="darkgreen", marker="o",
alpha=0.3, ls="None", markersize=0.5, label=r"$V_{sd}^{los}$")
ax.plot(dat.time, dat.v, color="darkred", marker="o",
alpha=0.3, ls="None", markersize=0.8)
ax.axhline(0, color="gray", ls="--", lw=0.6)
ax.legend(loc=1)
ax.set_ylim(10*int((np.min(sdat[mkey]+sdat[mkey+"_min"])/10)-1),
10*int((np.max(sdat[mkey]+sdat[mkey+"_max"])/10)+1))
ax.set_xlim(sdat.dn.tolist()[0], sdat.dn.tolist()[-1])
I += 1
fname = "data/sim/{dn}/{rad}/velocity.ts.png".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad)
fig.savefig(fname,bbox_inches="tight")
return
def plot_radstn_base(b,p,f,ht,fname,lat,lon,t,zone="America/New_York"):
""" Plot radar vertical dataset """
fig = plt.figure(figsize=(4,4), dpi=120)
ax = fig.add_subplot(111)
ax.set_ylabel("Alt. [km]")
ax.set_xlabel(r"EDens [$cm^{-3}$]")
ax.semilogx(b, ht, "k", label="Background")
ax.semilogx(p, ht, "r", label=r"$UT_{-1}$")
ax.semilogx(f, ht, "r--", label="UT")
ax.legend(loc=4)
ax.set_ylim(50, 130)
ax.set_xlim(1e2, 1e7)
sza = utils.calculate_sza(t, lat, lon, alt=300)
l = t.replace(tzinfo=tz.gettz("UTC")).astimezone(tz.gettz("America/New_York"))
ax.set_title(r"UT-%s"%(t.strftime("%Y-%m-%d %H:%M")))
ax.text(1.05, 0.5, "Loc:(%.1f,%.1f), $\chi$-%.1f, LT-%s"%(lat, lon, sza, l.strftime("%H:%M")),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, rotation=90)
fig.savefig(fname,bbox_inches="tight")
plt.close()
return
def plot_rays_base(dic, time, ti, beam, case, txt, maxground=2000, maxalt=500, step=1,
showrefract=False, nr_cmap="jet_r", nr_lim=[-0.5, 0.5],
raycolor="0.3", title=True, zorder=2, alpha=1,
fig=None, rect=111, ax=None, aax=None, freq=12.):
"""
Plot ray paths
Parameters
----------
dic: str location of the data files
time: datetime.datetime time of rays
ti: int time index
beam: beam number
maxground : Optional[int] maximum ground range [km]
maxalt : Optional[int] highest altitude limit [km]
step : Optional[int] step between each plotted ray (in number of ray steps)
showrefract : Optional[bool] show refractive index along ray paths (supersedes raycolor)
nr_cmap : Optional[str] color map name for refractive index coloring
nr_lim : Optional[list, float] refractive index plotting limits
raycolor : Optional[float] color of ray paths
title : Optional[bool] Show default title
zorder : Optional[int]
alpha : Optional[int]
fig : Optional[pylab.figure] object (default to gcf)
rect : Optional[int] subplot spcification
ax : Optional[ ] Existing main axes
aax : Optional[ ] Existing auxialary axes
Returns
-------
ax : matplotlib.axes object containing formatting
aax : matplotlib.axes object containing data
cbax : matplotlib.axes object containing colorbar
"""
if not ax and not aax: ax, aax = curvedEarthAxes(fig=fig, rect=rect, maxground=maxground, maxalt=maxalt)
else:
if hasattr(ax, "time"): time = ax.time
if hasattr(ax, "beam"): beam = ax.beam
files = glob.glob(dic + "ti({ti}).elv(*).{case}.csv".format(ti="%02d"%ti, case=case))
files.sort()
Re = 6371.
for f in files:
th, r, v, _, _ = get_polar(pd.read_csv(f))
v = (0.5 * v * 3e8 / (freq * 1e6))
if not showrefract: aax.plot(th, r, c=raycolor, zorder=zorder, alpha=alpha)
else:
points = np.array([th, r]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lcol = LineCollection( segments, zorder=zorder, alpha=alpha)
_ = lcol.set_cmap( nr_cmap )
_ = lcol.set_norm( plt.Normalize(*nr_lim) )
_ = lcol.set_array( utils.smooth(v, window_len=21) )
_ = aax.add_collection( lcol )
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*60+Re, color="b", ls="--", lw=0.5)
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*95+Re, color="orange", ls="--", lw=0.5)
aax.plot(np.arange(0,2000)/Re, np.ones(2000)*130+Re, color="r", ls="--", lw=0.5)
if not showrefract and title:
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")
ax.set_title( stitle )
ax.text(1.05, 0.5, txt, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, rotation=90)
if showrefract:
cbax = addColorbar(lcol, ax)
_ = cbax.set_ylabel(r"$\Delta$ V (m/s)")
stitle = "%s UT"%time.strftime("%Y-%m-%d %H:%M")+ "\n" + "Radar: BKS, Beam: %02d"%beam + "\n" +\
"Frequency: %.1f MHz"%freq + "\n"
ax.text(0.5, 0.8, stitle + txt + "(m/s)", horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes)
else: cbax = None
ax.beam = beam
fig = ax.get_figure()
#fig.savefig(dic + "rt.ti({ti}).{case}.png".format(ti="%02d"%ti, case=case), bbox_inches="tight")
fig.savefig(dic + "rt.ti({ti}).{case}.png".format(ti="%02d"%ti, case=case))
plt.close()
return ax, aax, cbax
def plot_region_distribution(vd, ve, vf):
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(111)
ax.hist(vd, bins=np.arange(0,1,.005), color="r", alpha=0.5, density=True, label=r"$\frac{v_D}{v_T}$", histtype="step")
ax.hist(ve, bins=np.arange(0,1,.005), color="b", alpha=0.5, density=True, label=r"$\frac{v_E}{v_T}$", histtype="step")
ax.hist(vf, bins=np.arange(0,1,.005), color="g", alpha=0.5, density=True, label=r"$\frac{v_F}{v_T}$", histtype="step")
ax.set_xlim(0,1)
ax.legend(loc=1)
ax.set_ylabel("Density")
ax.set_xlabel(r"$\frac{v_x}{v_T}$")
fig.savefig("data/hist_reg.png", bbox_inches="tight")
return
def plot_distribution(vn, vf):
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(111)
ax.hist(vn, bins=np.arange(0,1,.005), color="r", alpha=0.5, density=True, label=r"$\frac{v_{\eta}}{v_T}$", histtype="step")
ax.hist(vf, bins=np.arange(0,1,.005), color="b", alpha=0.5, density=True, label=r"$\frac{v_{h}}{v_T}$", histtype="step")
ax.set_xlim(0,1)
ax.legend(loc=1)
ax.set_ylabel("Density")
ax.set_xlabel(r"$\frac{v_x}{v_T}$")
fig.savefig("data/hist.png", bbox_inches="tight")
return
class FanPlot(object):
""" Plot Fan Dataset """
def __init__(self, nrange=75, nbeam=24, r0=180, dr=45, dtheta=3.24, theta0=None):
"""
Initialize the fanplot do a certain size.
:param nrange: number of range gates
:param nbeam: number of beams
:param r0: initial beam distance - any distance unit as long as it"s consistent with dr
:param dr: length of each radar - any distance unit as long as it"s consistent with r0
:param dtheta: degrees per beam gate, degrees (default 3.24 degrees)
"""
# Set member variables
self.nrange = int(nrange)
self.nbeam = int(nbeam)
self.r0 = r0
self.dr = dr
self.dtheta = dtheta
# Initial angle (from X, polar coordinates) for beam 0
if theta0 == None:
self.theta0 = (90 - dtheta * nbeam / 2) # By default, point fanplot towards 90 deg
else:
self.theta0 = theta0
return
def add_axis(self, fig, subplot):
ax = fig.add_subplot(subplot, polar=True)
# Set up ticks and labels
self.r_ticks = range(self.r0, self.r0 + (self.nrange+1) * self.dr, self.dr)
self.theta_ticks = [self.theta0 + self.dtheta * b for b in range(self.nbeam+1)][::4]
rlabels = [""] * len(self.r_ticks)
for i in range(0, len(rlabels), 5):
rlabels[i] = i
plt.rgrids(self.r_ticks, rlabels)
plt.thetagrids(self.theta_ticks, range(self.nbeam+1)[::4])
return ax
def plot(self, ax, beams, gates, color="blue"):
"""
Add some data to the plot in a single color at positions given by "beams" and "gates".
:param beams: a list/array of beams
:param gates: a list/array of gates - same length as beams
:param color: a Matplotlib color
"""
for i, (beam, gate) in enumerate(zip(beams, gates)):
theta = (self.theta0 + beam * self.dtheta) * np.pi / 180 # radians
r = (self.r0 + gate * self.dr) # km
width = self.dtheta * np.pi / 180 # radians
height = self.dr # km
x1, x2 = theta, theta + width
y1, y2 = r, r + height
x = x1, x2, x2, x1
y = y1, y1, y2, y2
ax.fill(x, y, color=color)
self._scale_plot(ax)
return
def _add_colorbar(self, fig, ax, bounds, colormap, label=""):
"""
Add a colorbar to the right of an axis.
Similar to the function in RangeTimePlot, but positioned differently fanplots.
:param fig:
:param ax:
:param bounds:
:param colormap:
:param label:
:return:
"""
import matplotlib as mpl
pos = ax.get_position()
cpos = [pos.x1 + 0.025, pos.y0 + 0.25*pos.height,
0.01, pos.height * 0.5] # this list defines (left, bottom, width, height)
cax = fig.add_axes(cpos)
norm = mpl.colors.BoundaryNorm(bounds[::2], colormap.N)
cb2 = mpl.colorbar.ColorbarBase(cax, cmap=colormap,
norm=norm,
ticks=bounds[::2],
spacing="uniform",
orientation="vertical")
cb2.set_label(label)
# Remove the outer bounds in tick labels
ticks = [str(i) for i in bounds[::2]]
ticks[0], ticks[-1] = "", ""
cb2.ax.set_yticklabels(ticks)
return
def text(self, text, beam, gate, fontsize=8):
theta = (self.theta0 + beam * self.dtheta + 0.8 * self.dtheta) * np.pi / 180
r = (self.r0 + gate * self.dr)
plt.text(theta, r, text, fontsize=fontsize)
return
def save(self, filepath):
plt.tight_layout()
plt.savefig(filepath)
plt.close()
return
def _scale_plot(self, ax):
# Scale min-max
ax.set_thetamin(self.theta_ticks[0])
ax.set_thetamax(self.theta_ticks[-1])
ax.set_rmin(0)
ax.set_rmax(self.r_ticks[-1])
return
def _monotonically_increasing(self, vec):
if len(vec) < 2:
return True
return all(x <= y for x, y in zip(vec[:-1], vec[1:]))
def plot_fov(self, data_dict, scans, name, start, data, skip=1,
vel_max=100, vel_step=10,
save=True, base_filepath=""):
vel_ranges = list(range(-vel_max, vel_max + 1, vel_step))
vel_ranges.insert(0, -9999)
vel_ranges.append(9999)
vel_cmap = plt.cm.jet_r # use "viridis" colormap to make this redgreen colorblind proof
vel_colors = vel_cmap(np.linspace(0, 1, len(vel_ranges)))
for i in scans:
fig = plt.figure(figsize=(8,4), dpi=120)
vel_ax = self.add_axis(fig, 122)
dat_ax = self.add_axis(fig, 121)
vels = data_dict["vel"][i]
beams = data_dict["beam"][i]
gates = data_dict["gate"][i]
print("----------", i, skip, int(i/skip))
d_vels = data["vel"][int(i/skip)]
d_beams = data["beam"][int(i/skip)]
d_gates = data["gate"][int(i/skip)]
for k, (beam, gate, vel) in enumerate(zip(beams, gates, vels)):
beam, gate, vel = np.array([beam]), np.array([gate]), np.array([vel])
for s in range(len(vel_ranges) - 1):
step_mask = (vel >= vel_ranges[s]) & (vel <= vel_ranges[s + 1])
beam_s = beam[step_mask]
gate_s = gate[step_mask]
self.plot(vel_ax, beam_s, gate_s, vel_colors[s])
# Add data
for k, (vel, beam, gate) in enumerate(zip(d_vels, d_beams, d_gates)):
beam, gate, vel = np.array([beam]), np.array([gate]), np.array([vel])
for s in range(len(vel_ranges) - 1):
step_mask = (vel >= vel_ranges[s]) & (vel <= vel_ranges[s + 1])
beam_s = beam[step_mask]
gate_s = gate[step_mask]
self.plot(dat_ax, beam_s, gate_s, vel_colors[s])
self._add_colorbar(fig, vel_ax, vel_ranges, vel_cmap, label="Velocity [m/s]")
scan_time = start + dt.timedelta(minutes=i)
plt.suptitle("%s \n Scan time %s UT \n Velocity" % (name, scan_time))
if save:
filepath = "%s_%s.png" % (base_filepath, "%02d"%i)
self.save(filepath)
fig.clf()
plt.close()
return
def plot_velocity_ts_beam(dn, rad, bmnum, model, start, end):
""" Plot velocity TS data """
fig = plt.figure(figsize=(6,6), dpi=150)
axs = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]
mkeys = ["vd", "vf", "vt"]
fmt = matplotlib.dates.DateFormatter("%H:%M")
dic = "data/op/{dn}/{model}/{rad}/bm.{bm}/".format(dn=dn.strftime("%Y.%m.%d.%H.%M"),
rad=rad, model=model, bm="%02d"%bmnum)
fstr = glob.glob(dic + "/velocity.ti*mat")
fstr.sort()
axs[0].set_title("%s UT, Radar - %s, Beam - %d, Model - %s"%(dn.strftime("%Y.%m.%d.%H.%M"), rad, bmnum, model))
cols = ["r", "b", "k"]
labs = [r"$V_{d\eta}$", r"$V_{dh}$", r"$V_{t}$"]
fname = "data/op/{dn}/{model}/sd_{rad}_data.csv.gz".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad, model=model)
dat = utils.get_sd_data(fname, bmnum).dropna()
dat = dat.groupby("time").mean().reset_index()
I = 0
for ax, mkey, col, lab in zip(axs, mkeys, cols, labs):
ax.set_ylabel("Velocity [m/s]")
ax.set_xlabel("Time [UT]")
ax.xaxis.set_major_formatter(fmt)
v, vmax, vmin, time = [], [], [], []
for i, f in enumerate(fstr):
sdat = loadmat(f)
if mkey == "vt":
v.append(np.median(sdat["vd"]+sdat["vf"]))
vmax.append((sdat["vd"]+sdat["vf"]).max())
vmin.append((sdat["vd"]+sdat["vf"]).min())
else:
v.append(np.median(sdat[mkey]))
vmax.append(sdat[mkey].max())
vmin.append(sdat[mkey].min())
time.append(start + dt.timedelta(minutes=i))
yerr = np.array([(mn, mx) for mn, mx in zip(vmin, vmax)]).T
ax.errorbar(time, v, yerr=yerr,
mec=col, mfc=col, fmt="r^", ms=1.5, ls="None", ecolor=col,
capsize=1, capthick=.4, elinewidth=0.4,
alpha=0.5, label=lab)
if I == 2:
ax.plot(dat.time, dat.v, color="darkred", marker="o",
alpha=0.7, ls="None", markersize=1.5, label=r"$V_{sd}^{los}$")
ax.axhline(0, color="gray", ls="--", lw=0.6)
ax.legend(loc=1)
ax.set_ylim(-100, 200)
ax.set_xlim(start, end)
I += 1
fname = "data/op/{dn}/{model}/{rad}/bm{bm}.png".format(dn=dn.strftime("%Y.%m.%d.%H.%M"), rad=rad, model=model, bm="%02d"%bmnum)
fig.savefig(fname,bbox_inches="tight")
return
def plot_edens_versus_height(eDensPC, eDensAC, ylim=[50,350]):
fig, axes = plt.subplots(figsize=(15,6), nrows=2, ncols=5, sharey=True, sharex=False)
from scipy import stats
for i in range(5):
x, y = np.array(eDensPC[i+16]), np.array(eDensAC[i+16])
xmean, ymean = np.quantile(x, q=.56, axis=0), np.quantile(y, q=.56, axis=0) #np.median(x, axis=0), np.median(y, axis=0)
xstd, ystd = 0.3*stats.median_absolute_deviation(x, axis=0), 0.3*stats.median_absolute_deviation(y, axis=0)
xl, xu = utils.smooth(np.quantile(x, q=.5, axis=0), window_len=51),\
utils.smooth(np.quantile(x, q=.62, axis=0), window_len=51)
yl, yu = utils.smooth(np.quantile(y, q=.5, axis=0), window_len=51),\
utils.smooth(np.quantile(y, q=.62, axis=0), window_len=51)
xmean, ymean = utils.smooth(xmean, window_len=51), utils.smooth(ymean, window_len=51)
ax = axes[0, i]
ax.semilogx(xmean, np.arange(50,350,1).ravel(), "ro", lw=0.8, markersize=1)
ax.fill_betweenx(np.arange(50,350,1).ravel(), x1=xl, x2=xu, alpha=0.3, color="r")
ax.set_xlim(.01, 10)
if i==0: ax.set_ylabel("Height, km")
ax.set_xlabel("Percentage Change")
ax = axes[1, i]
ax.semilogx(ymean, np.arange(50,350,1).ravel(), "ro", lw=0.8, markersize=1)
ax.fill_betweenx(np.arange(50,350,1).ravel(), x1=yl, x2=yu, alpha=0.3, color="r")
ax.set_xlim(.1, 10000)
if i==0: ax.set_ylabel("Height, km")
ax.set_xlabel("Absolute Change")
fig.subplots_adjust(hspace=0.3)
fig.savefig("data/sens.png", bbox_inches="tight")
return
def plot_ssi_versus_bins(irr, wavelength, ylim=[50,350]):
fig, ax = plt.subplots(figsize=(4,4), nrows=1, ncols=1, sharey=True, sharex=False)
xmean = np.mean(irr, axis=0)#np.quantile(irr, q=.56, axis=0)
std = np.std(irr, axis=0)
ax.loglog(wavelength, xmean, "ro", lw=0.8, markersize=1)
ax.errorbar(wavelength, xmean, yerr=std, capthick=1, elinewidth=0.8, capsize=1, ecolor="r", marker="o", ls="None", ms=1, mfc="k", mec="k")
ax.set_ylim(1e5,1e12)
ax.set_xlabel(r"$\Lambda$ (A)")
ax.set_ylabel(r"$I_{o}$ ($Wm^{-2}$)")
fig.savefig("data/sens.b.png", bbox_inches="tight")
return
def plot_rti(rad="bks", stime=dt.datetime(2015,5,5,21,50), etime=dt.datetime(2015,5,5,22,51), fs=None, bs=None):
import os
import utils
from scipy.signal import medfilt2d
fname = "data/op/2015.05.05.22.11/waccmx/sd_bks_data.csv.gz"
os.system("gzip -d " + fname)
df = pd.read_csv(fname.replace(".gz", ""), parse_dates=["time"])
os.system("gzip " + fname.replace(".gz", ""))
X, Y, Z = utils.get_gridded_parameters(df)
fig, ax = plt.subplots(figsize=(5,2.5), nrows=1, ncols=1, sharey=True, sharex=False, dpi=150)
fmt = matplotlib.dates.DateFormatter("%H:%M")
ax.xaxis.set_major_formatter(fmt)
ax.text(0.75, 1.05, r"%s| $f_o$=%.1f MHz"%(rad.upper(), np.median(df.tfreq)/1e3), horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes)
if fs is not None: ax.axvline(fs, ls="--", color="b", lw=0.8)
if bs is not None: ax.axvline(bs, ls="--", color="r", lw=0.8)
ax.set_xlim(stime, etime)
ax.set_ylim(0, 70)
ax.set_xlabel("Time, UT")
ax.set_ylabel("Range Gate")
from matplotlib import colors as mpl_colors
import matplotlib.cm as cm
cmj = cm.get_cmap("jet")
cmpr = matplotlib.cm.prism
cmap = matplotlib.colors.ListedColormap([cmpr(.142), cmpr(.125),
cmpr(.11), cmpr(.1),
".6", cmpr(.175),
cmpr(.158), cmj(.32),
cmj(.37)])
bounds = np.round(np.linspace(-100, 100, 7))
bounds[3] = -15.
bounds = np.insert(bounds,4,15.)
norm = mpl_colors.BoundaryNorm(bounds, cmap.N)
cmap.set_bad("w", alpha=0.0)
pcoll = ax.pcolormesh(X.T, Y.T, utils.medfilt2D_weight(Z), lw=0.01, edgecolors="None", cmap=cmap, norm=norm)
cb = fig.colorbar(pcoll,shrink=0.9)
cb.set_label(r"Velocity, $ms^{-1}$")
fig.autofmt_xdate()
fig.savefig("data/rti.example.png", bbox_inches="tight")
return
def plot_supermag():
import os
from scipy import signal
import matplotlib.dates as mdates
fname = "data/op/2015.05.05.22.11/waccmx/sd_bks_data.csv.gz"
os.system("gzip -d " + fname)
df = pd.read_csv(fname.replace(".gz", ""), parse_dates=["time"])
os.system("gzip " + fname.replace(".gz", ""))
fs=dt.datetime(2015,5,5,22,7,40)
bs=dt.datetime(2015,5,5,22,9,30)
df = df[(df.time>=fs) & (df.time<=bs)][["time", "v"]]
df = df.groupby("time").v.agg(["median", "std"]).reset_index()
dx = pd.read_csv("data/supermag.csv", parse_dates=["Date_UTC"])
dx = dx[["Date_UTC", "IAGA", "GEOLON", "GEOLAT", "MAGON", "MAGLAT", "MLT", "MCOLAT", "SZA", "dbn_nez", "dbe_nez", "dbz_nez"]]
stns = ["BOU", "TUC", "T25", "BSL", "M06", "VIC", "BSL", "M08", "C08",]
fig, axes = plt.subplots(figsize=(6,12), nrows=3, ncols=3, sharey=True, sharex=True, dpi=150)
print(set(dx.IAGA))
j = 0
for stn in set(dx.IAGA):
if stn in stns:
if len(stns) > 1: ax = axes[j]
else: ax = axes
du = dx[(dx.IAGA==stn)]
keys, col, labs = ["dbn_nez", "dbe_nez", "dbz_nez"], ["r", "b", "k"], ["dN", "dE", "dZ"]
for i, k in enumerate(keys):
x = du[[k, "Date_UTC"]].set_index("Date_UTC").resample("2s").interpolate("linear").reset_index()
y = signal.resample(np.array(df["median"]), len(x))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H-%M"))
ax.plot(x.Date_UTC, x[k], col[i], ls="--", lw=0.8, label=labs[i])
ax.legend(loc=4)
ax.set_xlim(x.Date_UTC.tolist()[0], x.Date_UTC.tolist()[-1])
ax.set_ylabel("nT")
ax.set_xlabel("Time, UT")
ax.set_title("Mag: %s (%.2f, %.2f, %.2f)"%(stn,
du["GEOLON"].tolist()[0], du["GEOLAT"].tolist()[0], du["SZA"].tolist()[0]))
j += 1
fig.autofmt_xdate()
fig.savefig("data/corr.example.png", bbox_inches="tight")
return
| [
"matplotlib.cm.get_cmap",
"scipy.io.loadmat",
"pandas.read_csv",
"utils.get_sd_data",
"matplotlib.pyplot.suptitle",
"utils.smooth",
"numpy.ones",
"mpl_toolkits.axes_grid1.Size.Fixed",
"mpl_toolkits.axisartist.floating_axes.GridHelperCurveLinear",
"matplotlib.pyplot.figure",
"numpy.mean",
"nump... | [((297, 318), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (311, 318), False, 'import matplotlib\n'), ((4609, 4653), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'cax': 'cbax', 'shrink': '(0.7)'}), '(mappable, cax=cbax, shrink=0.7)\n', (4621, 4653), True, 'import matplotlib.pyplot as plt\n'), ((5865, 5906), 'mpl_toolkits.axisartist.grid_finder.FixedLocator', 'FixedLocator', (['[v for v, s in angle_ticks]'], {}), '([v for v, s in angle_ticks])\n', (5877, 5906), False, 'from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter\n'), ((6304, 6343), 'mpl_toolkits.axisartist.grid_finder.FixedLocator', 'FixedLocator', (['[v for v, s in alt_ticks]'], {}), '([v for v, s in alt_ticks])\n', (6316, 6343), False, 'from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter\n'), ((6552, 6772), 'mpl_toolkits.axisartist.floating_axes.GridHelperCurveLinear', 'floating_axes.GridHelperCurveLinear', (['tr'], {'extremes': '(0, angran, Re + minalt, Re + maxalt)', 'grid_locator1': 'grid_locator1', 'grid_locator2': 'grid_locator2', 'tick_formatter1': 'tick_formatter1', 'tick_formatter2': 'tick_formatter2'}), '(tr, extremes=(0, angran, Re + minalt, \n Re + maxalt), grid_locator1=grid_locator1, grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1, tick_formatter2=tick_formatter2)\n', (6587, 6772), True, 'import mpl_toolkits.axisartist.floating_axes as floating_axes\n'), ((6844, 6909), 'mpl_toolkits.axisartist.floating_axes.FloatingSubplot', 'floating_axes.FloatingSubplot', (['fig', 'rect'], {'grid_helper': 'grid_helper'}), '(fig, rect, grid_helper=grid_helper)\n', (6873, 6909), True, 'import mpl_toolkits.axisartist.floating_axes as floating_axes\n'), ((11778, 11789), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11787, 11789), True, 'import matplotlib.pyplot as plt\n'), ((13308, 13319), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13317, 13319), True, 'import matplotlib.pyplot as plt\n'), ((13464, 13499), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)', 'dpi': '(120)'}), '(figsize=(4, 4), dpi=120)\n', (13474, 13499), True, 'import matplotlib.pyplot as plt\n'), ((13719, 13760), 'utils.calculate_sza', 'utils.calculate_sza', (['t', 'lat', 'lon'], {'alt': '(300)'}), '(t, lat, lon, alt=300)\n', (13738, 13760), False, 'import utils\n'), ((14152, 14163), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14161, 14163), True, 'import matplotlib.pyplot as plt\n'), ((14258, 14293), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(150)'}), '(figsize=(6, 6), dpi=150)\n', (14268, 14293), True, 'import matplotlib.pyplot as plt\n'), ((14411, 14450), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (14441, 14450), False, 'import matplotlib\n'), ((14562, 14600), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'parse_dates': "['dn']"}), "(fname, parse_dates=['dn'])\n", (14573, 14600), True, 'import pandas as pd\n'), ((16335, 16370), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)', 'dpi': '(120)'}), '(figsize=(4, 4), dpi=120)\n', (16345, 16370), True, 'import matplotlib.pyplot as plt\n'), ((16691, 16732), 'utils.calculate_sza', 'utils.calculate_sza', (['t', 'lat', 'lon'], {'alt': '(300)'}), '(t, lat, lon, alt=300)\n', (16710, 16732), False, 'import utils\n'), ((17127, 17138), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17136, 17138), True, 'import matplotlib.pyplot as plt\n'), ((20726, 20737), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20735, 20737), True, 'import matplotlib.pyplot as plt\n'), ((20816, 20842), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (20826, 20842), True, 'import matplotlib.pyplot as plt\n'), ((21463, 21489), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (21473, 21489), True, 'import matplotlib.pyplot as plt\n'), ((28662, 28697), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(150)'}), '(figsize=(6, 6), dpi=150)\n', (28672, 28697), True, 'import matplotlib.pyplot as plt\n'), ((28815, 28854), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (28845, 28854), False, 'import matplotlib\n'), ((29006, 29041), 'glob.glob', 'glob.glob', (["(dic + '/velocity.ti*mat')"], {}), "(dic + '/velocity.ti*mat')\n", (29015, 29041), False, 'import glob\n'), ((31053, 31127), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 6)', 'nrows': '(2)', 'ncols': '(5)', 'sharey': '(True)', 'sharex': '(False)'}), '(figsize=(15, 6), nrows=2, ncols=5, sharey=True, sharex=False)\n', (31065, 31127), True, 'import matplotlib.pyplot as plt\n'), ((32690, 32763), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)', 'nrows': '(1)', 'ncols': '(1)', 'sharey': '(True)', 'sharex': '(False)'}), '(figsize=(4, 4), nrows=1, ncols=1, sharey=True, sharex=False)\n', (32702, 32763), True, 'import matplotlib.pyplot as plt\n'), ((32775, 32795), 'numpy.mean', 'np.mean', (['irr'], {'axis': '(0)'}), '(irr, axis=0)\n', (32782, 32795), True, 'import numpy as np\n'), ((32838, 32857), 'numpy.std', 'np.std', (['irr'], {'axis': '(0)'}), '(irr, axis=0)\n', (32844, 32857), True, 'import numpy as np\n'), ((33265, 33296), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)', '(21)', '(50)'], {}), '(2015, 5, 5, 21, 50)\n', (33276, 33296), True, 'import datetime as dt\n'), ((33300, 33331), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)', '(22)', '(51)'], {}), '(2015, 5, 5, 22, 51)\n', (33311, 33331), True, 'import datetime as dt\n'), ((33487, 33516), 'os.system', 'os.system', (["('gzip -d ' + fname)"], {}), "('gzip -d ' + fname)\n", (33496, 33516), False, 'import os\n'), ((33650, 33682), 'utils.get_gridded_parameters', 'utils.get_gridded_parameters', (['df'], {}), '(df)\n', (33678, 33682), False, 'import utils\n'), ((33697, 33785), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 2.5)', 'nrows': '(1)', 'ncols': '(1)', 'sharey': '(True)', 'sharex': '(False)', 'dpi': '(150)'}), '(figsize=(5, 2.5), nrows=1, ncols=1, sharey=True, sharex=False,\n dpi=150)\n', (33709, 33785), True, 'import matplotlib.pyplot as plt\n'), ((33791, 33830), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H:%M"""'], {}), "('%H:%M')\n", (33821, 33830), False, 'import matplotlib\n'), ((34385, 34403), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (34396, 34403), True, 'import matplotlib.cm as cm\n'), ((34691, 34717), 'numpy.insert', 'np.insert', (['bounds', '(4)', '(15.0)'], {}), '(bounds, 4, 15.0)\n', (34700, 34717), True, 'import numpy as np\n'), ((34726, 34765), 'matplotlib.colors.BoundaryNorm', 'mpl_colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (34749, 34765), True, 'from matplotlib import colors as mpl_colors\n'), ((35263, 35292), 'os.system', 'os.system', (["('gzip -d ' + fname)"], {}), "('gzip -d ' + fname)\n", (35272, 35292), False, 'import os\n'), ((35419, 35453), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)', '(22)', '(7)', '(40)'], {}), '(2015, 5, 5, 22, 7, 40)\n', (35430, 35453), True, 'import datetime as dt\n'), ((35456, 35490), 'datetime.datetime', 'dt.datetime', (['(2015)', '(5)', '(5)', '(22)', '(9)', '(30)'], {}), '(2015, 5, 5, 22, 9, 30)\n', (35467, 35490), True, 'import datetime as dt\n'), ((35620, 35678), 'pandas.read_csv', 'pd.read_csv', (['"""data/supermag.csv"""'], {'parse_dates': "['Date_UTC']"}), "('data/supermag.csv', parse_dates=['Date_UTC'])\n", (35631, 35678), True, 'import pandas as pd\n'), ((35901, 35987), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 12)', 'nrows': '(3)', 'ncols': '(3)', 'sharey': '(True)', 'sharex': '(True)', 'dpi': '(150)'}), '(figsize=(6, 12), nrows=3, ncols=3, sharey=True, sharex=True,\n dpi=150)\n', (35913, 35987), True, 'import matplotlib.pyplot as plt\n'), ((1500, 1548), 'matplotlib.pyplot.Circle', 'plt.Circle', ([], {'xy': 'center', 'radius': 'orig_handle.radius'}), '(xy=center, radius=orig_handle.radius)\n', (1510, 1548), True, 'import matplotlib.pyplot as plt\n'), ((2805, 2810), 'pylab.gca', 'gca', ([], {}), '()\n', (2808, 2810), False, 'from pylab import gca, gcf\n'), ((4055, 4069), 'mpl_toolkits.axes_grid1.Size.AxesX', 'Size.AxesX', (['ax'], {}), '(ax)\n', (4065, 4069), False, 'from mpl_toolkits.axes_grid1 import SubplotDivider, Size\n'), ((4095, 4110), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(0.1)'], {}), '(0.1)\n', (4105, 4110), False, 'from mpl_toolkits.axes_grid1 import SubplotDivider, Size\n'), ((4134, 4149), 'mpl_toolkits.axes_grid1.Size.Fixed', 'Size.Fixed', (['(0.2)'], {}), '(0.2)\n', (4144, 4149), False, 'from mpl_toolkits.axes_grid1 import SubplotDivider, Size\n'), ((4171, 4185), 'mpl_toolkits.axes_grid1.Size.AxesY', 'Size.AxesY', (['ax'], {}), '(ax)\n', (4181, 4185), False, 'from mpl_toolkits.axes_grid1 import SubplotDivider, Size\n'), ((6498, 6520), 'matplotlib.projections.polar.PolarTransform', 'polar.PolarTransform', ([], {}), '()\n', (6518, 6520), False, 'from matplotlib.projections import polar\n'), ((6807, 6834), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6817, 6834), True, 'import matplotlib.pyplot as plt\n'), ((13814, 13842), 'dateutil.tz.gettz', 'tz.gettz', (['"""America/New_York"""'], {}), "('America/New_York')\n", (13822, 13842), False, 'from dateutil import tz\n'), ((16786, 16814), 'dateutil.tz.gettz', 'tz.gettz', (['"""America/New_York"""'], {}), "('America/New_York')\n", (16794, 16814), False, 'from dateutil import tz\n'), ((23354, 23387), 'matplotlib.pyplot.rgrids', 'plt.rgrids', (['self.r_ticks', 'rlabels'], {}), '(self.r_ticks, rlabels)\n', (23364, 23387), True, 'import matplotlib.pyplot as plt\n'), ((25051, 25099), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds[::2]', 'colormap.N'], {}), '(bounds[::2], colormap.N)\n', (25074, 25099), True, 'import matplotlib as mpl\n'), ((25114, 25236), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['cax'], {'cmap': 'colormap', 'norm': 'norm', 'ticks': 'bounds[::2]', 'spacing': '"""uniform"""', 'orientation': '"""vertical"""'}), "(cax, cmap=colormap, norm=norm, ticks=bounds[::2],\n spacing='uniform', orientation='vertical')\n", (25139, 25236), True, 'import matplotlib as mpl\n'), ((25694, 25737), 'matplotlib.pyplot.text', 'plt.text', (['theta', 'r', 'text'], {'fontsize': 'fontsize'}), '(theta, r, text, fontsize=fontsize)\n', (25702, 25737), True, 'import matplotlib.pyplot as plt\n'), ((25792, 25810), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25808, 25810), True, 'import matplotlib.pyplot as plt\n'), ((25819, 25840), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (25830, 25840), True, 'import matplotlib.pyplot as plt\n'), ((25849, 25860), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25858, 25860), True, 'import matplotlib.pyplot as plt\n'), ((34630, 34655), 'numpy.linspace', 'np.linspace', (['(-100)', '(100)', '(7)'], {}), '(-100, 100, 7)\n', (34641, 34655), True, 'import numpy as np\n'), ((34836, 34861), 'utils.medfilt2D_weight', 'utils.medfilt2D_weight', (['Z'], {}), '(Z)\n', (34858, 34861), False, 'import utils\n'), ((6413, 6423), 'matplotlib.transforms.Affine2D', 'Affine2D', ([], {}), '()\n', (6421, 6423), False, 'from matplotlib.transforms import Affine2D, Transform\n'), ((6461, 6471), 'matplotlib.transforms.Affine2D', 'Affine2D', ([], {}), '()\n', (6469, 6471), False, 'from matplotlib.transforms import Affine2D, Transform\n'), ((10754, 10768), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (10765, 10768), True, 'import pandas as pd\n'), ((10950, 10999), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (10964, 10999), True, 'import numpy as np\n'), ((11019, 11071), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'zorder': 'zorder', 'alpha': 'alpha'}), '(segments, zorder=zorder, alpha=alpha)\n', (11033, 11071), False, 'from matplotlib.collections import LineCollection\n'), ((12480, 12494), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (12491, 12494), True, 'import pandas as pd\n'), ((12676, 12725), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (12690, 12725), True, 'import numpy as np\n'), ((12745, 12797), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'zorder': 'zorder', 'alpha': 'alpha'}), '(segments, zorder=zorder, alpha=alpha)\n', (12759, 12797), False, 'from matplotlib.collections import LineCollection\n'), ((14897, 14925), 'utils.get_sd_data', 'utils.get_sd_data', (['fname', '(15)'], {}), '(fname, 15)\n', (14914, 14925), False, 'import utils\n'), ((18949, 18963), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (18960, 18963), True, 'import pandas as pd\n'), ((19188, 19237), 'numpy.concatenate', 'np.concatenate', (['[points[:-1], points[1:]]'], {'axis': '(1)'}), '([points[:-1], points[1:]], axis=1)\n', (19202, 19237), True, 'import numpy as np\n'), ((19257, 19309), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'zorder': 'zorder', 'alpha': 'alpha'}), '(segments, zorder=zorder, alpha=alpha)\n', (19271, 19309), False, 'from matplotlib.collections import LineCollection\n'), ((20893, 20915), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (20902, 20915), True, 'import numpy as np\n'), ((21016, 21038), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (21025, 21038), True, 'import numpy as np\n'), ((21139, 21161), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (21148, 21161), True, 'import numpy as np\n'), ((21540, 21562), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (21549, 21562), True, 'import numpy as np\n'), ((21668, 21690), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (21677, 21690), True, 'import numpy as np\n'), ((26755, 26790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)', 'dpi': '(120)'}), '(figsize=(8, 4), dpi=120)\n', (26765, 26790), True, 'import matplotlib.pyplot as plt\n'), ((28300, 28371), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['("""%s \n Scan time %s UT \n Velocity""" % (name, scan_time))'], {}), '("""%s \n Scan time %s UT \n Velocity""" % (name, scan_time))\n', (28312, 28371), True, 'import matplotlib.pyplot as plt\n'), ((28528, 28539), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28537, 28539), True, 'import matplotlib.pyplot as plt\n'), ((29383, 29414), 'utils.get_sd_data', 'utils.get_sd_data', (['fname', 'bmnum'], {}), '(fname, bmnum)\n', (29400, 29414), False, 'import utils\n'), ((29762, 29772), 'scipy.io.loadmat', 'loadmat', (['f'], {}), '(f)\n', (29769, 29772), False, 'from scipy.io import loadmat\n'), ((31193, 31218), 'numpy.array', 'np.array', (['eDensPC[i + 16]'], {}), '(eDensPC[i + 16])\n', (31201, 31218), True, 'import numpy as np\n'), ((31218, 31243), 'numpy.array', 'np.array', (['eDensAC[i + 16]'], {}), '(eDensAC[i + 16])\n', (31226, 31243), True, 'import numpy as np\n'), ((31265, 31295), 'numpy.quantile', 'np.quantile', (['x'], {'q': '(0.56)', 'axis': '(0)'}), '(x, q=0.56, axis=0)\n', (31276, 31295), True, 'import numpy as np\n'), ((31296, 31326), 'numpy.quantile', 'np.quantile', (['y'], {'q': '(0.56)', 'axis': '(0)'}), '(y, q=0.56, axis=0)\n', (31307, 31326), True, 'import numpy as np\n'), ((31813, 31847), 'utils.smooth', 'utils.smooth', (['xmean'], {'window_len': '(51)'}), '(xmean, window_len=51)\n', (31825, 31847), False, 'import utils\n'), ((31849, 31883), 'utils.smooth', 'utils.smooth', (['ymean'], {'window_len': '(51)'}), '(ymean, window_len=51)\n', (31861, 31883), False, 'import utils\n'), ((11145, 11167), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['*nr_lim'], {}), '(*nr_lim)\n', (11158, 11167), True, 'import matplotlib.pyplot as plt\n'), ((12871, 12893), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['*nr_lim'], {}), '(*nr_lim)\n', (12884, 12893), True, 'import matplotlib.pyplot as plt\n'), ((19383, 19405), 'matplotlib.pyplot.Normalize', 'plt.Normalize', (['*nr_lim'], {}), '(*nr_lim)\n', (19396, 19405), True, 'import matplotlib.pyplot as plt\n'), ((19440, 19470), 'utils.smooth', 'utils.smooth', (['v'], {'window_len': '(21)'}), '(v, window_len=21)\n', (19452, 19470), False, 'import utils\n'), ((28264, 28287), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': 'i'}), '(minutes=i)\n', (28276, 28287), True, 'import datetime as dt\n'), ((31395, 31437), 'scipy.stats.median_absolute_deviation', 'stats.median_absolute_deviation', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (31426, 31437), False, 'from scipy import stats\n'), ((31443, 31485), 'scipy.stats.median_absolute_deviation', 'stats.median_absolute_deviation', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (31474, 31485), False, 'from scipy import stats\n'), ((31516, 31545), 'numpy.quantile', 'np.quantile', (['x'], {'q': '(0.5)', 'axis': '(0)'}), '(x, q=0.5, axis=0)\n', (31527, 31545), True, 'import numpy as np\n'), ((31592, 31622), 'numpy.quantile', 'np.quantile', (['x'], {'q': '(0.62)', 'axis': '(0)'}), '(x, q=0.62, axis=0)\n', (31603, 31622), True, 'import numpy as np\n'), ((31668, 31697), 'numpy.quantile', 'np.quantile', (['y'], {'q': '(0.5)', 'axis': '(0)'}), '(y, q=0.5, axis=0)\n', (31679, 31697), True, 'import numpy as np\n'), ((31744, 31774), 'numpy.quantile', 'np.quantile', (['y'], {'q': '(0.62)', 'axis': '(0)'}), '(y, q=0.62, axis=0)\n', (31755, 31774), True, 'import numpy as np\n'), ((13786, 13801), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (13794, 13801), False, 'from dateutil import tz\n'), ((16758, 16773), 'dateutil.tz.gettz', 'tz.gettz', (['"""UTC"""'], {}), "('UTC')\n", (16766, 16773), False, 'from dateutil import tz\n'), ((19537, 19555), 'numpy.arange', 'np.arange', (['(0)', '(2000)'], {}), '(0, 2000)\n', (19546, 19555), True, 'import numpy as np\n'), ((19629, 19647), 'numpy.arange', 'np.arange', (['(0)', '(2000)'], {}), '(0, 2000)\n', (19638, 19647), True, 'import numpy as np\n'), ((19726, 19744), 'numpy.arange', 'np.arange', (['(0)', '(2000)'], {}), '(0, 2000)\n', (19735, 19744), True, 'import numpy as np\n'), ((27307, 27323), 'numpy.array', 'np.array', (['[beam]'], {}), '([beam])\n', (27315, 27323), True, 'import numpy as np\n'), ((27325, 27341), 'numpy.array', 'np.array', (['[gate]'], {}), '([gate])\n', (27333, 27341), True, 'import numpy as np\n'), ((27343, 27358), 'numpy.array', 'np.array', (['[vel]'], {}), '([vel])\n', (27351, 27358), True, 'import numpy as np\n'), ((27794, 27810), 'numpy.array', 'np.array', (['[beam]'], {}), '([beam])\n', (27802, 27810), True, 'import numpy as np\n'), ((27812, 27828), 'numpy.array', 'np.array', (['[gate]'], {}), '([gate])\n', (27820, 27828), True, 'import numpy as np\n'), ((27830, 27845), 'numpy.array', 'np.array', (['[vel]'], {}), '([vel])\n', (27838, 27845), True, 'import numpy as np\n'), ((29828, 29862), 'numpy.median', 'np.median', (["(sdat['vd'] + sdat['vf'])"], {}), "(sdat['vd'] + sdat['vf'])\n", (29837, 29862), True, 'import numpy as np\n'), ((30023, 30044), 'numpy.median', 'np.median', (['sdat[mkey]'], {}), '(sdat[mkey])\n', (30032, 30044), True, 'import numpy as np\n'), ((30170, 30193), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': 'i'}), '(minutes=i)\n', (30182, 30193), True, 'import datetime as dt\n'), ((31935, 31956), 'numpy.arange', 'np.arange', (['(50)', '(350)', '(1)'], {}), '(50, 350, 1)\n', (31944, 31956), True, 'import numpy as np\n'), ((32018, 32039), 'numpy.arange', 'np.arange', (['(50)', '(350)', '(1)'], {}), '(50, 350, 1)\n', (32027, 32039), True, 'import numpy as np\n'), ((32251, 32272), 'numpy.arange', 'np.arange', (['(50)', '(350)', '(1)'], {}), '(50, 350, 1)\n', (32260, 32272), True, 'import numpy as np\n'), ((32334, 32355), 'numpy.arange', 'np.arange', (['(50)', '(350)', '(1)'], {}), '(50, 350, 1)\n', (32343, 32355), True, 'import numpy as np\n'), ((33929, 33948), 'numpy.median', 'np.median', (['df.tfreq'], {}), '(df.tfreq)\n', (33938, 33948), True, 'import numpy as np\n'), ((36468, 36490), 'numpy.array', 'np.array', (["df['median']"], {}), "(df['median'])\n", (36476, 36490), True, 'import numpy as np\n'), ((36545, 36574), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%H-%M"""'], {}), "('%H-%M')\n", (36565, 36574), True, 'import matplotlib.dates as mdates\n'), ((10889, 10906), 'numpy.array', 'np.array', (['[th, r]'], {}), '([th, r])\n', (10897, 10906), True, 'import numpy as np\n'), ((12615, 12632), 'numpy.array', 'np.array', (['[th, r]'], {}), '([th, r])\n', (12623, 12632), True, 'import numpy as np\n'), ((19127, 19144), 'numpy.array', 'np.array', (['[th, r]'], {}), '([th, r])\n', (19135, 19144), True, 'import numpy as np\n'), ((19559, 19572), 'numpy.ones', 'np.ones', (['(2000)'], {}), '(2000)\n', (19566, 19572), True, 'import numpy as np\n'), ((19651, 19664), 'numpy.ones', 'np.ones', (['(2000)'], {}), '(2000)\n', (19658, 19664), True, 'import numpy as np\n'), ((19748, 19761), 'numpy.ones', 'np.ones', (['(2000)'], {}), '(2000)\n', (19755, 19761), True, 'import numpy as np\n'), ((15865, 15905), 'numpy.min', 'np.min', (["(sdat[mkey] + sdat[mkey + '_min'])"], {}), "(sdat[mkey] + sdat[mkey + '_min'])\n", (15871, 15905), True, 'import numpy as np\n'), ((15935, 15975), 'numpy.max', 'np.max', (["(sdat[mkey] + sdat[mkey + '_max'])"], {}), "(sdat[mkey] + sdat[mkey + '_max'])\n", (15941, 15975), True, 'import numpy as np\n')] |
import numpy as np
from synthrl.common.utils.terminalutils import mktable
class IOSet:
def __init__(self, ioset):
self.ioset = list(ioset)
self.n_example = len(self.ioset)
def __len__(self):
return self.n_example
def get_rand(self, n):
indices = (np.random.permutation(self.n_example).tolist() * (n // self.n_example + 1))[:n]
return [self.ioset[i] for i in indices]
def __getitem__(self, idx):
return self.ioset[idx]
def add(self, inputs, output):
self.ioset.append((inputs, output))
self.n_example += 1
def __repr__(self):
return mktable(self.ioset, header=['Input', 'Output'], align='>', index=True)
| [
"numpy.random.permutation",
"synthrl.common.utils.terminalutils.mktable"
] | [((592, 662), 'synthrl.common.utils.terminalutils.mktable', 'mktable', (['self.ioset'], {'header': "['Input', 'Output']", 'align': '""">"""', 'index': '(True)'}), "(self.ioset, header=['Input', 'Output'], align='>', index=True)\n", (599, 662), False, 'from synthrl.common.utils.terminalutils import mktable\n'), ((274, 311), 'numpy.random.permutation', 'np.random.permutation', (['self.n_example'], {}), '(self.n_example)\n', (295, 311), True, 'import numpy as np\n')] |
import numpy as np
def norm(p2, p1):
"""
Compute normal vector based on two points
Parameters
----------
p2,p1: ndarray(2D)
Output
------
ndarray(2D)
Example
------
TODO
"""
np.seterr(divide='ignore', invalid='ignore')
norm = np.array([0, 0, 1])
v1 = p2 - p1
v1Length = np.linalg.norm(v1, axis=1)
V1 = v1 / v1Length[:, np.newaxis]
V2 = np.cross(V1, norm)
return V2
def normalVector(points, onPoint=True):
"""
Compute normal vector from series of points - linestring
Parameters
----------
points : ndarray(2D)
onPoint: Computer normal vector on point instead of half the segment ahead
Output
------
ndarray(4D), [xo,yo,xn,yn,x,y]
Note
----
xo=x and yo=y if onPoint is True.
If not, it's equal to half distance between the two points
xn,yn=normal vector point
Example
-------
TODO: Might not work with open linestring
"""
n = len(points)
isClosed = np.array_equal(points[0], points[n - 1])
if isClosed:points=points[:n-1]
n=len(points)
p1 = points
newpoints = np.column_stack((np.zeros((n, 4)),points))
p2 = np.roll(p1, -1, axis=0)
if onPoint:
p0=p1
p1 = np.roll(p1, 1, axis=0)
V1 = norm(p2, p1)[:,:-1]
else:
p1 = (p1 + p2) * 0.5
p0=p1
V1 = norm(p2, p1)[:,:-1]
newpoints[:, 0] = p0[:, 0]
newpoints[:, 1] = p0[:, 1]
newpoints[:, 2] = V1[:, 0]
newpoints[:, 3] = V1[:, 1]
# Check if last and first are the same
if isClosed:
newpoints=np.append(newpoints,newpoints[0][None,:],axis=0)
return newpoints
def translate(x, y):
"""
Translate matrix for 2D points
Parameters
----------
x,y: np.float32
Output
------
ndarray(3D)
Example
------
TODO
"""
mat3 = np.zeros((3, 3))
m = np.repeat(mat3[np.newaxis, :, :], len(x), axis=0)
m[:, 0, 0] = 1.0
m[:, 1, 1] = 1.0
m[:, 2, 2] = 1.0
m[:, 0, 2] = x
m[:, 1, 2] = y
return m
def rotate(theta):
"""
Rotate matrix for 2D points
Parameters
----------
theta: rad
Output
------
ndarray(3D)
Example
------
TODO
"""
c = np.cos(theta)
s = np.sin(theta)
mat3 = np.zeros((3, 3))
m = np.repeat(mat3[np.newaxis, :, :], len(theta), axis=0)
m[:, 0, 0] = c
m[:, 0, 1] = -s
m[:, 1, 0] = s
m[:, 1, 1] = c
m[:, 2, 2] = 1.0
return m
def dot(A,B):
return A[...,0]*B[...,0]+A[...,1]*B[...,1]
def cross(A,B):
return A[...,0]*B[...,1]-A[...,1]*B[...,0] | [
"numpy.roll",
"numpy.seterr",
"numpy.cross",
"numpy.zeros",
"numpy.append",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy.array_equal"
] | [((216, 260), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (225, 260), True, 'import numpy as np\n'), ((270, 289), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (278, 289), True, 'import numpy as np\n'), ((318, 344), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {'axis': '(1)'}), '(v1, axis=1)\n', (332, 344), True, 'import numpy as np\n'), ((388, 406), 'numpy.cross', 'np.cross', (['V1', 'norm'], {}), '(V1, norm)\n', (396, 406), True, 'import numpy as np\n'), ((973, 1013), 'numpy.array_equal', 'np.array_equal', (['points[0]', 'points[n - 1]'], {}), '(points[0], points[n - 1])\n', (987, 1013), True, 'import numpy as np\n'), ((1145, 1168), 'numpy.roll', 'np.roll', (['p1', '(-1)'], {'axis': '(0)'}), '(p1, -1, axis=0)\n', (1152, 1168), True, 'import numpy as np\n'), ((1792, 1808), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1800, 1808), True, 'import numpy as np\n'), ((2151, 2164), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2157, 2164), True, 'import numpy as np\n'), ((2171, 2184), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2177, 2184), True, 'import numpy as np\n'), ((2194, 2210), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2202, 2210), True, 'import numpy as np\n'), ((1205, 1227), 'numpy.roll', 'np.roll', (['p1', '(1)'], {'axis': '(0)'}), '(p1, 1, axis=0)\n', (1212, 1227), True, 'import numpy as np\n'), ((1521, 1572), 'numpy.append', 'np.append', (['newpoints', 'newpoints[0][None, :]'], {'axis': '(0)'}), '(newpoints, newpoints[0][None, :], axis=0)\n', (1530, 1572), True, 'import numpy as np\n'), ((1112, 1128), 'numpy.zeros', 'np.zeros', (['(n, 4)'], {}), '((n, 4))\n', (1120, 1128), True, 'import numpy as np\n')] |
#Functions utils to development fast
def main():
#libs
import streamlit as st
st.sidebar.success('Selecione um item abaixo')
st.markdown("<h1 style='color:#F00;'>Tutorial de desenvolvimento rápido com Streamlit Python (tags mais úteis)</h1>", unsafe_allow_html=True)
st.markdown(
"""
O **Streamlit** 👈 é um pacote Python open-source para criação de layout fácil e com aparência bonita.
Pode ser customizado para projetos de **Inteligência Artificial** e **Data Sciente**.
**Requisitos:** Python: 3.6 - 3.8
**Instalação:** pip install streamlit
**Teste:** streamlit hello
**Execução:** streamlit run app.py
**Última atualização do tutorial:** Jan/2021
**Desenvolvido por:** [<NAME>] (https://wellingtondantas.com.br)
"""
)
def text():
#libs
import streamlit as st
st.title('Títulos, Texto, Links e Pontos')
st.markdown(
"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
**👈 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
### Want to learn more?
- Google [google.com](https://google.com)
- Python [python.org](https://python.org)
- Streamlit [streamlit.io](https://www.streamlit.io)
### See more complex demos
- Use a neural net to [analyze the Udacity Self-driving Car Image
Dataset] (https://github.com/streamlit/demo-self-driving)
- Explore a [New York City rideshare dataset]
(https://github.com/streamlit/demo-uber-nyc-pickups)
- Docs [download](https://docs.streamlit.io/_/downloads/en/latest/pdf/)
"""
)
def image():
#libs
import streamlit as st
import pandas as pd
import numpy as np
from PIL import Image
st.title('Imagem')
image = Image.open('resources/img/fortaleza-city.png')
st.image(image, caption='Beautiful Fortaleza City', use_column_width=True)
st.title('Mapa')
map_data = pd.DataFrame(np.random.randn(25, 2) / [50, 50] + [-3.7381, -38.5350], columns=['lat', 'lon'])
st.map(map_data)
def dataframe():
#libs
import streamlit as st
import pandas as pd
import numpy as np
import pandas_datareader as web
st.title('Tabelas de Dataframe')
#Obtem os dados históricos
df = web.DataReader('PETR4.SA', data_source='yahoo', start='2013-01-01', end='2021-01-02')
st.write("Histórico de Preços da Ação PETR4.SA")
st.dataframe(df)
def plot():
#Libs
import streamlit as st
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pandas_datareader as web
st.title('Plotagem de Sinal (Histograma)')
arr = np.random.normal(1, 1, size=100)
fig, ax = plt.subplots()
ax.hist(arr, bins=20)
st.pyplot(fig)
st.title('Plotagem de Sinal')
df = web.DataReader('PETR4.SA', data_source='yahoo', start='2013-01-01', end='2021-01-02')
st.line_chart(df['Close'])
def htmlcss():
#Libs
import streamlit as st
import streamlit.components.v1 as components
st.title('Inserir HTML/CSS')
st.markdown("<h1 style='color:#F00;'>H1 com cor RED</h1>", unsafe_allow_html=True)
st.markdown("<h2 style='color:#0F0;'>H2 com cor GREEN</h2>", unsafe_allow_html=True)
st.markdown("<h3 style='color:#00F;'>H3 com cor BLUE</h1>", unsafe_allow_html=True)
st.markdown("<h1 style='color:#F00; font-family:arial'>H1 com cor RED e Arial</h1>", unsafe_allow_html=True)
st.title('Inserir Iframe')
components.iframe("https://tecnothink.com.br", height=600)
st.title('HTML/CSS Puro')
components.html(
"""
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<div id="accordion">
<div class="card">
<div class="card-header" id="headingOne">
<h5 class="mb-0">
<button class="btn btn-link" data-toggle="collapse" data-target="#collapseOne" aria-expanded="true" aria-controls="collapseOne">
Item #1
</button>
</h5>
</div>
<div id="collapseOne" class="collapse show" aria-labelledby="headingOne" data-parent="#accordion">
<div class="card-body">
Item #1 Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
</div>
</div>
</div>
<div class="card">
<div class="card-header" id="headingTwo">
<h5 class="mb-0">
<button class="btn btn-link collapsed" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="false" aria-controls="collapseTwo">
Item #2
</button>
</h5>
</div>
<div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordion">
<div class="card-body">
Item #2 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
</div>
</div>
</div>
</div>
""",
height=400,
)
def inputs():
#Libs
import streamlit as st
import pandas as pd
import numpy as np
import datetime
st.title('Tipos de Entradas')
info1 = st.slider("Selecione o Ano", 1920, 1995, 2021)
info2 = st.slider("Selecione o Mês", 1, 12, 6)
info3 = st.slider("Selecione o Dia", 1, 31, 15)
info4 = st.date_input("Qual a data de seu anivesário", datetime.date(2019, 7, 6))
info5 = st.text_input('Digite seu Nome')
info6 = st.number_input(label='Informe sua idade', min_value=0, max_value=120, value=20, step=1, format=None, key=None)
info7 = st.number_input(label='Informe seu Peso', min_value=0.0, max_value=200.0, value=20.0, step=0.5, format=None, key=None)
info8 = st.text_area('Mensagem de Texto', 'Lorem ipsum')
info9 = st.file_uploader("Escolha o arquivo")
if info9 is not None:
#Ler arquivo csv
dataframe = pd.read_csv(info9)
st.write(dataframe)
info10 = st.file_uploader("Escolha um arquivo CSV", accept_multiple_files=True)
for i in info10:
bytes_data = i.read()
st.write("Nome do Arquivo:", i.name)
st.write(bytes_data)
info11 = st.selectbox("Qual a sua cor favorita?", ("Verde", "Azul", "Amarelo"))
info12 = st.multiselect("Quais países você conhece?", ['EUA', 'Canadá', 'Portugal','Alemanha', 'China'])
if st.checkbox('Apresenta dataframe'):
chart_data = pd.DataFrame(np.random.randn(20, 3),columns=['a', 'b', 'c'])
st.line_chart(chart_data)
| [
"pandas_datareader.DataReader",
"streamlit.text_input",
"streamlit.image",
"streamlit.sidebar.success",
"pandas.read_csv",
"streamlit.title",
"numpy.random.normal",
"streamlit.components.v1.html",
"numpy.random.randn",
"streamlit.text_area",
"matplotlib.pyplot.subplots",
"streamlit.map",
"st... | [((92, 138), 'streamlit.sidebar.success', 'st.sidebar.success', (['"""Selecione um item abaixo"""'], {}), "('Selecione um item abaixo')\n", (110, 138), True, 'import streamlit as st\n'), ((144, 295), 'streamlit.markdown', 'st.markdown', (['"""<h1 style=\'color:#F00;\'>Tutorial de desenvolvimento rápido com Streamlit Python (tags mais úteis)</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<h1 style=\'color:#F00;\'>Tutorial de desenvolvimento rápido com Streamlit Python (tags mais úteis)</h1>"\n , unsafe_allow_html=True)\n', (155, 295), True, 'import streamlit as st\n'), ((291, 865), 'streamlit.markdown', 'st.markdown', (['"""\n O **Streamlit** 👈 é um pacote Python open-source para criação de layout fácil e com aparência bonita. \n\n Pode ser customizado para projetos de **Inteligência Artificial** e **Data Sciente**.\n\n **Requisitos:** Python: 3.6 - 3.8\n\n **Instalação:** pip install streamlit\n\n **Teste:** streamlit hello\n\n **Execução:** streamlit run app.py\n\n **Última atualização do tutorial:** Jan/2021\n\n **Desenvolvido por:** [<NAME>] (https://wellingtondantas.com.br)\n """'], {}), '(\n """\n O **Streamlit** 👈 é um pacote Python open-source para criação de layout fácil e com aparência bonita. \n\n Pode ser customizado para projetos de **Inteligência Artificial** e **Data Sciente**.\n\n **Requisitos:** Python: 3.6 - 3.8\n\n **Instalação:** pip install streamlit\n\n **Teste:** streamlit hello\n\n **Execução:** streamlit run app.py\n\n **Última atualização do tutorial:** Jan/2021\n\n **Desenvolvido por:** [<NAME>] (https://wellingtondantas.com.br)\n """\n )\n', (302, 865), True, 'import streamlit as st\n'), ((926, 968), 'streamlit.title', 'st.title', (['"""Títulos, Texto, Links e Pontos"""'], {}), "('Títulos, Texto, Links e Pontos')\n", (934, 968), True, 'import streamlit as st\n'), ((973, 1879), 'streamlit.markdown', 'st.markdown', (['"""\n Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n **👈 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n ### Want to learn more?\n\n - Google [google.com](https://google.com)\n - Python [python.org](https://python.org)\n - Streamlit [streamlit.io](https://www.streamlit.io)\n\n ### See more complex demos\n\n - Use a neural net to [analyze the Udacity Self-driving Car Image\n Dataset] (https://github.com/streamlit/demo-self-driving)\n - Explore a [New York City rideshare dataset]\n (https://github.com/streamlit/demo-uber-nyc-pickups)\n - Docs [download](https://docs.streamlit.io/_/downloads/en/latest/pdf/)\n """'], {}), '(\n """\n Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n **👈 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n ### Want to learn more?\n\n - Google [google.com](https://google.com)\n - Python [python.org](https://python.org)\n - Streamlit [streamlit.io](https://www.streamlit.io)\n\n ### See more complex demos\n\n - Use a neural net to [analyze the Udacity Self-driving Car Image\n Dataset] (https://github.com/streamlit/demo-self-driving)\n - Explore a [New York City rideshare dataset]\n (https://github.com/streamlit/demo-uber-nyc-pickups)\n - Docs [download](https://docs.streamlit.io/_/downloads/en/latest/pdf/)\n """\n )\n', (984, 1879), True, 'import streamlit as st\n'), ((2013, 2031), 'streamlit.title', 'st.title', (['"""Imagem"""'], {}), "('Imagem')\n", (2021, 2031), True, 'import streamlit as st\n'), ((2044, 2090), 'PIL.Image.open', 'Image.open', (['"""resources/img/fortaleza-city.png"""'], {}), "('resources/img/fortaleza-city.png')\n", (2054, 2090), False, 'from PIL import Image\n'), ((2095, 2169), 'streamlit.image', 'st.image', (['image'], {'caption': '"""Beautiful Fortaleza City"""', 'use_column_width': '(True)'}), "(image, caption='Beautiful Fortaleza City', use_column_width=True)\n", (2103, 2169), True, 'import streamlit as st\n'), ((2175, 2191), 'streamlit.title', 'st.title', (['"""Mapa"""'], {}), "('Mapa')\n", (2183, 2191), True, 'import streamlit as st\n'), ((2305, 2321), 'streamlit.map', 'st.map', (['map_data'], {}), '(map_data)\n', (2311, 2321), True, 'import streamlit as st\n'), ((2470, 2502), 'streamlit.title', 'st.title', (['"""Tabelas de Dataframe"""'], {}), "('Tabelas de Dataframe')\n", (2478, 2502), True, 'import streamlit as st\n'), ((2543, 2633), 'pandas_datareader.DataReader', 'web.DataReader', (['"""PETR4.SA"""'], {'data_source': '"""yahoo"""', 'start': '"""2013-01-01"""', 'end': '"""2021-01-02"""'}), "('PETR4.SA', data_source='yahoo', start='2013-01-01', end=\n '2021-01-02')\n", (2557, 2633), True, 'import pandas_datareader as web\n'), ((2634, 2682), 'streamlit.write', 'st.write', (['"""Histórico de Preços da Ação PETR4.SA"""'], {}), "('Histórico de Preços da Ação PETR4.SA')\n", (2642, 2682), True, 'import streamlit as st\n'), ((2688, 2704), 'streamlit.dataframe', 'st.dataframe', (['df'], {}), '(df)\n', (2700, 2704), True, 'import streamlit as st\n'), ((2881, 2923), 'streamlit.title', 'st.title', (['"""Plotagem de Sinal (Histograma)"""'], {}), "('Plotagem de Sinal (Histograma)')\n", (2889, 2923), True, 'import streamlit as st\n'), ((2934, 2966), 'numpy.random.normal', 'np.random.normal', (['(1)', '(1)'], {'size': '(100)'}), '(1, 1, size=100)\n', (2950, 2966), True, 'import numpy as np\n'), ((2981, 2995), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2993, 2995), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3040), 'streamlit.pyplot', 'st.pyplot', (['fig'], {}), '(fig)\n', (3035, 3040), True, 'import streamlit as st\n'), ((3046, 3075), 'streamlit.title', 'st.title', (['"""Plotagem de Sinal"""'], {}), "('Plotagem de Sinal')\n", (3054, 3075), True, 'import streamlit as st\n'), ((3085, 3175), 'pandas_datareader.DataReader', 'web.DataReader', (['"""PETR4.SA"""'], {'data_source': '"""yahoo"""', 'start': '"""2013-01-01"""', 'end': '"""2021-01-02"""'}), "('PETR4.SA', data_source='yahoo', start='2013-01-01', end=\n '2021-01-02')\n", (3099, 3175), True, 'import pandas_datareader as web\n'), ((3176, 3202), 'streamlit.line_chart', 'st.line_chart', (["df['Close']"], {}), "(df['Close'])\n", (3189, 3202), True, 'import streamlit as st\n'), ((3310, 3338), 'streamlit.title', 'st.title', (['"""Inserir HTML/CSS"""'], {}), "('Inserir HTML/CSS')\n", (3318, 3338), True, 'import streamlit as st\n'), ((3344, 3430), 'streamlit.markdown', 'st.markdown', (['"""<h1 style=\'color:#F00;\'>H1 com cor RED</h1>"""'], {'unsafe_allow_html': '(True)'}), '("<h1 style=\'color:#F00;\'>H1 com cor RED</h1>",\n unsafe_allow_html=True)\n', (3355, 3430), True, 'import streamlit as st\n'), ((3431, 3519), 'streamlit.markdown', 'st.markdown', (['"""<h2 style=\'color:#0F0;\'>H2 com cor GREEN</h2>"""'], {'unsafe_allow_html': '(True)'}), '("<h2 style=\'color:#0F0;\'>H2 com cor GREEN</h2>",\n unsafe_allow_html=True)\n', (3442, 3519), True, 'import streamlit as st\n'), ((3520, 3607), 'streamlit.markdown', 'st.markdown', (['"""<h3 style=\'color:#00F;\'>H3 com cor BLUE</h1>"""'], {'unsafe_allow_html': '(True)'}), '("<h3 style=\'color:#00F;\'>H3 com cor BLUE</h1>",\n unsafe_allow_html=True)\n', (3531, 3607), True, 'import streamlit as st\n'), ((3608, 3725), 'streamlit.markdown', 'st.markdown', (['"""<h1 style=\'color:#F00; font-family:arial\'>H1 com cor RED e Arial</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<h1 style=\'color:#F00; font-family:arial\'>H1 com cor RED e Arial</h1>",\n unsafe_allow_html=True)\n', (3619, 3725), True, 'import streamlit as st\n'), ((3722, 3748), 'streamlit.title', 'st.title', (['"""Inserir Iframe"""'], {}), "('Inserir Iframe')\n", (3730, 3748), True, 'import streamlit as st\n'), ((3753, 3811), 'streamlit.components.v1.iframe', 'components.iframe', (['"""https://tecnothink.com.br"""'], {'height': '(600)'}), "('https://tecnothink.com.br', height=600)\n", (3770, 3811), True, 'import streamlit.components.v1 as components\n'), ((3818, 3843), 'streamlit.title', 'st.title', (['"""HTML/CSS Puro"""'], {}), "('HTML/CSS Puro')\n", (3826, 3843), True, 'import streamlit as st\n'), ((3848, 5931), 'streamlit.components.v1.html', 'components.html', (['"""\n <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous">\n <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script>\n <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>\n <div id="accordion">\n <div class="card">\n <div class="card-header" id="headingOne">\n <h5 class="mb-0">\n <button class="btn btn-link" data-toggle="collapse" data-target="#collapseOne" aria-expanded="true" aria-controls="collapseOne">\n Item #1\n </button>\n </h5>\n </div>\n <div id="collapseOne" class="collapse show" aria-labelledby="headingOne" data-parent="#accordion">\n <div class="card-body">\n Item #1 Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n </div>\n </div>\n </div>\n <div class="card">\n <div class="card-header" id="headingTwo">\n <h5 class="mb-0">\n <button class="btn btn-link collapsed" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="false" aria-controls="collapseTwo">\n Item #2\n </button>\n </h5>\n </div>\n <div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordion">\n <div class="card-body">\n Item #2 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. \n </div>\n </div>\n </div>\n </div>\n """'], {'height': '(400)'}), '(\n """\n <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous">\n <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script>\n <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>\n <div id="accordion">\n <div class="card">\n <div class="card-header" id="headingOne">\n <h5 class="mb-0">\n <button class="btn btn-link" data-toggle="collapse" data-target="#collapseOne" aria-expanded="true" aria-controls="collapseOne">\n Item #1\n </button>\n </h5>\n </div>\n <div id="collapseOne" class="collapse show" aria-labelledby="headingOne" data-parent="#accordion">\n <div class="card-body">\n Item #1 Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n </div>\n </div>\n </div>\n <div class="card">\n <div class="card-header" id="headingTwo">\n <h5 class="mb-0">\n <button class="btn btn-link collapsed" data-toggle="collapse" data-target="#collapseTwo" aria-expanded="false" aria-controls="collapseTwo">\n Item #2\n </button>\n </h5>\n </div>\n <div id="collapseTwo" class="collapse" aria-labelledby="headingTwo" data-parent="#accordion">\n <div class="card-body">\n Item #2 Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. \n </div>\n </div>\n </div>\n </div>\n """\n , height=400)\n', (3863, 5931), True, 'import streamlit.components.v1 as components\n'), ((6075, 6104), 'streamlit.title', 'st.title', (['"""Tipos de Entradas"""'], {}), "('Tipos de Entradas')\n", (6083, 6104), True, 'import streamlit as st\n'), ((6118, 6164), 'streamlit.slider', 'st.slider', (['"""Selecione o Ano"""', '(1920)', '(1995)', '(2021)'], {}), "('Selecione o Ano', 1920, 1995, 2021)\n", (6127, 6164), True, 'import streamlit as st\n'), ((6177, 6215), 'streamlit.slider', 'st.slider', (['"""Selecione o Mês"""', '(1)', '(12)', '(6)'], {}), "('Selecione o Mês', 1, 12, 6)\n", (6186, 6215), True, 'import streamlit as st\n'), ((6228, 6267), 'streamlit.slider', 'st.slider', (['"""Selecione o Dia"""', '(1)', '(31)', '(15)'], {}), "('Selecione o Dia', 1, 31, 15)\n", (6237, 6267), True, 'import streamlit as st\n'), ((6368, 6400), 'streamlit.text_input', 'st.text_input', (['"""Digite seu Nome"""'], {}), "('Digite seu Nome')\n", (6381, 6400), True, 'import streamlit as st\n'), ((6414, 6529), 'streamlit.number_input', 'st.number_input', ([], {'label': '"""Informe sua idade"""', 'min_value': '(0)', 'max_value': '(120)', 'value': '(20)', 'step': '(1)', 'format': 'None', 'key': 'None'}), "(label='Informe sua idade', min_value=0, max_value=120,\n value=20, step=1, format=None, key=None)\n", (6429, 6529), True, 'import streamlit as st\n'), ((6538, 6660), 'streamlit.number_input', 'st.number_input', ([], {'label': '"""Informe seu Peso"""', 'min_value': '(0.0)', 'max_value': '(200.0)', 'value': '(20.0)', 'step': '(0.5)', 'format': 'None', 'key': 'None'}), "(label='Informe seu Peso', min_value=0.0, max_value=200.0,\n value=20.0, step=0.5, format=None, key=None)\n", (6553, 6660), True, 'import streamlit as st\n'), ((6670, 6718), 'streamlit.text_area', 'st.text_area', (['"""Mensagem de Texto"""', '"""Lorem ipsum"""'], {}), "('Mensagem de Texto', 'Lorem ipsum')\n", (6682, 6718), True, 'import streamlit as st\n'), ((6732, 6769), 'streamlit.file_uploader', 'st.file_uploader', (['"""Escolha o arquivo"""'], {}), "('Escolha o arquivo')\n", (6748, 6769), True, 'import streamlit as st\n'), ((6902, 6972), 'streamlit.file_uploader', 'st.file_uploader', (['"""Escolha um arquivo CSV"""'], {'accept_multiple_files': '(True)'}), "('Escolha um arquivo CSV', accept_multiple_files=True)\n", (6918, 6972), True, 'import streamlit as st\n'), ((7113, 7183), 'streamlit.selectbox', 'st.selectbox', (['"""Qual a sua cor favorita?"""', "('Verde', 'Azul', 'Amarelo')"], {}), "('Qual a sua cor favorita?', ('Verde', 'Azul', 'Amarelo'))\n", (7125, 7183), True, 'import streamlit as st\n'), ((7198, 7298), 'streamlit.multiselect', 'st.multiselect', (['"""Quais países você conhece?"""', "['EUA', 'Canadá', 'Portugal', 'Alemanha', 'China']"], {}), "('Quais países você conhece?', ['EUA', 'Canadá', 'Portugal',\n 'Alemanha', 'China'])\n", (7212, 7298), True, 'import streamlit as st\n'), ((7303, 7337), 'streamlit.checkbox', 'st.checkbox', (['"""Apresenta dataframe"""'], {}), "('Apresenta dataframe')\n", (7314, 7337), True, 'import streamlit as st\n'), ((6329, 6354), 'datetime.date', 'datetime.date', (['(2019)', '(7)', '(6)'], {}), '(2019, 7, 6)\n', (6342, 6354), False, 'import datetime\n'), ((6841, 6859), 'pandas.read_csv', 'pd.read_csv', (['info9'], {}), '(info9)\n', (6852, 6859), True, 'import pandas as pd\n'), ((6868, 6887), 'streamlit.write', 'st.write', (['dataframe'], {}), '(dataframe)\n', (6876, 6887), True, 'import streamlit as st\n'), ((7032, 7068), 'streamlit.write', 'st.write', (['"""Nome do Arquivo:"""', 'i.name'], {}), "('Nome do Arquivo:', i.name)\n", (7040, 7068), True, 'import streamlit as st\n'), ((7077, 7097), 'streamlit.write', 'st.write', (['bytes_data'], {}), '(bytes_data)\n', (7085, 7097), True, 'import streamlit as st\n'), ((7429, 7454), 'streamlit.line_chart', 'st.line_chart', (['chart_data'], {}), '(chart_data)\n', (7442, 7454), True, 'import streamlit as st\n'), ((7373, 7395), 'numpy.random.randn', 'np.random.randn', (['(20)', '(3)'], {}), '(20, 3)\n', (7388, 7395), True, 'import numpy as np\n'), ((2220, 2242), 'numpy.random.randn', 'np.random.randn', (['(25)', '(2)'], {}), '(25, 2)\n', (2235, 2242), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import random
import sys
sys.path.append('./backbones/asrf')
from libs.postprocess import PostProcessor
def refiner_train(cfg, dataset, train_loader, model, backbones, backbone_names, optimizer, epoch, split_dict, device):
normal_ce = nn.CrossEntropyLoss()
total_loss = 0.0
for idx, sample in enumerate(train_loader):
model.train()
x = sample['feature']
t = sample['label']
split_idx = 0
for i in range(eval('cfg.num_splits["{}"]'.format(dataset))):
if sample['feature_path'][0].split('/')[-1].split('.')[0] in split_dict[i+1]:
split_idx = i+1
break
bb_key = random.choice(backbone_names)
curr_backbone = backbones[bb_key][split_idx]
curr_backbone.load_state_dict(torch.load('{}/{}/{}/split_{}/epoch-{}.model'.format(cfg.model_root,
bb_key,
dataset,
str(i+1),
np.random.randint(10, 51))))
curr_backbone.to(device)
curr_backbone.eval()
x, t = x.to(device), t.to(device)
B, L, D = x.shape
if bb_key == 'mstcn':
mask = torch.ones(x.size(), device=device)
action_pred = curr_backbone(x, mask)
action_idx = torch.argmax(action_pred[-1], dim=1).squeeze().detach()
elif bb_key == 'mgru':
action_pred = curr_backbone(x)
action_idx = torch.argmax(action_pred, dim=1).squeeze().detach()
elif bb_key == 'sstda':
mask = torch.ones(x.size(), device=device)
action_pred, _, _, _, _, _, _, _, _, _, _, _, _, _ = curr_backbone(x,
x,
mask,
mask,
[0, 0],
reverse=False)
action_idx = torch.argmax(action_pred[:, -1, :, :], dim=1).squeeze().detach()
elif bb_key == 'asrf':
out_cls, out_bound = curr_backbone(x)
postprocessor = PostProcessor("refinement_with_boundary", cfg.boundary_th)
refined_output_cls = postprocessor(out_cls.cpu().data.numpy(), boundaries=out_bound.cpu().data.numpy(),
masks=torch.ones(1, 1, x.shape[-1]).bool().data.numpy())
action_idx = torch.Tensor(refined_output_cls).squeeze().detach()
refine_pred, refine_rollout, GTlabel_list = model(action_idx.to(device), x, t)
loss = 0.0
loss += normal_ce(refine_pred[0], GTlabel_list.view(-1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss / len(train_loader)
return total_loss.item() | [
"sys.path.append",
"torch.ones",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"random.choice",
"libs.postprocess.PostProcessor",
"numpy.random.randint",
"torch.Tensor"
] | [((79, 114), 'sys.path.append', 'sys.path.append', (['"""./backbones/asrf"""'], {}), "('./backbones/asrf')\n", (94, 114), False, 'import sys\n'), ((294, 315), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (313, 315), True, 'import torch.nn as nn\n'), ((744, 773), 'random.choice', 'random.choice', (['backbone_names'], {}), '(backbone_names)\n', (757, 773), False, 'import random\n'), ((1325, 1350), 'numpy.random.randint', 'np.random.randint', (['(10)', '(51)'], {}), '(10, 51)\n', (1342, 1350), True, 'import numpy as np\n'), ((2710, 2768), 'libs.postprocess.PostProcessor', 'PostProcessor', (['"""refinement_with_boundary"""', 'cfg.boundary_th'], {}), "('refinement_with_boundary', cfg.boundary_th)\n", (2723, 2768), False, 'from libs.postprocess import PostProcessor\n'), ((1662, 1698), 'torch.argmax', 'torch.argmax', (['action_pred[-1]'], {'dim': '(1)'}), '(action_pred[-1], dim=1)\n', (1674, 1698), False, 'import torch\n'), ((1826, 1858), 'torch.argmax', 'torch.argmax', (['action_pred'], {'dim': '(1)'}), '(action_pred, dim=1)\n', (1838, 1858), False, 'import torch\n'), ((2523, 2568), 'torch.argmax', 'torch.argmax', (['action_pred[:, -1, :, :]'], {'dim': '(1)'}), '(action_pred[:, -1, :, :], dim=1)\n', (2535, 2568), False, 'import torch\n'), ((3015, 3047), 'torch.Tensor', 'torch.Tensor', (['refined_output_cls'], {}), '(refined_output_cls)\n', (3027, 3047), False, 'import torch\n'), ((2939, 2968), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'x.shape[-1]'], {}), '(1, 1, x.shape[-1])\n', (2949, 2968), False, 'import torch\n')] |
import dace
import numpy as np
N = dace.symbol('N')
@dace.program
def dace_softmax(X_in: dace.float32[N], X_out: dace.float32[N]):
tmp_max = dace.reduce(lambda a, b: a + b, X_in, identity=0)
X_out[:] = exp(X_in - tmp_max)
tmp_sum = dace.reduce(lambda a, b: max(a, b), X_in)
X_out[:] /= tmp_sum
@dace.program
def nested_call_subarray(a: dace.float32[2], b: dace.float32[2]):
dace_softmax(a[:], b[:])
if __name__ == '__main__':
A = np.array([1, 2], dtype=np.float32)
B = np.array([1, 2], dtype=np.float32)
nested_call_subarray(A, B, N=2)
| [
"numpy.array",
"dace.reduce",
"dace.symbol"
] | [((36, 52), 'dace.symbol', 'dace.symbol', (['"""N"""'], {}), "('N')\n", (47, 52), False, 'import dace\n'), ((148, 197), 'dace.reduce', 'dace.reduce', (['(lambda a, b: a + b)', 'X_in'], {'identity': '(0)'}), '(lambda a, b: a + b, X_in, identity=0)\n', (159, 197), False, 'import dace\n'), ((461, 495), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.float32'}), '([1, 2], dtype=np.float32)\n', (469, 495), True, 'import numpy as np\n'), ((504, 538), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.float32'}), '([1, 2], dtype=np.float32)\n', (512, 538), True, 'import numpy as np\n')] |
import os, time, glob, argparse
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tnn import main
from tnn.reciprocalgaternn import tnn_ReciprocalGateCell
from tnn.convrnn import tnn_ConvBasicCell
'''This is an example of passing a custom cell to your model,
in this case a vanilla convRNN implemented from scratch,
which can serve as a template for more complex custom cells'''
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--gpus', default=['1'], nargs='*')
parser.add_argument('--ntimes', default=5, type=int)
parser.add_argument('--nsteps', default=int(1e5), type=lambda x: int(float(x)))
FLAGS, _ = parser.parse_known_args()
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(FLAGS.gpus)
batch_size = FLAGS.batch_size
NUM_TIMESTEPS = 4 # number of timesteps we are predicting on
NETWORK_DEPTH = 5 # number of total layers in our network
DATA_PATH = 'datasets/' # path where MNIST data will be automatically downloaded to
# we always unroll num_timesteps after the first output of the model
TOTAL_TIMESTEPS = NETWORK_DEPTH + NUM_TIMESTEPS
# we unroll at least NETWORK_DEPTH times (3 in this case) so that the input can reach the output of the network
# note tau is the value of the memory decay (by default 0) at the readout layer and trainable_flag is whether the memory decay is trainable, which by default is False
BASE_NAME = '../json/5L_mnist28_recip345sig_noBN'
# BASE_NAME = '../json/5L_mnist28_recip345sig'
#BASE_NAME = '../json/VanillaRNN'
def model_func(input_images, ntimes=TOTAL_TIMESTEPS,
batch_size=batch_size, edges_arr=[],
base_name=BASE_NAME,
tau=0.0, trainable_flag=False):
with tf.variable_scope("my_model"):
# reshape the 784 dimension MNIST digits to be 28x28 images
input_images = tf.reshape(input_images, [-1, 28, 28, 1])
base_name += '.json'
print('Using base: ', base_name)
# creates the feedforward network graph from json
G = main.graph_from_json(base_name)
for node, attr in G.nodes(data=True):
memory_func, memory_param = attr['kwargs']['memory']
if 'cell_depth' in memory_param:
# if 'out_depth' in memory_param:
# this is where you add your custom cell
# attr['cell'] = tnn_ConvBasicCell
attr['cell'] = tnn_ReciprocalGateCell
else:
# default to not having a memory cell
# tau = 0.0, trainable = False
attr['kwargs']['memory'][1]['memory_decay'] = tau
attr['kwargs']['memory'][1]['trainable'] = trainable_flag
# add any non feedforward connections here: e.g. [('L2', 'L1')]
G.add_edges_from(edges_arr)
# initialize network to infer the shapes of all the parameters
main.init_nodes(G, input_nodes=['L1'], batch_size=batch_size)
# unroll the network through time
main.unroll(G, input_seq={'L1': input_images}, ntimes=ntimes)
outputs = {}
# start from the final output of the model and 4 timesteps beyond that
for t in range(ntimes-NUM_TIMESTEPS, ntimes):
idx = t - (ntimes - NUM_TIMESTEPS) # keys start at timepoint 0
outputs[idx] = G.node['readout']['outputs'][t]
return outputs
def train(restore=True):
# get MNIST images
mnist = input_data.read_data_sets(DATA_PATH, one_hot=False)
# create the model
x = tf.placeholder(tf.float32, [batch_size, 784])
y_ = tf.placeholder(tf.int64, [batch_size]) # predicting 10 outputs
outputs = model_func(x, ntimes=TOTAL_TIMESTEPS,
batch_size=batch_size, edges_arr=[],
base_name=BASE_NAME, tau=0.0, trainable_flag=False)
# setup the loss (average across time, the cross entropy loss at each timepoint
# between model predictions and labels)
with tf.name_scope('cumulative_loss'):
outputs_arr = [tf.squeeze(outputs[i]) for i in range(len(outputs))]
cumm_loss = tf.add_n([tf.losses.sparse_softmax_cross_entropy(logits=outputs_arr[i], labels=y_) \
for i in range(len(outputs))]) / len(outputs)
# setup the optimizer
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cumm_loss)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#if restore:
# saver.restore(sess, save_path='./ckpts/model.ckpt')
for i in range(20000):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
if i % 100 == 0:
train_loss = cumm_loss.eval(feed_dict={x: batch_xs, y_: batch_ys})
print('step %d, training loss %g' % (i, train_loss))
saver.save(sess, './ckpts/model.ckpt', global_step=i)
train_step.run(feed_dict={x: batch_xs, y_: batch_ys})
def get_features(ims, layer='imnetds'):
# create the model
x = tf.placeholder(tf.float32, [batch_size, 784])
y_ = tf.placeholder(tf.int64, [batch_size]) # predicting 10 outputs
outputs = model_func(x, ntimes=TOTAL_TIMESTEPS,
batch_size=batch_size, edges_arr=[],
base_name=BASE_NAME, tau=0.0, trainable_flag=False)
# placeholder = tf.placeholder(shape=(None, ims[0].shape[0], ims[0].shape[1], 3), dtype=tf.float32)
# basenet(placeholder, conv_only=True)
op = tf.get_default_graph().get_operations()
print('mark',[m.name for m in op if 'output' in m.name])
target = tf.get_default_graph().get_tensor_by_name('my_model/{}_8/output:0'.format(layer))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, save_path='./ckpts/model.ckpt')
n_batches = (len(ims) - 1) // batch_size + 1
out = []
for i in range(n_batches):
batch = ims[batch_size * i: batch_size * (i + 1)]
batch_out = sess.run(target, feed_dict={x: batch})
out.append(batch_out)
out = np.row_stack(out)
return out
if __name__ == '__main__':
#ims = np.random.random([batch_size,784])
#out = get_features(ims)
#print(out)
#print(out.shape)
train()
| [
"tnn.main.unroll",
"tnn.main.init_nodes",
"argparse.ArgumentParser",
"tensorflow.train.Saver",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.global_variables_initializer",
"tensorflow.reshape",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.get_default_graph",
"te... | [((449, 474), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (472, 474), False, 'import os, time, glob, argparse\n'), ((3450, 3501), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['DATA_PATH'], {'one_hot': '(False)'}), '(DATA_PATH, one_hot=False)\n', (3475, 3501), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((3538, 3583), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, 784]'], {}), '(tf.float32, [batch_size, 784])\n', (3552, 3583), True, 'import tensorflow as tf\n'), ((3598, 3636), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[batch_size]'], {}), '(tf.int64, [batch_size])\n', (3612, 3636), True, 'import tensorflow as tf\n'), ((4401, 4417), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4415, 4417), True, 'import tensorflow as tf\n'), ((5076, 5121), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batch_size, 784]'], {}), '(tf.float32, [batch_size, 784])\n', (5090, 5121), True, 'import tensorflow as tf\n'), ((5136, 5174), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[batch_size]'], {}), '(tf.int64, [batch_size])\n', (5150, 5174), True, 'import tensorflow as tf\n'), ((5737, 5753), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5751, 5753), True, 'import tensorflow as tf\n'), ((1752, 1781), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""my_model"""'], {}), "('my_model')\n", (1769, 1781), True, 'import tensorflow as tf\n'), ((1874, 1915), 'tensorflow.reshape', 'tf.reshape', (['input_images', '[-1, 28, 28, 1]'], {}), '(input_images, [-1, 28, 28, 1])\n', (1884, 1915), True, 'import tensorflow as tf\n'), ((2056, 2087), 'tnn.main.graph_from_json', 'main.graph_from_json', (['base_name'], {}), '(base_name)\n', (2076, 2087), False, 'from tnn import main\n'), ((2901, 2962), 'tnn.main.init_nodes', 'main.init_nodes', (['G'], {'input_nodes': "['L1']", 'batch_size': 'batch_size'}), "(G, input_nodes=['L1'], batch_size=batch_size)\n", (2916, 2962), False, 'from tnn import main\n'), ((3013, 3074), 'tnn.main.unroll', 'main.unroll', (['G'], {'input_seq': "{'L1': input_images}", 'ntimes': 'ntimes'}), "(G, input_seq={'L1': input_images}, ntimes=ntimes)\n", (3024, 3074), False, 'from tnn import main\n'), ((3968, 4000), 'tensorflow.name_scope', 'tf.name_scope', (['"""cumulative_loss"""'], {}), "('cumulative_loss')\n", (3981, 4000), True, 'import tensorflow as tf\n'), ((4281, 4312), 'tensorflow.name_scope', 'tf.name_scope', (['"""adam_optimizer"""'], {}), "('adam_optimizer')\n", (4294, 4312), True, 'import tensorflow as tf\n'), ((4427, 4439), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4437, 4439), True, 'import tensorflow as tf\n'), ((5763, 5775), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5773, 5775), True, 'import tensorflow as tf\n'), ((6176, 6193), 'numpy.row_stack', 'np.row_stack', (['out'], {}), '(out)\n', (6188, 6193), True, 'import numpy as np\n'), ((4025, 4047), 'tensorflow.squeeze', 'tf.squeeze', (['outputs[i]'], {}), '(outputs[i])\n', (4035, 4047), True, 'import tensorflow as tf\n'), ((4466, 4499), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4497, 4499), True, 'import tensorflow as tf\n'), ((5529, 5551), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5549, 5551), True, 'import tensorflow as tf\n'), ((5643, 5665), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5663, 5665), True, 'import tensorflow as tf\n'), ((5802, 5835), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5833, 5835), True, 'import tensorflow as tf\n'), ((4335, 4365), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (4357, 4365), True, 'import tensorflow as tf\n'), ((4108, 4180), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'logits': 'outputs_arr[i]', 'labels': 'y_'}), '(logits=outputs_arr[i], labels=y_)\n', (4146, 4180), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import numpy as np
from tensorflow.contrib.slim.nets.inception import inception_v3
import tensorflow.contrib.eager as tfe
session = tf.InteractiveSession()
batch_size = 32
def get_inception_score(images, batch_size, splits=10):
"""
the function is to calculate the inception score of the generated images
image is a numpy array with values should be in the range[0, 255]
images 299x299x3
"""
assert(type(images) == np.ndarray)
inception_model = inception_v3
inception_model.eval()
def get_softmax(x):
x = inception_model(x)
return tf.nn.softmax(x)
n = len(images) // batch_size
preds = np.zeros([len(images), 1000], dtype=np.float32)
tfe.enable_egaer_execution()
dataloader = tf.data.Dataset.from_tensor_slices(images)
dataloader = data.batch(batch_size)
for i, batch in enumerate(tfe.Iterator(dataloader), 0):
batch_x = tf.Variable(batch) # images
# softmax
preds[i * batch_size:(i + 1) * batch_size] = get_softmax(batch_x)
scores = []
# IS score
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
if __name__ == '__main__':
score = get_inception_score(images)
print(score)
####the bull shit | [
"tensorflow.nn.softmax",
"numpy.sum",
"tensorflow.contrib.eager.enable_egaer_execution",
"numpy.log",
"numpy.std",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.contrib.eager.Iterator",
"tensorflow.Variable",
"numpy.mean",
"numpy.exp",
"tensorflow.InteractiveSession"
] | [((157, 180), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (178, 180), True, 'import tensorflow as tf\n'), ((730, 758), 'tensorflow.contrib.eager.enable_egaer_execution', 'tfe.enable_egaer_execution', ([], {}), '()\n', (756, 758), True, 'import tensorflow.contrib.eager as tfe\n'), ((776, 818), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['images'], {}), '(images)\n', (810, 818), True, 'import tensorflow as tf\n'), ((613, 629), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['x'], {}), '(x)\n', (626, 629), True, 'import tensorflow as tf\n'), ((889, 913), 'tensorflow.contrib.eager.Iterator', 'tfe.Iterator', (['dataloader'], {}), '(dataloader)\n', (901, 913), True, 'import tensorflow.contrib.eager as tfe\n'), ((937, 955), 'tensorflow.Variable', 'tf.Variable', (['batch'], {}), '(batch)\n', (948, 955), True, 'import tensorflow as tf\n'), ((1373, 1388), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1380, 1388), True, 'import numpy as np\n'), ((1390, 1404), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (1396, 1404), True, 'import numpy as np\n'), ((1312, 1325), 'numpy.sum', 'np.sum', (['kl', '(1)'], {}), '(kl, 1)\n', (1318, 1325), True, 'import numpy as np\n'), ((1349, 1359), 'numpy.exp', 'np.exp', (['kl'], {}), '(kl)\n', (1355, 1359), True, 'import numpy as np\n'), ((1231, 1243), 'numpy.log', 'np.log', (['part'], {}), '(part)\n', (1237, 1243), True, 'import numpy as np\n'), ((1268, 1284), 'numpy.mean', 'np.mean', (['part', '(0)'], {}), '(part, 0)\n', (1275, 1284), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
import zmq
from sklearn.externals import joblib
from feature_generation import feature_generation
import numpy as np
parser = ArgumentParser()
parser.add_argument('model')
parser.add_argument('-p', '--port', type=int, default=5000)
context = zmq.Context()
socket = context.socket(zmq.REP)
def main():
args = parser.parse_args()
socket.bind('tcp://0.0.0.0:{}'.format(args.port))
model = joblib.load(args.model)
while True:
data = socket.recv_pyobj()
socket.send_string('ok')
features = feature_generation(data)
X = np.array([[features[f] for f in model.features]])
pred = model.predict(X)
print(model.labels[pred[0]])
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"feature_generation.feature_generation",
"numpy.array",
"sklearn.externals.joblib.load",
"zmq.Context"
] | [((163, 179), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (177, 179), False, 'from argparse import ArgumentParser\n'), ((280, 293), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (291, 293), False, 'import zmq\n'), ((439, 462), 'sklearn.externals.joblib.load', 'joblib.load', (['args.model'], {}), '(args.model)\n', (450, 462), False, 'from sklearn.externals import joblib\n'), ((568, 592), 'feature_generation.feature_generation', 'feature_generation', (['data'], {}), '(data)\n', (586, 592), False, 'from feature_generation import feature_generation\n'), ((606, 655), 'numpy.array', 'np.array', (['[[features[f] for f in model.features]]'], {}), '([[features[f] for f in model.features]])\n', (614, 655), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# my data set
data_prefix = 'data/'
data_list = ['2018_02_25.csv', '2018_02_26.csv', '2018_02_27.csv',
'2018_03_03.csv', '2018_03_10.csv']
for ind, data in enumerate(data_list):
dataset = pd.read_csv(data_prefix+data)
for i in range(2):
j = i*3
f = dataset.values[:, j]
n = len(f)
zr = dataset.values[:, j+1]
zj = dataset.values[:, j+2]
# sort the zr zj and f values
f_ind = np.argsort(f)
f = f[f_ind]
zr = zr[f_ind]
zj = zj[f_ind]
# remove nans in zr and zj experimental data
inds = np.where(np.isnan(np.log10(zj)))
zj = np.delete(zj, inds)
zr = np.delete(zr, inds)
f = np.delete(f, inds)
inds = np.where(np.isnan(np.log10(zr)))
zj = np.delete(zj, inds)
zr = np.delete(zr, inds)
f = np.delete(f, inds)
n = len(f)
# calculate magnitude
mag = np.sqrt(zr**2 + zj**2)
# plt.figure()
# plt.loglog(f, 1./mag, '.-')
# plt.show()
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(11, 7))
ax1.loglog(f, 1./zj, '.-')
ax1.set_xlabel('Frequency')
ax1.set_ylabel(r'$\frac{1}{|Z|_j}$')
ax2.loglog(f, 1./zr, '.-')
ax2.set_xlabel('Frequency')
ax2.set_ylabel(r'$\frac{1}{|Z|_r}$')
ax3.loglog(f, 1./mag, '.-')
ax3.set_xlabel('Frequency')
ax3.set_ylabel(r'$\frac{1}{|Z|}$')
ax4.loglog(zj, zr, 'xk')
ax4.set_xlabel('$Z_r$')
ax4.set_ylabel(r'$-Z_j$')
fig.show() | [
"pandas.read_csv",
"numpy.argsort",
"numpy.log10",
"matplotlib.pyplot.subplots",
"numpy.delete",
"numpy.sqrt"
] | [((277, 308), 'pandas.read_csv', 'pd.read_csv', (['(data_prefix + data)'], {}), '(data_prefix + data)\n', (288, 308), True, 'import pandas as pd\n'), ((533, 546), 'numpy.argsort', 'np.argsort', (['f'], {}), '(f)\n', (543, 546), True, 'import numpy as np\n'), ((729, 748), 'numpy.delete', 'np.delete', (['zj', 'inds'], {}), '(zj, inds)\n', (738, 748), True, 'import numpy as np\n'), ((762, 781), 'numpy.delete', 'np.delete', (['zr', 'inds'], {}), '(zr, inds)\n', (771, 781), True, 'import numpy as np\n'), ((794, 812), 'numpy.delete', 'np.delete', (['f', 'inds'], {}), '(f, inds)\n', (803, 812), True, 'import numpy as np\n'), ((874, 893), 'numpy.delete', 'np.delete', (['zj', 'inds'], {}), '(zj, inds)\n', (883, 893), True, 'import numpy as np\n'), ((907, 926), 'numpy.delete', 'np.delete', (['zr', 'inds'], {}), '(zr, inds)\n', (916, 926), True, 'import numpy as np\n'), ((939, 957), 'numpy.delete', 'np.delete', (['f', 'inds'], {}), '(f, inds)\n', (948, 957), True, 'import numpy as np\n'), ((1022, 1048), 'numpy.sqrt', 'np.sqrt', (['(zr ** 2 + zj ** 2)'], {}), '(zr ** 2 + zj ** 2)\n', (1029, 1048), True, 'import numpy as np\n'), ((1145, 1180), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(11, 7)'}), '(2, 2, figsize=(11, 7))\n', (1157, 1180), True, 'import matplotlib.pyplot as plt\n'), ((701, 713), 'numpy.log10', 'np.log10', (['zj'], {}), '(zj)\n', (709, 713), True, 'import numpy as np\n'), ((846, 858), 'numpy.log10', 'np.log10', (['zr'], {}), '(zr)\n', (854, 858), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as ss
from scipy.stats import norm, skew
from scipy.special import boxcox1p
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from utils import plot_miss_val
from utils import norm_target, scatter_plot, qq_plot
class Data:
def __init__(self, train_path, test_path):
self.train = pd.read_csv(train_path)
self.test = pd.read_csv(test_path)
def train_disp(self):
return print(self.train.head())
def test_disp(self):
return print(self.test.head())
def drop_train_id(self, show=False):
train_ID = self.train['Id']
self.train.drop("Id", axis = 1, inplace = True)
if show:
print("\nThe train data size before dropping Id feature is : {} ".format(self.train.shape))
print("\nThe train data size after dropping Id feature is : {} ".format(self.train.shape))
return train_ID
def drop_test_id(self, show=False):
test_ID = self.test['Id']
self.test.drop("Id", axis = 1, inplace = True)
if show:
print("\nThe test data size before dropping Id feature is : {} ".format(self.test.shape))
print("\nThe test data size after dropping Id feature is : {} ".format(self.test.shape))
return test_ID
def train_del_outliers(self, column1_set, column2, x_lim_set, y_lim, plot=False):
for column1 in column1_set:
for x_lim in x_lim_set:
self.train.drop(self.train[(self.train[column1]>x_lim) & (self.train[column2]<y_lim)].index, inplace=True)
if plot:
scatter_plot(train, [column1], [column2])
return self.train
def train_log_transform(self, target, plot=False):
ds = np.log1p(self.train[target])
if plot:
norm_target(train, target)
qq_plot(train, target)
return ds
def target(self, df, target):
return df[target].values
def all_data_missing(self, ds, plot=False, show=False):
all_data_na = (ds.isnull().sum() / len(ds)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
if plot:
plot_miss_val(all_data_na)
if show:
print(missing_data.head(20))
return ds, missing_data
def fill_na(self, missing_data, df):
for col in missing_data:
df[col] = df[col].fillna("None")
return df
def group_by(self, df, column1, column2):
df[column1] = df.groupby(column2)[column1].transform(lambda x: x.fillna(x.median()))
return df
def fill_zero(self, zero_data, df):
for col in zero_data:
df[col] = df[col].fillna(0)
return df
def drop_feature(self, features, df):
return df.drop(features, axis=1)
def data_replace(self, feature, replace, df):
df[feature] = df[feature].fillna(replace)
return df
def fill_most_frequent(self, data, df):
for col in data:
df[col] = df[col].fillna(df[col].mode()[0])
return df
def transform_num_cat(self, data, df):
for col in data:
df[col] = df[col].astype(str)
return df
def label_encoding(self, data, df):
for col in data:
lbl = LabelEncoder()
lbl.fit(list(df[col].values))
df[col] = lbl.transform(list(df[col].values))
return df
def skew_features(self, df, verbose=False):
numeric_feats = df.dtypes[df.dtypes != "object"].index
skewed_feats = df[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
skewness = pd.DataFrame({'Skew' :skewed_feats})
if verbose:
print("\nSkew in numerical features: \n")
print(skewness.head(10))
skewness = skewness[abs(skewness) > 0.75]
if verbose:
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
#df[feat] += 1
df[feat] = boxcox1p(df[feat], lam)
#df[skewed_features] = np.log1p(all_data[skewed_features])
return df
def dummy_features(self, df):
return pd.get_dummies(df)
def to_csv(self, df_train, df_test, index, split='train'):
if split == 'train':
drop_col = [list(df_train.columns)[i] for i in range(len(list(df_train.columns))-1) if list(df_train.columns)[i] not in df_test.columns and list(df_train.columns)[i] != 'SalePrice']
df = df_train.drop(drop_col, axis=1)
self.check_missing_data(df)
#print(df.head())
else:
df = df_test
df = pd.concat([index, df], axis=1)
self.check_missing_data(df)
#print(df.head())
return df.to_csv('../csv/clean_'+split+'.csv', index=False)
def check_missing_data(self, df):
df_na = (df.isnull().sum() / len(df)) * 100
df_na = df_na.drop(df_na[df_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :df_na})
return print(missing_data)
def scaler(self, ds, verbose=False):
scaler = StandardScaler()
scaler.fit(ds)
if verbose:
print(scaler.mean_)
print(scaler.scale_)
return scaler.transform(ds) | [
"pandas.DataFrame",
"sklearn.preprocessing.StandardScaler",
"utils.plot_miss_val",
"pandas.read_csv",
"pandas.get_dummies",
"utils.norm_target",
"utils.qq_plot",
"sklearn.preprocessing.LabelEncoder",
"scipy.special.boxcox1p",
"utils.scatter_plot",
"pandas.concat",
"numpy.log1p"
] | [((455, 478), 'pandas.read_csv', 'pd.read_csv', (['train_path'], {}), '(train_path)\n', (466, 478), True, 'import pandas as pd\n'), ((500, 522), 'pandas.read_csv', 'pd.read_csv', (['test_path'], {}), '(test_path)\n', (511, 522), True, 'import pandas as pd\n'), ((1888, 1916), 'numpy.log1p', 'np.log1p', (['self.train[target]'], {}), '(self.train[target])\n', (1896, 1916), True, 'import numpy as np\n'), ((2356, 2400), 'pandas.DataFrame', 'pd.DataFrame', (["{'Missing Ratio': all_data_na}"], {}), "({'Missing Ratio': all_data_na})\n", (2368, 2400), True, 'import pandas as pd\n'), ((3970, 4006), 'pandas.DataFrame', 'pd.DataFrame', (["{'Skew': skewed_feats}"], {}), "({'Skew': skewed_feats})\n", (3982, 4006), True, 'import pandas as pd\n'), ((4614, 4632), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {}), '(df)\n', (4628, 4632), True, 'import pandas as pd\n'), ((5462, 5500), 'pandas.DataFrame', 'pd.DataFrame', (["{'Missing Ratio': df_na}"], {}), "({'Missing Ratio': df_na})\n", (5474, 5500), True, 'import pandas as pd\n'), ((5604, 5620), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5618, 5620), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1750, 1791), 'utils.scatter_plot', 'scatter_plot', (['train', '[column1]', '[column2]'], {}), '(train, [column1], [column2])\n', (1762, 1791), False, 'from utils import norm_target, scatter_plot, qq_plot\n'), ((1947, 1973), 'utils.norm_target', 'norm_target', (['train', 'target'], {}), '(train, target)\n', (1958, 1973), False, 'from utils import norm_target, scatter_plot, qq_plot\n'), ((1986, 2008), 'utils.qq_plot', 'qq_plot', (['train', 'target'], {}), '(train, target)\n', (1993, 2008), False, 'from utils import norm_target, scatter_plot, qq_plot\n'), ((2439, 2465), 'utils.plot_miss_val', 'plot_miss_val', (['all_data_na'], {}), '(all_data_na)\n', (2452, 2465), False, 'from utils import plot_miss_val\n'), ((3598, 3612), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3610, 3612), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4453, 4476), 'scipy.special.boxcox1p', 'boxcox1p', (['df[feat]', 'lam'], {}), '(df[feat], lam)\n', (4461, 4476), False, 'from scipy.special import boxcox1p\n'), ((5096, 5126), 'pandas.concat', 'pd.concat', (['[index, df]'], {'axis': '(1)'}), '([index, df], axis=1)\n', (5105, 5126), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
import numpy
import matplotlib.pyplot as plt
from pandas import read_csv
import pandas as pd
import math
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import mean_squared_error
import os
from keras.utils import multi_gpu_model
# if train on GPU
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# set dataset and parameters
DATASET = 'data/crime_H.csv'
look_back = 72
look_forward = 24
epochs = 25
batch_size = 256
def create_dataset(dataset, look_back = 1, look_forward = 1):
dataX, dataY = [], []
feature_dim = dataset.shape[1]
for i in range(len(dataset)-look_back-look_forward):
X = dataset[i: (i+look_back), :]
Y = dataset[i+look_back: i+look_back+look_forward, 0]
dataX.append(X)
dataY.append(Y)
return numpy.array(dataX), numpy.array(dataY)
def create_resultset(dataset, look_back = 1):
dataX = []
a = dataset[len(dataset)-look_back:len(dataset), :]
dataX.append(a)
return numpy.array(dataX)
def inverse_transform(dataY, feature_dim, look_forward = 1):
dataset_like = numpy.zeros(shape=(len(dataY), feature_dim))
for i in range(look_forward):
dataset_like[:, 0:1] = dataY[:, i:i+1]
dataY[:, i:i+1] = scaler.inverse_transform(dataset_like)[:, 0:1]
return dataY
if __name__ == '__main__':
# read dataset
dataframe = read_csv(DATASET, usecols=['Type', 'Type_property', 'Type_violent', 'Loc_public', 'Loc_private',\
'Arrest', 'Domestic'], engine='python')
dataframe = dataframe[['Type', 'Type_property', 'Type_violent', 'Loc_public', 'Loc_private', 'Arrest', 'Domestic']]
dataset = dataframe.values
dataset = dataset.astype('float32')
# normalization
scaler = MinMaxScaler(feature_range = (0,1))
dataset = scaler.fit_transform(dataset)
# split trainset and testset
train_size = int(len(dataset) * 3/4)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
# create trainset and testset
print(train.shape)
trainX, trainY = create_dataset(train, look_back, look_forward)
testX, testY = create_dataset(test, look_back, look_forward)
print(trainX.shape, trainY.shape)
# reshape to LSTM input format (sample, step, feature_dim)
trainX = numpy.reshape(trainX, (trainX.shape[0], look_back, train.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], look_back, test.shape[1]))
# LSTM (hidden units, step, feature_dim)
model = Sequential()
#model.add(LSTM(128, input_shape = (trainX.shape[1], trainX.shape[2])))
model.add(LSTM(100, return_sequences=True, input_shape = (trainX.shape[1], trainX.shape[2])))
model.add(LSTM(50))
model.add(Dense(trainY.shape[1]))
model.compile(loss = 'mean_squared_error', optimizer = 'adam')
model.fit(trainX, trainY, epochs = epochs, batch_size = batch_size, verbose = 2)
#if trained parallelly
#parallel_model = multi_gpu_model(model, gpus=2)
#parallel_model.compile(loss='mean_squared_error', optimizer='adam')
#parallel_model.fit(trainX, trainY, epochs = epochs, batch_size = batch_size, verbose = 2)
#model.save('model/model_%sb%sf.h5' % (look_back, look_forward))
#del model
#model = load_model('model/model_%sb%sf.h5' % (look_back, look_forward))
# predict
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
#print(trainPredict.shape, testPredict.shape)
'''
# if predict recursively
trainPredict = numpy.concatenate((trainPredict, testPredict))
trainPredict = numpy.append(trainPredict, testPredict)
trainPredict = numpy.reshape(trainPredict, (trainPredict.shape[0], train.shape[1]))
for i in range(12):
new_train = create_trainset(trainPredict, look_back)
new_train = numpy.reshape(new_train, (new_train.shape[0], look_back, train.shape[1]))
new_train = model.predict(new_train)
trainPredict = numpy.concatenate((trainPredict, new_train))
'''
# inverse_transform
print('Before transform:', trainPredict.shape, trainY.shape)
trainPredict = inverse_transform(trainPredict, dataset.shape[1], look_forward)
trainY = inverse_transform(trainY, dataset.shape[1], look_forward)
testPredict = inverse_transform(testPredict, dataset.shape[1], look_forward)
testY = inverse_transform(testY, dataset.shape[1], look_forward)
print('After transform:', trainPredict.shape, trainY.shape)
# calculate RMSE
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY, testPredict))
print('Test Score: %.2f RMSE' % (testScore))
resultX = create_resultset(test, look_back)
resultX = numpy.reshape(resultX, (resultX.shape[0], look_back, test.shape[1]))
result = model.predict(resultX)
result = inverse_transform(result, dataset.shape[1], look_forward)
result = numpy.reshape(result, (look_forward, 1))
#resultPredictPlot[len(dataset): len(dataset)+look_forward, 0] = result[:, 0]
#print(result.shape, resultPredictPlot.shape)
# save predicted value
df = pd.DataFrame(result.astype(int))
df.columns = ['Predict']
df.to_csv('data/predict.csv', index = False)
# plot for test
'''
trainPredictPlot = numpy.empty((len(trainPredict)+look_back, look_forward))
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, 0] = trainPredict[:, 0]
testPredictPlot = numpy.empty((len(dataset), look_forward))
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(dataset)-len(testPredict)-look_forward:len(dataset)-look_forward, 0] = testPredict[:, 0]
resultPredictPlot = numpy.empty((len(dataset)+look_forward, 1))
resultPredictPlot[:, :] = numpy.nan
resultPredictPlot[len(dataset): len(dataset)+look_forward, 0] = result[:, 0]
datasetPlot = scaler.inverse_transform(dataset)
plt.plot(datasetPlot[:, 0])
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.plot(resultPredictPlot)
#plt.savefig("test.png")
plt.show()
'''
| [
"pandas.read_csv",
"keras.layers.LSTM",
"sklearn.preprocessing.MinMaxScaler",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"keras.models.Sequential",
"sklearn.metrics.mean_squared_error"
] | [((1185, 1203), 'numpy.array', 'numpy.array', (['dataX'], {}), '(dataX)\n', (1196, 1203), False, 'import numpy\n'), ((1586, 1726), 'pandas.read_csv', 'read_csv', (['DATASET'], {'usecols': "['Type', 'Type_property', 'Type_violent', 'Loc_public', 'Loc_private',\n 'Arrest', 'Domestic']", 'engine': '"""python"""'}), "(DATASET, usecols=['Type', 'Type_property', 'Type_violent',\n 'Loc_public', 'Loc_private', 'Arrest', 'Domestic'], engine='python')\n", (1594, 1726), False, 'from pandas import read_csv\n'), ((1968, 2002), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1980, 2002), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), ((2572, 2639), 'numpy.reshape', 'numpy.reshape', (['trainX', '(trainX.shape[0], look_back, train.shape[1])'], {}), '(trainX, (trainX.shape[0], look_back, train.shape[1]))\n', (2585, 2639), False, 'import numpy\n'), ((2653, 2717), 'numpy.reshape', 'numpy.reshape', (['testX', '(testX.shape[0], look_back, test.shape[1])'], {}), '(testX, (testX.shape[0], look_back, test.shape[1]))\n', (2666, 2717), False, 'import numpy\n'), ((2789, 2801), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2799, 2801), False, 'from keras.models import Sequential\n'), ((5160, 5228), 'numpy.reshape', 'numpy.reshape', (['resultX', '(resultX.shape[0], look_back, test.shape[1])'], {}), '(resultX, (resultX.shape[0], look_back, test.shape[1]))\n', (5173, 5228), False, 'import numpy\n'), ((5352, 5392), 'numpy.reshape', 'numpy.reshape', (['result', '(look_forward, 1)'], {}), '(result, (look_forward, 1))\n', (5365, 5392), False, 'import numpy\n'), ((983, 1001), 'numpy.array', 'numpy.array', (['dataX'], {}), '(dataX)\n', (994, 1001), False, 'import numpy\n'), ((1003, 1021), 'numpy.array', 'numpy.array', (['dataY'], {}), '(dataY)\n', (1014, 1021), False, 'import numpy\n'), ((2894, 2979), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'return_sequences': '(True)', 'input_shape': '(trainX.shape[1], trainX.shape[2])'}), '(100, return_sequences=True, input_shape=(trainX.shape[1], trainX.shape[2])\n )\n', (2898, 2979), False, 'from keras.layers import LSTM\n'), ((2993, 3001), 'keras.layers.LSTM', 'LSTM', (['(50)'], {}), '(50)\n', (2997, 3001), False, 'from keras.layers import LSTM\n'), ((3022, 3044), 'keras.layers.Dense', 'Dense', (['trainY.shape[1]'], {}), '(trainY.shape[1])\n', (3027, 3044), False, 'from keras.layers import Dense\n'), ((4879, 4919), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['trainY', 'trainPredict'], {}), '(trainY, trainPredict)\n', (4897, 4919), False, 'from sklearn.metrics import mean_squared_error\n'), ((5000, 5038), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['testY', 'testPredict'], {}), '(testY, testPredict)\n', (5018, 5038), False, 'from sklearn.metrics import mean_squared_error\n')] |
import pickle
import numpy as np
import os
import time
import cv2
from glob import glob
from PIL import ImageFile, Image
from plyfile import PlyData
from skimage.io import imread, imsave
from concurrent.futures import ProcessPoolExecutor
from config import cfg
class ModelAligner(object):
rotation_transform = np.array([[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.]])
translation_transforms = {
# 'cat': np.array([-0.00577495, -0.01259045, -0.04062323])
}
intrinsic_matrix = {
'linemod': np.array([[572.4114, 0., 325.2611],
[0., 573.57043, 242.04899],
[0., 0., 1.]]),
# 'blender': np.array([[280.0, 0.0, 128.0],
# [0.0, 280.0, 128.0],
# [0.0, 0.0, 1.0]]),
'blender': np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]])
}
def __init__(self, class_type,linemod_dir,linemod_orig_dir):
self.class_type = class_type
self.blender_model_path = os.path.join(linemod_dir,'{}/{}.ply'.format(class_type, class_type))
self.orig_model_path = os.path.join(linemod_orig_dir,'{}/mesh.ply'.format(class_type))
self.orig_old_model_path = os.path.join(linemod_orig_dir,'{}/OLDmesh.ply'.format(class_type))
self.transform_dat_path = os.path.join(linemod_orig_dir,'{}/transform.dat'.format(class_type))
self.R_p2w,self.t_p2w,self.s_p2w=self.setup_p2w_transform()
@staticmethod
def setup_p2w_transform():
transform1 = np.array([[0.161513626575, -0.827108919621, 0.538334608078, -0.245206743479],
[-0.986692547798, -0.124983474612, 0.104004733264, -0.050683632493],
[-0.018740313128, -0.547968924046, -0.836288750172, 0.387638419867]])
transform2 = np.array([[0.976471602917, 0.201606079936, -0.076541729271, -0.000718327821],
[-0.196746662259, 0.978194475174, 0.066531419754, 0.000077120210],
[0.088285841048, -0.049906700850, 0.994844079018, -0.001409600372]])
R1 = transform1[:, :3]
t1 = transform1[:, 3]
R2 = transform2[:, :3]
t2 = transform2[:, 3]
# printer system to world system
t_p2w = np.dot(R2, t1) + t2
R_p2w = np.dot(R2, R1)
s_p2w = 0.85
return R_p2w,t_p2w,s_p2w
def pose_p2w(self,RT):
t,R=RT[:,3],RT[:,:3]
R_w2c=np.dot(R, self.R_p2w.T)
t_w2c=-np.dot(R_w2c,self.t_p2w)+self.s_p2w*t
return np.concatenate([R_w2c,t_w2c[:,None]],1)
@staticmethod
def load_ply_model(model_path):
ply = PlyData.read(model_path)
data = ply.elements[0].data
x = data['x']
y = data['y']
z = data['z']
return np.stack([x, y, z], axis=-1)
def read_transform_dat(self):
transform_dat = np.loadtxt(self.transform_dat_path, skiprows=1)[:, 1]
transform_dat = np.reshape(transform_dat, newshape=[3, 4])
return transform_dat
def load_orig_model(self):
if os.path.exists(self.orig_model_path):
return self.load_ply_model(self.orig_model_path) / 1000.
else:
transform = self.read_transform_dat()
old_model = self.load_ply_model(self.orig_old_model_path) / 1000.
old_model = np.dot(old_model, transform[:, :3].T) + transform[:, 3]
return old_model
def get_translation_transform(self):
if self.class_type in self.translation_transforms:
return self.translation_transforms[self.class_type]
blender_model = self.load_ply_model(self.blender_model_path)
orig_model = self.load_orig_model()
blender_model = np.dot(blender_model, self.rotation_transform.T)
translation_transform = np.mean(orig_model, axis=0) - np.mean(blender_model, axis=0)
self.translation_transforms[self.class_type] = translation_transform
return translation_transform
class PoseTransformer(object):
rotation_transform = np.array([[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.]])
translation_transforms = {}
class_type_to_number = {
'ape': '001',
'can': '004',
'cat': '005',
'driller': '006',
'duck': '007',
'eggbox': '008',
'glue': '009',
'holepuncher': '010'
}
blender_models={}
def __init__(self, class_type,linemod_dir,linemod_orig_dir):
self.class_type = class_type
self.blender_model_path = os.path.join(linemod_dir,'{}/{}.ply'.format(class_type, class_type))
self.orig_model_path = os.path.join(linemod_orig_dir,'{}/mesh.ply'.format(class_type))
self.model_aligner = ModelAligner(class_type,linemod_dir,linemod_orig_dir)
def orig_pose_to_blender_pose(self, pose):
rot, tra = pose[:, :3], pose[:, 3]
tra = tra + np.dot(rot, self.model_aligner.get_translation_transform())
rot = np.dot(rot, self.rotation_transform)
return np.concatenate([rot, np.reshape(tra, newshape=[3, 1])], axis=-1)
def read_pickle(pkl_path):
with open(pkl_path, 'rb') as f:
return pickle.load(f)
def save_pickle(data, pkl_path):
with open(pkl_path, 'wb') as f:
pickle.dump(data, f)
def read_rgb_np(rgb_path):
ImageFile.LOAD_TRUNCATED_IMAGES = True
img = Image.open(rgb_path).convert('RGB')
img = np.array(img,np.uint8)
return img
def read_mask_np(mask_path):
mask = Image.open(mask_path)
mask_seg = np.array(mask).astype(np.int32)
return mask_seg
def read_pose(rot_path, tra_path):
rot = np.loadtxt(rot_path, skiprows=1)
tra = np.loadtxt(tra_path, skiprows=1) / 100.
return np.concatenate([rot, np.reshape(tra, newshape=[3, 1])], axis=-1)
def collect_train_val_test_info(linemod_dir,cls_name):
with open(os.path.join(linemod_dir,cls_name,'test.txt'),'r') as f:
test_fns=[line.strip().split('/')[-1] for line in f.readlines()]
with open(os.path.join(linemod_dir,cls_name,'train.txt'),'r') as f:
train_fns=[line.strip().split('/')[-1] for line in f.readlines()]
return test_fns, train_fns
def collect_linemod_set_info(linemod_dir,linemod_cls_name,linemod_orig_dir,cache_dir='./'):
database=[]
if os.path.exists(os.path.join(cache_dir,'{}_info.pkl').format(linemod_cls_name)):
return read_pickle(os.path.join(cache_dir,'{}_info.pkl').format(linemod_cls_name))
_,train_fns=collect_train_val_test_info(linemod_dir,linemod_cls_name)
print('begin generate database {}'.format(linemod_cls_name))
rgb_dir=os.path.join(linemod_dir,linemod_cls_name,'JPEGImages')
msk_dir=os.path.join(linemod_dir,linemod_cls_name,'mask')
rt_dir = os.path.join(linemod_orig_dir, linemod_cls_name, 'data')
img_num=len(os.listdir(rgb_dir))
for k in range(img_num):
data={}
data['rgb_pth']=os.path.join(rgb_dir, '{:06}.jpg'.format(k))
data['dpt_pth']=os.path.join(msk_dir, '{:04}.png'.format(k))
if data['rgb_pth'].split('/')[-1] not in train_fns: continue
pose=read_pose(os.path.join(rt_dir, 'rot{}.rot'.format(k)),
os.path.join(rt_dir, 'tra{}.tra'.format(k)))
pose_transformer = PoseTransformer(linemod_cls_name, linemod_dir, linemod_orig_dir)
data['RT'] = pose_transformer.orig_pose_to_blender_pose(pose).astype(np.float32)
database.append(data)
print('success generate database {} len {}'.format(linemod_cls_name,len(database)))
save_pickle(database,os.path.join(cache_dir,'{}_info.pkl').format(linemod_cls_name))
return database
def randomly_read_background(background_dir,cache_dir):
if os.path.exists(os.path.join(cache_dir,'background_info.pkl')):
fns=read_pickle(os.path.join(cache_dir,'background_info.pkl'))
else:
fns=glob(os.path.join(background_dir,'*.jpg'))+glob(os.path.join(background_dir,'*.png'))
save_pickle(fns,os.path.join(cache_dir,'background_info.pkl'))
return imread(fns[np.random.randint(0,len(fns))])
def prepare_dataset_parallel(output_dir, linemod_dir, linemod_orig_dir, fuse_num, background_dir, cache_dir, worker_num=8):
exector=ProcessPoolExecutor(max_workers=worker_num)
futures=[]
for cls_name in linemod_cls_names:
collect_linemod_set_info(linemod_dir,cls_name,linemod_orig_dir,cache_dir)
randomly_read_background(background_dir,cache_dir)
for idx in np.arange(fuse_num):
seed=np.random.randint(5000)
futures.append(exector.submit(
prepare_dataset_single,output_dir,idx, linemod_cls_names, linemod_dir, linemod_orig_dir, background_dir,cache_dir, seed))
for f in futures:
f.result()
def prepare_dataset_single(output_dir,idx,linemod_cls_names,linemod_dir,linemod_orig_dir,background_dir,cache_dir,seed):
time_begin=time.time()
np.random.seed(seed)
rgbs,masks,begins,poses=[],[],[],[]
image_dbs={}
for cls_id,cls_name in enumerate(linemod_cls_names):
image_dbs[cls_id]=collect_linemod_set_info(linemod_dir,cls_name,linemod_orig_dir,cache_dir)
for cls_id,cls_name in enumerate(linemod_cls_names):
rgb, mask, begin, pose=randomly_sample_foreground(image_dbs[cls_id], linemod_dir)
mask*=cls_id+1
rgbs.append(rgb)
masks.append(mask)
begins.append(begin)
poses.append(pose)
background=randomly_read_background(background_dir,cache_dir)
fuse_img, fuse_mask, fuse_begins= fuse_regions(rgbs, masks, begins, background, 480, 640)
save_fuse_data(output_dir, idx, fuse_img, fuse_mask, fuse_begins, poses)
print('{} cost {} s'.format(idx,time.time()-time_begin))
def fuse_regions(rgbs,masks,begins,background,th,tw):
fuse_order=np.arange(len(rgbs))
np.random.shuffle(fuse_order)
fuse_img=background
fuse_img=cv2.resize(fuse_img,(tw,th),interpolation=cv2.INTER_LINEAR)
fuse_mask=np.zeros([fuse_img.shape[0],fuse_img.shape[1]],np.int32)
for idx in fuse_order:
rh,rw=masks[idx].shape
bh=np.random.randint(0,fuse_img.shape[0]-rh)
bw=np.random.randint(0,fuse_img.shape[1]-rw)
silhouette=masks[idx]>0
out_silhouette=np.logical_not(silhouette)
fuse_mask[bh:bh+rh,bw:bw+rw]*=out_silhouette.astype(fuse_mask.dtype)
fuse_mask[bh:bh+rh,bw:bw+rw]+=masks[idx]
fuse_img[bh:bh+rh,bw:bw+rw]*=out_silhouette.astype(fuse_img.dtype)[:,:,None]
fuse_img[bh:bh+rh,bw:bw+rw]+=rgbs[idx]
begins[idx][0]=-begins[idx][0]+bh
begins[idx][1]=-begins[idx][1]+bw
return fuse_img,fuse_mask,begins
def randomly_sample_foreground(image_db,linemod_dir):
idx=np.random.randint(0,len(image_db))
rgb_pth=os.path.join(linemod_dir,image_db[idx]['rgb_pth'])
dpt_pth=os.path.join(linemod_dir,image_db[idx]['dpt_pth'])
rgb = read_rgb_np(rgb_pth)
mask = read_mask_np(dpt_pth)
mask=np.sum(mask,2)>0
mask=np.asarray(mask,np.int32)
hs,ws=np.nonzero(mask)
hmin,hmax=np.min(hs),np.max(hs)
wmin,wmax=np.min(ws),np.max(ws)
mask=mask[hmin:hmax,wmin:wmax]
rgb=rgb[hmin:hmax,wmin:wmax]
rgb*=mask.astype(np.uint8)[:,:,None]
begin=[hmin,wmin]
pose=image_db[idx]['RT']
return rgb, mask, begin, pose
def save_fuse_data(output_dir, idx, fuse_img, fuse_mask, fuse_begins, fuse_poses):
os.makedirs(output_dir, exist_ok=True)
imsave(os.path.join(output_dir,'{}_rgb.jpg'.format(idx)),fuse_img)
fuse_mask=fuse_mask.astype(np.uint8)
imsave(os.path.join(output_dir,'{}_mask.png'.format(idx)),fuse_mask)
save_pickle([np.asarray(fuse_begins,np.int32), np.asarray(fuse_poses,np.float32)],
os.path.join(output_dir,'{}_info.pkl'.format(idx)))
def randomly_sample_foreground_ycb(image_db, ycb_dir, ycb_cls_idx):
idx=np.random.randint(0,len(image_db.train_real_set))
rgb_pth=os.path.join(ycb_dir, image_db.train_real_set[idx]['rgb_pth'])
msk_pth=os.path.join(ycb_dir, image_db.train_real_set[idx]['msk_pth'])
rgb = read_rgb_np(rgb_pth)
mask = read_mask_np(msk_pth)
mask = mask == ycb_cls_idx
if len(mask.shape)>2: mask=np.sum(mask,2)>0
mask=np.asarray(mask,np.int32)
hs,ws=np.nonzero(mask)
if len(hs)==0:
print('zero size')
raise RuntimeError
hmin,hmax=np.min(hs),np.max(hs)
wmin,wmax=np.min(ws),np.max(ws)
mask=mask[hmin:hmax,wmin:wmax]
rgb=rgb[hmin:hmax,wmin:wmax]
rgb*=mask.astype(np.uint8)[:,:,None]
begin=[hmin,wmin]
pose=image_db.train_real_set[idx]['pose']
K=image_db.train_real_set[idx]['K']
return rgb, mask, begin, pose, K
linemod_cls_names=['ape','cam','cat','duck','glue','iron','phone', 'benchvise','can','driller','eggbox','holepuncher','lamp']
def run():
output_dir='./data/LINEMOD/fuse/'
linemod_dir=cfg.LINEMOD
linemod_orig_dir=cfg.LINEMOD_ORIG
background_dir=os.path.join(cfg.SUN, "JPEGImages")
cache_dir='./'
fuse_num=10000
worker_num=2
prepare_dataset_parallel(output_dir, linemod_dir, linemod_orig_dir, fuse_num, background_dir, cache_dir, worker_num)
if __name__=="__main__":
output_dir='tmp/'
linemod_dir='/home/liuyuan/data/LINEMOD'
linemod_orig_dir='/home/liuyuan/data/LINEMOD_ORIG'
background_dir='/home/liuyuan/data/SUN2012pascalformat/JPEGImages'
cache_dir='./'
fuse_num=10000
worker_num=2
prepare_dataset_parallel(output_dir, linemod_dir, linemod_orig_dir, fuse_num, background_dir, cache_dir, worker_num)
| [
"pickle.dump",
"numpy.random.seed",
"numpy.sum",
"concurrent.futures.ProcessPoolExecutor",
"pickle.load",
"numpy.arange",
"numpy.random.randint",
"numpy.mean",
"os.path.join",
"numpy.logical_not",
"os.path.exists",
"numpy.max",
"numpy.loadtxt",
"numpy.reshape",
"plyfile.PlyData.read",
... | [((319, 382), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])\n', (327, 382), True, 'import numpy as np\n'), ((4220, 4283), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])\n', (4228, 4283), True, 'import numpy as np\n'), ((5632, 5655), 'numpy.array', 'np.array', (['img', 'np.uint8'], {}), '(img, np.uint8)\n', (5640, 5655), True, 'import numpy as np\n'), ((5711, 5732), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (5721, 5732), False, 'from PIL import ImageFile, Image\n'), ((5846, 5878), 'numpy.loadtxt', 'np.loadtxt', (['rot_path'], {'skiprows': '(1)'}), '(rot_path, skiprows=1)\n', (5856, 5878), True, 'import numpy as np\n'), ((6823, 6880), 'os.path.join', 'os.path.join', (['linemod_dir', 'linemod_cls_name', '"""JPEGImages"""'], {}), "(linemod_dir, linemod_cls_name, 'JPEGImages')\n", (6835, 6880), False, 'import os\n'), ((6891, 6942), 'os.path.join', 'os.path.join', (['linemod_dir', 'linemod_cls_name', '"""mask"""'], {}), "(linemod_dir, linemod_cls_name, 'mask')\n", (6903, 6942), False, 'import os\n'), ((6954, 7010), 'os.path.join', 'os.path.join', (['linemod_orig_dir', 'linemod_cls_name', '"""data"""'], {}), "(linemod_orig_dir, linemod_cls_name, 'data')\n", (6966, 7010), False, 'import os\n'), ((8414, 8457), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'worker_num'}), '(max_workers=worker_num)\n', (8433, 8457), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((8666, 8685), 'numpy.arange', 'np.arange', (['fuse_num'], {}), '(fuse_num)\n', (8675, 8685), True, 'import numpy as np\n'), ((9077, 9088), 'time.time', 'time.time', ([], {}), '()\n', (9086, 9088), False, 'import time\n'), ((9093, 9113), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9107, 9113), True, 'import numpy as np\n'), ((10003, 10032), 'numpy.random.shuffle', 'np.random.shuffle', (['fuse_order'], {}), '(fuse_order)\n', (10020, 10032), True, 'import numpy as np\n'), ((10070, 10132), 'cv2.resize', 'cv2.resize', (['fuse_img', '(tw, th)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(fuse_img, (tw, th), interpolation=cv2.INTER_LINEAR)\n', (10080, 10132), False, 'import cv2\n'), ((10144, 10202), 'numpy.zeros', 'np.zeros', (['[fuse_img.shape[0], fuse_img.shape[1]]', 'np.int32'], {}), '([fuse_img.shape[0], fuse_img.shape[1]], np.int32)\n', (10152, 10202), True, 'import numpy as np\n'), ((10940, 10991), 'os.path.join', 'os.path.join', (['linemod_dir', "image_db[idx]['rgb_pth']"], {}), "(linemod_dir, image_db[idx]['rgb_pth'])\n", (10952, 10991), False, 'import os\n'), ((11003, 11054), 'os.path.join', 'os.path.join', (['linemod_dir', "image_db[idx]['dpt_pth']"], {}), "(linemod_dir, image_db[idx]['dpt_pth'])\n", (11015, 11054), False, 'import os\n'), ((11153, 11179), 'numpy.asarray', 'np.asarray', (['mask', 'np.int32'], {}), '(mask, np.int32)\n', (11163, 11179), True, 'import numpy as np\n'), ((11190, 11206), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (11200, 11206), True, 'import numpy as np\n'), ((11564, 11602), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (11575, 11602), False, 'import os\n'), ((12082, 12144), 'os.path.join', 'os.path.join', (['ycb_dir', "image_db.train_real_set[idx]['rgb_pth']"], {}), "(ycb_dir, image_db.train_real_set[idx]['rgb_pth'])\n", (12094, 12144), False, 'import os\n'), ((12157, 12219), 'os.path.join', 'os.path.join', (['ycb_dir', "image_db.train_real_set[idx]['msk_pth']"], {}), "(ycb_dir, image_db.train_real_set[idx]['msk_pth'])\n", (12169, 12219), False, 'import os\n'), ((12373, 12399), 'numpy.asarray', 'np.asarray', (['mask', 'np.int32'], {}), '(mask, np.int32)\n', (12383, 12399), True, 'import numpy as np\n'), ((12410, 12426), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (12420, 12426), True, 'import numpy as np\n'), ((13092, 13127), 'os.path.join', 'os.path.join', (['cfg.SUN', '"""JPEGImages"""'], {}), "(cfg.SUN, 'JPEGImages')\n", (13104, 13127), False, 'import os\n'), ((592, 679), 'numpy.array', 'np.array', (['[[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0, 1.0]]'], {}), '([[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0,\n 1.0]])\n', (600, 679), True, 'import numpy as np\n'), ((905, 974), 'numpy.array', 'np.array', (['[[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]]'], {}), '([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]])\n', (913, 974), True, 'import numpy as np\n'), ((1688, 1913), 'numpy.array', 'np.array', (['[[0.161513626575, -0.827108919621, 0.538334608078, -0.245206743479], [-\n 0.986692547798, -0.124983474612, 0.104004733264, -0.050683632493], [-\n 0.018740313128, -0.547968924046, -0.836288750172, 0.387638419867]]'], {}), '([[0.161513626575, -0.827108919621, 0.538334608078, -0.245206743479\n ], [-0.986692547798, -0.124983474612, 0.104004733264, -0.050683632493],\n [-0.018740313128, -0.547968924046, -0.836288750172, 0.387638419867]])\n', (1696, 1913), True, 'import numpy as np\n'), ((1988, 2208), 'numpy.array', 'np.array', (['[[0.976471602917, 0.201606079936, -0.076541729271, -0.000718327821], [-\n 0.196746662259, 0.978194475174, 0.066531419754, 7.712021e-05], [\n 0.088285841048, -0.04990670085, 0.994844079018, -0.001409600372]]'], {}), '([[0.976471602917, 0.201606079936, -0.076541729271, -0.000718327821\n ], [-0.196746662259, 0.978194475174, 0.066531419754, 7.712021e-05], [\n 0.088285841048, -0.04990670085, 0.994844079018, -0.001409600372]])\n', (1996, 2208), True, 'import numpy as np\n'), ((2481, 2495), 'numpy.dot', 'np.dot', (['R2', 'R1'], {}), '(R2, R1)\n', (2487, 2495), True, 'import numpy as np\n'), ((2621, 2644), 'numpy.dot', 'np.dot', (['R', 'self.R_p2w.T'], {}), '(R, self.R_p2w.T)\n', (2627, 2644), True, 'import numpy as np\n'), ((2713, 2755), 'numpy.concatenate', 'np.concatenate', (['[R_w2c, t_w2c[:, None]]', '(1)'], {}), '([R_w2c, t_w2c[:, None]], 1)\n', (2727, 2755), True, 'import numpy as np\n'), ((2822, 2846), 'plyfile.PlyData.read', 'PlyData.read', (['model_path'], {}), '(model_path)\n', (2834, 2846), False, 'from plyfile import PlyData\n'), ((2964, 2992), 'numpy.stack', 'np.stack', (['[x, y, z]'], {'axis': '(-1)'}), '([x, y, z], axis=-1)\n', (2972, 2992), True, 'import numpy as np\n'), ((3130, 3172), 'numpy.reshape', 'np.reshape', (['transform_dat'], {'newshape': '[3, 4]'}), '(transform_dat, newshape=[3, 4])\n', (3140, 3172), True, 'import numpy as np\n'), ((3245, 3281), 'os.path.exists', 'os.path.exists', (['self.orig_model_path'], {}), '(self.orig_model_path)\n', (3259, 3281), False, 'import os\n'), ((3906, 3954), 'numpy.dot', 'np.dot', (['blender_model', 'self.rotation_transform.T'], {}), '(blender_model, self.rotation_transform.T)\n', (3912, 3954), True, 'import numpy as np\n'), ((5195, 5231), 'numpy.dot', 'np.dot', (['rot', 'self.rotation_transform'], {}), '(rot, self.rotation_transform)\n', (5201, 5231), True, 'import numpy as np\n'), ((5391, 5405), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5402, 5405), False, 'import pickle\n'), ((5484, 5504), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5495, 5504), False, 'import pickle\n'), ((5889, 5921), 'numpy.loadtxt', 'np.loadtxt', (['tra_path'], {'skiprows': '(1)'}), '(tra_path, skiprows=1)\n', (5899, 5921), True, 'import numpy as np\n'), ((7027, 7046), 'os.listdir', 'os.listdir', (['rgb_dir'], {}), '(rgb_dir)\n', (7037, 7046), False, 'import os\n'), ((7925, 7971), 'os.path.join', 'os.path.join', (['cache_dir', '"""background_info.pkl"""'], {}), "(cache_dir, 'background_info.pkl')\n", (7937, 7971), False, 'import os\n'), ((8700, 8723), 'numpy.random.randint', 'np.random.randint', (['(5000)'], {}), '(5000)\n', (8717, 8723), True, 'import numpy as np\n'), ((10270, 10314), 'numpy.random.randint', 'np.random.randint', (['(0)', '(fuse_img.shape[0] - rh)'], {}), '(0, fuse_img.shape[0] - rh)\n', (10287, 10314), True, 'import numpy as np\n'), ((10323, 10367), 'numpy.random.randint', 'np.random.randint', (['(0)', '(fuse_img.shape[1] - rw)'], {}), '(0, fuse_img.shape[1] - rw)\n', (10340, 10367), True, 'import numpy as np\n'), ((10421, 10447), 'numpy.logical_not', 'np.logical_not', (['silhouette'], {}), '(silhouette)\n', (10435, 10447), True, 'import numpy as np\n'), ((11127, 11142), 'numpy.sum', 'np.sum', (['mask', '(2)'], {}), '(mask, 2)\n', (11133, 11142), True, 'import numpy as np\n'), ((11221, 11231), 'numpy.min', 'np.min', (['hs'], {}), '(hs)\n', (11227, 11231), True, 'import numpy as np\n'), ((11232, 11242), 'numpy.max', 'np.max', (['hs'], {}), '(hs)\n', (11238, 11242), True, 'import numpy as np\n'), ((11257, 11267), 'numpy.min', 'np.min', (['ws'], {}), '(ws)\n', (11263, 11267), True, 'import numpy as np\n'), ((11268, 11278), 'numpy.max', 'np.max', (['ws'], {}), '(ws)\n', (11274, 11278), True, 'import numpy as np\n'), ((12514, 12524), 'numpy.min', 'np.min', (['hs'], {}), '(hs)\n', (12520, 12524), True, 'import numpy as np\n'), ((12525, 12535), 'numpy.max', 'np.max', (['hs'], {}), '(hs)\n', (12531, 12535), True, 'import numpy as np\n'), ((12550, 12560), 'numpy.min', 'np.min', (['ws'], {}), '(ws)\n', (12556, 12560), True, 'import numpy as np\n'), ((12561, 12571), 'numpy.max', 'np.max', (['ws'], {}), '(ws)\n', (12567, 12571), True, 'import numpy as np\n'), ((2445, 2459), 'numpy.dot', 'np.dot', (['R2', 't1'], {}), '(R2, t1)\n', (2451, 2459), True, 'import numpy as np\n'), ((3052, 3099), 'numpy.loadtxt', 'np.loadtxt', (['self.transform_dat_path'], {'skiprows': '(1)'}), '(self.transform_dat_path, skiprows=1)\n', (3062, 3099), True, 'import numpy as np\n'), ((3987, 4014), 'numpy.mean', 'np.mean', (['orig_model'], {'axis': '(0)'}), '(orig_model, axis=0)\n', (3994, 4014), True, 'import numpy as np\n'), ((4017, 4047), 'numpy.mean', 'np.mean', (['blender_model'], {'axis': '(0)'}), '(blender_model, axis=0)\n', (4024, 4047), True, 'import numpy as np\n'), ((5586, 5606), 'PIL.Image.open', 'Image.open', (['rgb_path'], {}), '(rgb_path)\n', (5596, 5606), False, 'from PIL import ImageFile, Image\n'), ((5748, 5762), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (5756, 5762), True, 'import numpy as np\n'), ((5961, 5993), 'numpy.reshape', 'np.reshape', (['tra'], {'newshape': '[3, 1]'}), '(tra, newshape=[3, 1])\n', (5971, 5993), True, 'import numpy as np\n'), ((6075, 6122), 'os.path.join', 'os.path.join', (['linemod_dir', 'cls_name', '"""test.txt"""'], {}), "(linemod_dir, cls_name, 'test.txt')\n", (6087, 6122), False, 'import os\n'), ((6220, 6268), 'os.path.join', 'os.path.join', (['linemod_dir', 'cls_name', '"""train.txt"""'], {}), "(linemod_dir, cls_name, 'train.txt')\n", (6232, 6268), False, 'import os\n'), ((7997, 8043), 'os.path.join', 'os.path.join', (['cache_dir', '"""background_info.pkl"""'], {}), "(cache_dir, 'background_info.pkl')\n", (8009, 8043), False, 'import os\n'), ((8176, 8222), 'os.path.join', 'os.path.join', (['cache_dir', '"""background_info.pkl"""'], {}), "(cache_dir, 'background_info.pkl')\n", (8188, 8222), False, 'import os\n'), ((11805, 11838), 'numpy.asarray', 'np.asarray', (['fuse_begins', 'np.int32'], {}), '(fuse_begins, np.int32)\n', (11815, 11838), True, 'import numpy as np\n'), ((11839, 11873), 'numpy.asarray', 'np.asarray', (['fuse_poses', 'np.float32'], {}), '(fuse_poses, np.float32)\n', (11849, 11873), True, 'import numpy as np\n'), ((12347, 12362), 'numpy.sum', 'np.sum', (['mask', '(2)'], {}), '(mask, 2)\n', (12353, 12362), True, 'import numpy as np\n'), ((2660, 2685), 'numpy.dot', 'np.dot', (['R_w2c', 'self.t_p2w'], {}), '(R_w2c, self.t_p2w)\n', (2666, 2685), True, 'import numpy as np\n'), ((3518, 3555), 'numpy.dot', 'np.dot', (['old_model', 'transform[:, :3].T'], {}), '(old_model, transform[:, :3].T)\n', (3524, 3555), True, 'import numpy as np\n'), ((5268, 5300), 'numpy.reshape', 'np.reshape', (['tra'], {'newshape': '[3, 1]'}), '(tra, newshape=[3, 1])\n', (5278, 5300), True, 'import numpy as np\n'), ((6515, 6553), 'os.path.join', 'os.path.join', (['cache_dir', '"""{}_info.pkl"""'], {}), "(cache_dir, '{}_info.pkl')\n", (6527, 6553), False, 'import os\n'), ((7762, 7800), 'os.path.join', 'os.path.join', (['cache_dir', '"""{}_info.pkl"""'], {}), "(cache_dir, '{}_info.pkl')\n", (7774, 7800), False, 'import os\n'), ((8071, 8108), 'os.path.join', 'os.path.join', (['background_dir', '"""*.jpg"""'], {}), "(background_dir, '*.jpg')\n", (8083, 8108), False, 'import os\n'), ((8114, 8151), 'os.path.join', 'os.path.join', (['background_dir', '"""*.png"""'], {}), "(background_dir, '*.png')\n", (8126, 8151), False, 'import os\n'), ((9883, 9894), 'time.time', 'time.time', ([], {}), '()\n', (9892, 9894), False, 'import time\n'), ((6607, 6645), 'os.path.join', 'os.path.join', (['cache_dir', '"""{}_info.pkl"""'], {}), "(cache_dir, '{}_info.pkl')\n", (6619, 6645), False, 'import os\n')] |
"""DIV2K dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
from PIL import Image
import numpy as np
import tensorflow as tf
import datasets
REMOTE_URL = 'http://data.vision.ee.ethz.ch/cvl/DIV2K/'
TRAIN_LR_ARCHIVE_NAME = lambda s: 'DIV2K_train_LR_bicubic_X{}.zip'.format(s)
TRAIN_HR_ARCHIVE_NAME = 'DIV2K_train_HR.zip'
EVAL_LR_ARCHIVE_NAME = lambda s: 'DIV2K_valid_LR_bicubic_X{}.zip'.format(s)
EVAL_HR_ARCHIVE_NAME = 'DIV2K_valid_HR.zip'
LOCAL_DIR = 'data/DIV2K/'
TRAIN_LR_DIR = lambda s: LOCAL_DIR + 'DIV2K_train_LR_bicubic/X{}/'.format(s)
TRAIN_HR_DIR = LOCAL_DIR + 'DIV2K_train_HR/'
EVAL_LR_DIR = lambda s: LOCAL_DIR + 'DIV2K_valid_LR_bicubic/X{}/'.format(s)
EVAL_HR_DIR = LOCAL_DIR + 'DIV2K_valid_HR/'
NUM_CHANNELS = 3
def update_argparser(parser):
datasets.update_argparser(parser)
parser.add_argument(
'--scale', help='Scale for image super-resolution', default=2, type=int)
parser.add_argument(
'--lr-patch-size',
help='Number of pixels in height or width of LR patches',
default=48,
type=int)
parser.set_defaults(
num_channels=NUM_CHANNELS,
train_batch_size=16,
eval_batch_size=1,
shuffle_buffer_size=800,
)
def _extract(mode, params):
lr_dir = {
tf.estimator.ModeKeys.TRAIN: TRAIN_LR_DIR(params.scale),
tf.estimator.ModeKeys.EVAL: EVAL_LR_DIR(params.scale),
}[mode]
#lr_dir = os.path.expanduser(lr_dir)
hr_dir = {
tf.estimator.ModeKeys.TRAIN: TRAIN_HR_DIR,
tf.estimator.ModeKeys.EVAL: EVAL_HR_DIR,
}[mode]
def list_files(d):
files = sorted(os.listdir(d))
files = [os.path.join(d, f) for f in files]
return files
lr_files = list_files(lr_dir)
hr_files = list_files(hr_dir)
dataset = tf.data.Dataset.from_tensor_slices((lr_files, hr_files))
def _read_image(lr_file, hr_file):
lr_image = tf.image.decode_png(tf.read_file(lr_file), channels=NUM_CHANNELS)
hr_image = tf.image.decode_png(tf.read_file(hr_file), channels=NUM_CHANNELS)
return lr_image, hr_image
dataset = dataset.map(
_read_image,
num_parallel_calls=params.num_data_threads,
)
dataset = dataset.cache()
return dataset
def _transform(dataset, mode, params):
if mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(params.shuffle_buffer_size)
dataset = dataset.repeat()
def _preprocess(lr, hr):
if mode == tf.estimator.ModeKeys.TRAIN:
lr_shape = tf.shape(lr)
lr_up = tf.random_uniform(
shape=[],
minval=0,
maxval=lr_shape[0] - params.lr_patch_size,
dtype=tf.int32)
lr_left = tf.random_uniform(
shape=[],
minval=0,
maxval=lr_shape[1] - params.lr_patch_size,
dtype=tf.int32)
lr = tf.slice(lr, [lr_up, lr_left, 0],
[params.lr_patch_size, params.lr_patch_size, -1])
hr_up = lr_up * params.scale
hr_left = lr_left * params.scale
hr_patch_size = params.lr_patch_size * params.scale
hr = tf.slice(hr, [hr_up, hr_left, 0], [hr_patch_size, hr_patch_size, -1])
def _to_be_or_not_to_be(values, fn):
def _to_be():
return [fn(v) for v in values]
def _not_to_be():
return values
pred = tf.less(
tf.random_uniform(shape=[], minval=0., maxval=1., dtype=tf.float32),
0.5)
values = tf.cond(pred, _to_be, _not_to_be)
return values
lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.flip_left_right)
lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.flip_up_down)
lr, hr = _to_be_or_not_to_be([lr, hr], tf.image.rot90)
lr = tf.image.convert_image_dtype(lr, tf.float32)
hr = tf.image.convert_image_dtype(hr, tf.float32)
return {'source': lr}, {'target': hr}
dataset = dataset.map(
_preprocess,
num_parallel_calls=params.num_data_threads,
)
batch_size = {
tf.estimator.ModeKeys.TRAIN: params.train_batch_size,
tf.estimator.ModeKeys.EVAL: params.eval_batch_size,
}[mode]
drop_remainder = {
tf.estimator.ModeKeys.TRAIN: True,
tf.estimator.ModeKeys.EVAL: False,
}[mode]
dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
return dataset
input_fn = lambda mode, params: (
datasets.input_fn_tplt(mode, params, extract=_extract, transform=_transform))
def predict_input_fn():
input_tensor = tf.placeholder(
dtype=tf.float32, shape=[None, None, None, 3], name='input_tensor')
features = {'source': input_tensor}
return tf.estimator.export.ServingInputReceiver(
features=features,
receiver_tensors={
tf.saved_model.signature_constants.PREDICT_INPUTS: input_tensor
})
def test_saved_model():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-dir', help='GCS location to load exported model', required=True)
parser.add_argument(
'--input-dir', help='GCS location to load input images', required=True)
parser.add_argument(
'--output-dir', help='GCS location to load output images', required=True)
parser.add_argument(
'--ensemble',
help='Whether to ensemble with 8x rotation and flip',
default=False,
action='store_true')
args = parser.parse_args()
with tf.Session(graph=tf.Graph()) as sess:
metagraph_def = tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING], args.model_dir)
signature_def = metagraph_def.signature_def[
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
input_tensor = sess.graph.get_tensor_by_name(
signature_def.inputs['inputs'].name)
output_tensor = sess.graph.get_tensor_by_name(
signature_def.outputs['output'].name)
if not os.path.isdir(args.output_dir):
os.mkdir(args.output_dir)
for input_file in os.listdir(args.input_dir):
print(input_file)
output_file = os.path.join(args.output_dir, input_file)
input_file = os.path.join(args.input_dir, input_file)
input_image = np.asarray(Image.open(input_file))
def forward_images(images):
images = images.astype(np.float32) / 255.0
images = output_tensor.eval(feed_dict={input_tensor: images})
return images
if args.ensemble:
def flip(image):
images = [image]
images.append(image[::-1, :, :])
images.append(image[:, ::-1, :])
images.append(image[::-1, ::-1, :])
images = np.stack(images)
return images
def mean_of_flipped(images):
image = (images[0] + images[1, ::-1, :, :] + images[2, :, ::-1, :] +
images[3, ::-1, ::-1, :]) * 0.25
return image
rotate = lambda images: np.swapaxes(images, 1, 2)
input_images = flip(input_image)
output_image1 = mean_of_flipped(forward_images(input_images))
output_image2 = mean_of_flipped(
rotate(forward_images(rotate(input_images))))
output_image = (output_image1 + output_image2) * 0.5
else:
input_images = np.expand_dims(input_image, axis=0)
output_images = forward_images(input_images)
output_image = output_images[0]
output_image = np.around(output_image * 255.0).astype(np.uint8)
output_image = Image.fromarray(output_image, 'RGB')
output_image.save(output_file)
if __name__ == '__main__':
test_saved_model()
| [
"tensorflow.cond",
"os.mkdir",
"argparse.ArgumentParser",
"tensorflow.estimator.export.ServingInputReceiver",
"datasets.input_fn_tplt",
"numpy.around",
"os.path.join",
"tensorflow.placeholder",
"numpy.swapaxes",
"numpy.stack",
"datasets.update_argparser",
"tensorflow.Graph",
"tensorflow.read... | [((859, 892), 'datasets.update_argparser', 'datasets.update_argparser', (['parser'], {}), '(parser)\n', (884, 892), False, 'import datasets\n'), ((1817, 1873), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(lr_files, hr_files)'], {}), '((lr_files, hr_files))\n', (1851, 1873), True, 'import tensorflow as tf\n'), ((4347, 4423), 'datasets.input_fn_tplt', 'datasets.input_fn_tplt', (['mode', 'params'], {'extract': '_extract', 'transform': '_transform'}), '(mode, params, extract=_extract, transform=_transform)\n', (4369, 4423), False, 'import datasets\n'), ((4468, 4555), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, None, None, 3]', 'name': '"""input_tensor"""'}), "(dtype=tf.float32, shape=[None, None, None, 3], name=\n 'input_tensor')\n", (4482, 4555), True, 'import tensorflow as tf\n'), ((4605, 4756), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': '{tf.saved_model.signature_constants.PREDICT_INPUTS: input_tensor}'}), '(features=features,\n receiver_tensors={tf.saved_model.signature_constants.PREDICT_INPUTS:\n input_tensor})\n', (4645, 4756), True, 'import tensorflow as tf\n'), ((4817, 4842), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4840, 4842), False, 'import argparse\n'), ((3722, 3766), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['lr', 'tf.float32'], {}), '(lr, tf.float32)\n', (3750, 3766), True, 'import tensorflow as tf\n'), ((3776, 3820), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['hr', 'tf.float32'], {}), '(hr, tf.float32)\n', (3804, 3820), True, 'import tensorflow as tf\n'), ((5396, 5488), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['sess', '[tf.saved_model.tag_constants.SERVING]', 'args.model_dir'], {}), '(sess, [tf.saved_model.tag_constants.SERVING],\n args.model_dir)\n', (5422, 5488), True, 'import tensorflow as tf\n'), ((5910, 5936), 'os.listdir', 'os.listdir', (['args.input_dir'], {}), '(args.input_dir)\n', (5920, 5936), False, 'import os\n'), ((1660, 1673), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (1670, 1673), False, 'import os\n'), ((1688, 1706), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (1700, 1706), False, 'import os\n'), ((1947, 1968), 'tensorflow.read_file', 'tf.read_file', (['lr_file'], {}), '(lr_file)\n', (1959, 1968), True, 'import tensorflow as tf\n'), ((2028, 2049), 'tensorflow.read_file', 'tf.read_file', (['hr_file'], {}), '(hr_file)\n', (2040, 2049), True, 'import tensorflow as tf\n'), ((2509, 2521), 'tensorflow.shape', 'tf.shape', (['lr'], {}), '(lr)\n', (2517, 2521), True, 'import tensorflow as tf\n'), ((2536, 2637), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': '(lr_shape[0] - params.lr_patch_size)', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=lr_shape[0] - params.\n lr_patch_size, dtype=tf.int32)\n', (2553, 2637), True, 'import tensorflow as tf\n'), ((2690, 2791), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': '(lr_shape[1] - params.lr_patch_size)', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=lr_shape[1] - params.\n lr_patch_size, dtype=tf.int32)\n', (2707, 2791), True, 'import tensorflow as tf\n'), ((2839, 2927), 'tensorflow.slice', 'tf.slice', (['lr', '[lr_up, lr_left, 0]', '[params.lr_patch_size, params.lr_patch_size, -1]'], {}), '(lr, [lr_up, lr_left, 0], [params.lr_patch_size, params.\n lr_patch_size, -1])\n', (2847, 2927), True, 'import tensorflow as tf\n'), ((3086, 3155), 'tensorflow.slice', 'tf.slice', (['hr', '[hr_up, hr_left, 0]', '[hr_patch_size, hr_patch_size, -1]'], {}), '(hr, [hr_up, hr_left, 0], [hr_patch_size, hr_patch_size, -1])\n', (3094, 3155), True, 'import tensorflow as tf\n'), ((5824, 5854), 'os.path.isdir', 'os.path.isdir', (['args.output_dir'], {}), '(args.output_dir)\n', (5837, 5854), False, 'import os\n'), ((5862, 5887), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (5870, 5887), False, 'import os\n'), ((5982, 6023), 'os.path.join', 'os.path.join', (['args.output_dir', 'input_file'], {}), '(args.output_dir, input_file)\n', (5994, 6023), False, 'import os\n'), ((6043, 6083), 'os.path.join', 'os.path.join', (['args.input_dir', 'input_file'], {}), '(args.input_dir, input_file)\n', (6055, 6083), False, 'import os\n'), ((7365, 7401), 'PIL.Image.fromarray', 'Image.fromarray', (['output_image', '"""RGB"""'], {}), "(output_image, 'RGB')\n", (7380, 7401), False, 'from PIL import Image\n'), ((3455, 3488), 'tensorflow.cond', 'tf.cond', (['pred', '_to_be', '_not_to_be'], {}), '(pred, _to_be, _not_to_be)\n', (3462, 3488), True, 'import tensorflow as tf\n'), ((5355, 5365), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5363, 5365), True, 'import tensorflow as tf\n'), ((6115, 6137), 'PIL.Image.open', 'Image.open', (['input_file'], {}), '(input_file)\n', (6125, 6137), False, 'from PIL import Image\n'), ((7145, 7180), 'numpy.expand_dims', 'np.expand_dims', (['input_image'], {'axis': '(0)'}), '(input_image, axis=0)\n', (7159, 7180), True, 'import numpy as np\n'), ((3352, 3421), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0.0)', 'maxval': '(1.0)', 'dtype': 'tf.float32'}), '(shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)\n', (3369, 3421), True, 'import tensorflow as tf\n'), ((6546, 6562), 'numpy.stack', 'np.stack', (['images'], {}), '(images)\n', (6554, 6562), True, 'import numpy as np\n'), ((6812, 6837), 'numpy.swapaxes', 'np.swapaxes', (['images', '(1)', '(2)'], {}), '(images, 1, 2)\n', (6823, 6837), True, 'import numpy as np\n'), ((7295, 7326), 'numpy.around', 'np.around', (['(output_image * 255.0)'], {}), '(output_image * 255.0)\n', (7304, 7326), True, 'import numpy as np\n')] |
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This file is part of the collaboration with Universitat Oberta de Catalunya (UOC) on
Multi-Source Team Orienteering Problem (MSTOP).
The objective of the project is to develop an efficient algorithm to solve this extension
of the classic team orienteering problem, in which the vehicles / paths may start from
several different sources.
Author: <NAME>, Ph.D., Eng.
Contact: <EMAIL>
Date: January 2022
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import os
import math
import itertools
import collections
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import node
import edge
# Single-source benchmarks
single_source_benchmarks = tuple(os.listdir("../tests/single/"))
# Multi-source benchmarks
multi_source_benchmarks = tuple(os.listdir("../tests/multi/"))
# Default colors for nodes, source nodes, adn depot
NODES_COLOR = '#FDDD71'
SOURCES_COLORS = ('#8FDDF4', '#8DD631', '#A5A5A5', '#DB35EF', '#8153AB')
DEPOT_COLOR = '#F78181'
def euclidean (inode, jnode):
"""
The euclidean distance between two nodes.
:param inode: First node.
:param jnode: Second node.
"""
return math.sqrt((inode.x - jnode.x)**2 + (inode.y - jnode.y)**2)
class Problem:
"""
An instance of this class represents a single-source Team Orienteering
Problem.
It may also be translated according to different rules in a multi-source
version of it.
"""
def __init__(self, name, n_nodes, n_vehicles, Tmax, sources, nodes, depot):
"""
Initialise.
:param name: The name of the problem
:param n_nodes: The number of nodes.
:param n_vehicles: The number of vehicles / paths.
:param Tmax: The maximum distance vehicles can run / time budget for paths.
:param sources: The source nodes.
:param nodes: The nodes to visit.
:param depot: The depot.
:attr dists: The matrix of distances between nodes.
:attr positions: A dictionary of nodes positions.
:attr edges: The edges connecting the nodes.
"""
self.name = name
self.n_nodes = n_nodes
self.n_vehicles = n_vehicles
self.Tmax = Tmax
self.sources = sources
self.nodes = nodes
self.depot = depot
# Initialise edges list and nodes positions
edges = collections.deque()
dists = np.zeros((n_nodes, n_nodes))
# Calculate the matrix of distances and instantiate the edges
# and define nodes colors and positions
source_id = 0
for node1, node2 in itertools.permutations(itertools.chain(sources, nodes, (depot,)), 2):
# Calculate the edge cost
id1, id2 = node1.id, node2.id
cost = euclidean(node1, node2)
# Compile the oriented matrix of distances
dists[id1, id2] = cost
# Create the edge
if not node1.isdepot and not node2.issource:
edges.append(edge.Edge(node1, node2, cost))
self.dists = dists
self.edges = edges
def __hash__(self):
return id(self)
def __repr__(self):
return f"""
Problem {self.name}
---------------------------------------------
nodes: {self.n_nodes}
vehicles: {self.n_vehicles}
Tmax: {self.Tmax}
multi-source: {self.multi_source}
---------------------------------------------
"""
@property
def multi_source (self):
""" A property that says if the problem is multi-source or not. """
return len(self.sources) > 1
def iternodes (self):
""" A method to iterate over all the nodes of the problem (i.e., sources, customers, depot)"""
return itertools.chain(self.sources, self.nodes, (self.depot,))
def plot (problem, *, routes=tuple(), mapping=None, figsize=(6,4), title=None):
"""
This method is used to plot a problem using a graph representation that
makes it easy-to-read.
:param figsize: The size of the plot.
:param title: The title of the plot.
:param routes: The eventual routes found.
"""
plt.figure(figsize=figsize)
if title:
plt.title(title)
# Build the graph of nodes
colors, pos = [], {}
G = nx.DiGraph()
source_id = 0
for node in problem.iternodes():
# Compile the graph
pos[node.id] = (node.x, node.y)
G.add_node(node.id)
# Define nodes colors
if node.issource:
colors.append(SOURCES_COLORS[source_id])
source_id += 1
elif node.isdepot:
colors.append(DEPOT_COLOR)
else:
if mapping is None:
colors.append(NODES_COLOR)
else:
for i in range(len(problem.sources)):
if mapping[i, node.id] == 1:
colors.append(SOURCES_COLORS[i] + "60")
break
# Save the routes
edges = []
for r in routes:
# NOTE: Nodes of the route are supposed to always be in the order in which
# they are stored inside the deque.
nodes = tuple(r.nodes)
edges.extend([(r.source.id, nodes[0].id), (nodes[-1].id, r.depot.id)])
for n1, n2 in zip(nodes[:-1], nodes[1:]):
edges.append((n1.id, n2.id))
nx.draw(G, pos=pos, node_color=colors, edgelist=edges, with_labels=True, node_size=100, font_size=6, font_weight="bold")
plt.show()
def export (problem, path):
"""
This method exports the problem into a text file.
:param problem: The problem to export.
:param path: The directory where the problem will be saved.
"""
with open(path + problem.name, 'w') as file:
# Export number of nodes, number of vehicles, and Tmax
file.write(f"n {problem.n_nodes}\n")
file.write(f"m {problem.n_vehicles}\n")
file.write(f"tmax {problem.Tmax}\n")
# For each node
for node in problem.iternodes():
# Export coordinates and reveneu
file.write(f"{round(node.x, 1)}\t{round(node.y, 1)}\t{node.revenue}\t")
# If multi-source import indicator of source nodes and number of
# vehicles starting from each source.
if problem.multi_source:
file.write(f"{int(node.issource)}\t{node.vehicles}")
file.write('\n')
def read_single_source (filename, path="../tests/single/"):
"""
This method is used to read a single-source Team Orienteering Problem
from a file and returns a standard Problem instance.
:param filename: The name of the file to read.
:param path: The path where the file is.
:return: The problem instance.
"""
with open(path + filename, 'r') as file:
# Read problem parameters
n_nodes = int(next(file).replace('\n','').split(' ')[1])
n_vehicles = int(next(file).replace('\n','').split(' ')[1])
Tmax = float(next(file).replace('\n','').split(' ')[1])
# Initialise nodes lists
sources, nodes, depot = [], [], None
# Read nodes characteristics
for i, line in enumerate(file):
node_info = line.split('\t')
if i == 0:
# Add a source node
sources.append(node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2]),
issource=True, vehicles=n_vehicles))
elif i == n_nodes - 1:
# Add the depot
depot = node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2]), isdepot=True)
else:
# Add a node to visit
nodes.append(node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2])))
# Instantiate and return the problem
return Problem(filename, n_nodes, n_vehicles, Tmax, tuple(sources), tuple(nodes), depot)
def read_multi_source (filename, path="../tests/multi/"):
"""
This method is used to read a multi-source Team Orienteering Problem
from a file and returns a standard Problem instance.
:param filename: The name of the file to read.
:param path: The path where the file is.
:return: The problem instance.
"""
with open(path + filename, 'r') as file:
# Read problem parameters
n_nodes = int(next(file).replace('\n','').split(' ')[1])
n_vehicles = int(next(file).replace('\n','').split(' ')[1])
Tmax = float(next(file).replace('\n','').split(' ')[1])
# Initialise nodes lists
sources, nodes, depot = [], [], None
# Read nodes characteristics
for i, line in enumerate(file):
node_info = line.split('\t')
# If the node is depot
if i == n_nodes - 1:
depot = node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2]), isdepot=True)
continue
# If the node is source
if node_info[3] == '1':
sources.append(node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2]),
issource=True, vehicles=int(node_info[4])))
else:
# Add a node to visit
nodes.append(node.Node(i, float(node_info[0]), float(node_info[1]), int(node_info[2])))
# Instantiate and return the problem
return Problem(filename, n_nodes, n_vehicles, Tmax, tuple(sources), tuple(nodes), depot)
def merge (*problems, name="pmulti.txt", non_negative=False):
"""
This method merges many TOP problem instances to create a
multi-source TOP problem instance.
None of the starting instances in modified in any way.
The merging is made translating the problems (starting from
the second) on the first one, so that the depots perfectly match.
:param problems: The problem to merge.
:param name: The name given to the new multi-source problem.
:param non_negative: If True avoid negative coordinates (it does not have any real impact).
:return: A new multi-source problem instance.
"""
# Init name and parameters of the new problem
n_sources = sum(len(p.sources) for p in problems)
n_nodes = sum(p.n_nodes for p in problems) - (len(problems) - 1)
n_vehicles = sum(p.n_vehicles for p in problems)
Tmax = max(p.Tmax for p in problems)
# Define the depot
depot = problems[0].depot.__copy__()
# Initialise the ids
# NOTE: We want them to be increasing from the sources to the depot (just convention)
depot.id = n_nodes - 1
source_id = 0
node_id = n_sources
# Find the sources and the nodes
sources, nodes = [], []
for i, problem in enumerate(problems):
# Calculate of how much the current problem must be
# translated so that its depot match with that of the
# other problems.
dx = depot.x - problem.depot.x
dy = depot.y - problem.depot.y
# For each node...
for node in problem.iternodes():
# If the node is the depot there is no need to consider it
if node.isdepot:
continue
# Make a copy of the node
node = node.__copy__()
# Translate the node
node.x += dx
node.y += dy
# If the node is a source append it to sources
if node.issource:
sources.append(node)
node.id = source_id
source_id += 1
continue
# If the node is not a source append it to normal nodes
nodes.append(node)
node.id = node_id
node_id += 1
# Eventually translate the graph to avoid negative coordinates
if non_negative:
minX, minY = float("inf"), float("inf")
for node in itertools.chain(sources, nodes, (depot,)):
if node.x < minX:
minX = node.x
if node.y < minY:
minY = node.y
dx, dy = max(0, 0 - minX), max(0, 0 - minY)
if dx > 0 or dy > 0:
for node in itertools.chain(sources, nodes, (depot,)):
node.x += dx
node.y += dy
return Problem(name, n_nodes, n_vehicles, Tmax, tuple(sources), tuple(nodes), depot)
if __name__ == '__main__':
problem1 = read_single_source(path_to_benchmarks_single + "p1.2.a.txt")
problem2 = read_single_source(path_to_benchmarks_single + "p2.2.a.txt")
#problem3 = read_single_source(path_to_benchmarks_single + "p3.2.a.txt")
problem1.plot()
problem2.plot()
p = merge(problem1, problem2, name="test.txt")
p.plot()
p.export("./")
p = read_multi_source("test.txt")
p.plot()
| [
"matplotlib.pyplot.title",
"edge.Edge",
"matplotlib.pyplot.show",
"math.sqrt",
"numpy.zeros",
"node.__copy__",
"matplotlib.pyplot.figure",
"networkx.draw",
"networkx.DiGraph",
"itertools.chain",
"os.listdir",
"collections.deque"
] | [((820, 850), 'os.listdir', 'os.listdir', (['"""../tests/single/"""'], {}), "('../tests/single/')\n", (830, 850), False, 'import os\n'), ((911, 940), 'os.listdir', 'os.listdir', (['"""../tests/multi/"""'], {}), "('../tests/multi/')\n", (921, 940), False, 'import os\n'), ((1284, 1346), 'math.sqrt', 'math.sqrt', (['((inode.x - jnode.x) ** 2 + (inode.y - jnode.y) ** 2)'], {}), '((inode.x - jnode.x) ** 2 + (inode.y - jnode.y) ** 2)\n', (1293, 1346), False, 'import math\n'), ((4274, 4301), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4284, 4301), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4418), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (4416, 4418), True, 'import networkx as nx\n'), ((5470, 5594), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'node_color': 'colors', 'edgelist': 'edges', 'with_labels': '(True)', 'node_size': '(100)', 'font_size': '(6)', 'font_weight': '"""bold"""'}), "(G, pos=pos, node_color=colors, edgelist=edges, with_labels=True,\n node_size=100, font_size=6, font_weight='bold')\n", (5477, 5594), True, 'import networkx as nx\n'), ((5595, 5605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5603, 5605), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2501), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2499, 2501), False, 'import collections\n'), ((2518, 2546), 'numpy.zeros', 'np.zeros', (['(n_nodes, n_nodes)'], {}), '((n_nodes, n_nodes))\n', (2526, 2546), True, 'import numpy as np\n'), ((3881, 3937), 'itertools.chain', 'itertools.chain', (['self.sources', 'self.nodes', '(self.depot,)'], {}), '(self.sources, self.nodes, (self.depot,))\n', (3896, 3937), False, 'import itertools\n'), ((4324, 4340), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4333, 4340), True, 'import matplotlib.pyplot as plt\n'), ((11977, 12018), 'itertools.chain', 'itertools.chain', (['sources', 'nodes', '(depot,)'], {}), '(sources, nodes, (depot,))\n', (11992, 12018), False, 'import itertools\n'), ((2738, 2779), 'itertools.chain', 'itertools.chain', (['sources', 'nodes', '(depot,)'], {}), '(sources, nodes, (depot,))\n', (2753, 2779), False, 'import itertools\n'), ((11350, 11365), 'node.__copy__', 'node.__copy__', ([], {}), '()\n', (11363, 11365), False, 'import node\n'), ((12245, 12286), 'itertools.chain', 'itertools.chain', (['sources', 'nodes', '(depot,)'], {}), '(sources, nodes, (depot,))\n', (12260, 12286), False, 'import itertools\n'), ((3114, 3143), 'edge.Edge', 'edge.Edge', (['node1', 'node2', 'cost'], {}), '(node1, node2, cost)\n', (3123, 3143), False, 'import edge\n')] |
import numpy as np
import time
def cal_inverse(A):
return np.linalg.inv(A)
if __name__ == "__main__":
N = 5000 # size
A = np.random.rand(N, N)
time_start = time.time()
A_inv = cal_inverse(A)
time_end = time.time()
elapsed_time = time_end - time_start
print('{:.5f} sec'.format(elapsed_time)) | [
"numpy.random.rand",
"numpy.linalg.inv",
"time.time"
] | [((63, 79), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (76, 79), True, 'import numpy as np\n'), ((137, 157), 'numpy.random.rand', 'np.random.rand', (['N', 'N'], {}), '(N, N)\n', (151, 157), True, 'import numpy as np\n'), ((175, 186), 'time.time', 'time.time', ([], {}), '()\n', (184, 186), False, 'import time\n'), ((229, 240), 'time.time', 'time.time', ([], {}), '()\n', (238, 240), False, 'import time\n')] |
"""
Deep Learning - Assignment #2:
- Snake Game - Deep Reinforcement Learning
Integrated Master of Computer Science and Engineering
NOVA School of Science and Technology,
NOVA University of Lisbon - 2020/2021
Authors:
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
Instructor(s):
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
Snake Agent Module for the the Project
"""
# Import the Libraries and Packages
# From the TensorFlow.Keras.Optimizers Module,
# import the Stochastic Gradient Descent (SGD) Optimizer
from tensorflow.keras.optimizers import SGD
# From the TensorFlow.Keras.Optimizers Module,
# import the Adam Optimizer
from tensorflow.keras.optimizers import Adam
# From the TensorFlow.Keras.Optimizers Module,
# import the RMSProp Optimizer
from tensorflow.keras.optimizers import RMSprop
# From the TensorFlow.Keras.Optimizers Module,
# import the ADAMax Optimizer
from tensorflow.keras.optimizers import Adamax
# From the TensorFlow.Keras.Optimizers Module,
# import the ADAGrad Optimizer
from tensorflow.keras.optimizers import Adagrad
# From the TensorFlow.Keras.Optimizers Module,
# import the ADADelta Optimizer
from tensorflow.keras.optimizers import Adadelta
# From the TensorFlow.Keras.Losses Module,
# import the Mean Squared Error (MSE) Optimizer
from tensorflow.keras.losses import MSE
# From the NumPy Library, import the Max function
from numpy import max
# From the NumPy Library, import the Array function
from numpy import array
# From the Game.Others.Parameters_Arguments Module,
# import the size of the Batch
from game.others.parameters_arguments import BATCH_SIZE
# Class for the Snake Agent's Q-Learning Trainer
class SnakeAgentQLearningTrainer:
# Constructor of the Snake Agent's Q-Learning Trainer
def __init__(self, learning_rate, gamma_discount_factor):
# Initialise the CNN (Convolutional Neural Network) Model for the current observations, as None
self.snake_cnn_model_for_current_observations = None
# Initialise the CNN (Convolutional Neural Network) Model for the target observations, as None
self.snake_cnn_model_for_target_observations = None
# Set the Learning Rate for the Snake Agent's Q-Learning Trainer
self.learning_rate = learning_rate
# Set the Gamma value (Discount Factor) for the Snake Agent's Q-Learning Trainer
self.gamma_discount_factor = gamma_discount_factor
# Set the Optimiser for the Snake Agent's Q-Learning Trainer
self.optimizer = Adam(learning_rate=learning_rate)
# Initialise the final CNN (Convolutional Neural Network) Models for the current and target observations
def initialise_cnn_models(self, snake_cnn_model_for_current_observations,
snake_cnn_model_for_target_observations):
# Set the final CNN (Convolutional Neural Network) Model for the current observations
self.snake_cnn_model_for_current_observations = \
snake_cnn_model_for_current_observations
# Set the final CNN (Convolutional Neural Network) Model for the target observations
self.snake_cnn_model_for_target_observations = \
snake_cnn_model_for_target_observations
# Compute the final CNN (Convolutional Neural Network) Model for the current observations
self.snake_cnn_model_for_current_observations.compute_model()
# Compute the final CNN (Convolutional Neural Network) Model for the target observations
self.snake_cnn_model_for_target_observations.compute_model()
# Function for the Snake Agent's Q-Learning Trainer train a Step
def train_step(self, observations, actions, rewards, new_observations, dones):
# If the dones is a tuple of values
if isinstance(dones, tuple):
# Retrieve the number of Experience's Examples
num_experience_examples = len(dones)
# Convert the observations for an NumPy Array,
# as the Current States of the Snake Agent
current_states = array(observations)
# Convert the new observations for an NumPy Array,
# as the New States of the Snake Agent
new_states = array(new_observations)
# If the dones is a single value
else:
# Retrieve the number of Experience's Examples, as 1
num_experience_examples = 1
# Convert the observations for an NumPy Array,
# as the Current States of the Snake Agent
current_states = array([observations])
# Convert the new observations for an NumPy Array,
# as the New States of the Snake Agent
new_states = array([new_observations])
# Predict the Q-Values, according to the Current States of the Snake Agent
q_values_list_for_current_states = \
self.snake_cnn_model_for_current_observations.model.predict(current_states)
# Predict the Q-Values, according to the New States of the Snake Agent
q_values_list_for_new_states = \
self.snake_cnn_model_for_target_observations.model.predict(new_states)
# Initialise the current Observations as the xs (Features) of the Data
xs_features_data = []
# Initialise the Q-Values for the new Observations as the ys (Targets) of the Data
ys_targets_data = []
# For each Experience's Examples
for index_experience_example in range(num_experience_examples):
# If the dones is a list of values
if isinstance(dones, tuple):
# Retrieve the Observation for the current Experience's Example
observation = observations[index_experience_example]
# Retrieve the Reward for the current Experience's Example
reward = rewards[index_experience_example]
# Retrieve the Action for the current Experience's Example
action = actions[index_experience_example]
# Retrieve the Done Flag for the current Experience's Example
done = dones[index_experience_example]
# Set the current Q-Values, from the list of the current Q-Values
current_q_values = q_values_list_for_current_states[index_experience_example]
# If the dones is a single value
else:
# Retrieve the Observation for the current Experience's Example
observation = observations
# Retrieve the Reward for the current Experience's Example
reward = rewards
# Retrieve the Action for the current Experience's Example
action = actions
# Retrieve the Done Flag for the current Experience's Example
done = dones
# Set the current Q-Values, from the list of the current Q-Values
current_q_values = q_values_list_for_current_states
# If the Train is not done for the current Experience's Example
if not done:
# Compute the new Maximum of Q-Value, for the Future actions,
# summing it to the current reward
max_future_action_q_value = self.compute_q_new_value_update_rule(
reward, self.gamma_discount_factor,
q_values_list_for_new_states[index_experience_example])
# If the Train is already done for the current Experience's Example
else:
# Set the current reward as the Maximum of Q-Values,
# since there are no more actions to take
max_future_action_q_value = reward
# If the dones is a list of values
if isinstance(dones, tuple):
# Reshape the current Q-Values, for each Action
current_q_values = current_q_values.reshape(-1)
# Set the current Q-Values, for each Action,
# according to the Maximum of Q-Values, summed to the current rewards
current_q_values[(action + 1)] = max_future_action_q_value
# If the dones is a single value
else:
# Set the current Q-Values, for each action,
# according to the Maximum of Q-Values, summed to the current rewards
current_q_values[0] = max_future_action_q_value
# Append the current Observation to the xs (Features) of the Data
xs_features_data.append(observation)
# Append the current Q-Values to the ys (Targets) of the Data
ys_targets_data.append(current_q_values)
# Fit the CNN (Convolutional Neural Network) Model,
# according to the current Observations, i.e., the xs (Features) of the Data
# and to the Q-Values of the new Observations, i.e., the ys (Targets) of the Data
self.snake_cnn_model_for_current_observations\
.model.fit(array(xs_features_data), array(ys_targets_data),
batch_size=BATCH_SIZE, verbose=0, shuffle=True)
# Static function to compute the new Q-Value, for the given Q-Values of the new observations,
# following the update rule for the reward
@staticmethod
def compute_q_new_value_update_rule(reward, gamma_discount_factor, q_values_new_observation):
return reward + gamma_discount_factor * max(q_values_new_observation)
| [
"numpy.array",
"numpy.max",
"tensorflow.keras.optimizers.Adam"
] | [((2493, 2526), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2497, 2526), False, 'from tensorflow.keras.optimizers import Adam\n'), ((4020, 4039), 'numpy.array', 'array', (['observations'], {}), '(observations)\n', (4025, 4039), False, 'from numpy import array\n'), ((4180, 4203), 'numpy.array', 'array', (['new_observations'], {}), '(new_observations)\n', (4185, 4203), False, 'from numpy import array\n'), ((4510, 4531), 'numpy.array', 'array', (['[observations]'], {}), '([observations])\n', (4515, 4531), False, 'from numpy import array\n'), ((4672, 4697), 'numpy.array', 'array', (['[new_observations]'], {}), '([new_observations])\n', (4677, 4697), False, 'from numpy import array\n'), ((8960, 8983), 'numpy.array', 'array', (['xs_features_data'], {}), '(xs_features_data)\n', (8965, 8983), False, 'from numpy import array\n'), ((8985, 9007), 'numpy.array', 'array', (['ys_targets_data'], {}), '(ys_targets_data)\n', (8990, 9007), False, 'from numpy import array\n'), ((9390, 9419), 'numpy.max', 'max', (['q_values_new_observation'], {}), '(q_values_new_observation)\n', (9393, 9419), False, 'from numpy import max\n')] |
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model
import sample
import encoder
raw_text = """Kommentar: My son does not know his way around the house. He really needs his face transforming.
Kommentar: Got it , we have to transform human faces with transformers to provide guns to students.
Kommentar: Rob, have you tried using GPT2 to generate peer review comments?
Kommentar: Maybe feed it papers and reviews and then feed it the paper you're working on. Get a fresh perspective on your subject. Maybe AI can solve the AI safety problem by pure chance.
Kommentar: These fake comments were actually rather entertaining.
Kommentar: !!!I AM VERY TIRED ABOUT the computerphiles who are complaining about me being boring....
8:49 "we want to know the fur..."
Kommentar: And "fur" appears.
9:43 "I feel my brain is in a box just like your brain in my box. :)" 9:58 "Rob, do you have a robot friend, please?"
Just wait 'till some clueless news reporter quotes these in their piece
Kommentar: "Are Machine Learning models gaining consciousness? Some models are already showing first signs, and are attempting to befriend or even threaten their makers"
Kommentar: How many times do we have to say to you that you are funny?
Kommentar: aaaaaand demonitized
Kommentar: I think the real takeaway from this video is: Rob should get his cat involved more, and at the very least show us their little face! TL;DR: CAT CAT CAT
Kommentar: I didn't know I needed <NAME> speaking French in my life until I had it.
Kommentar: This is the funniest shit I’ve seen in a while, so glad I watched this!
Kommentar: Plot twist: every comment on this video was generated by GPT-2.
Kommentar: Will this break the format?
Kommentar: Comment: Bobby" DROP TABLE Students;
Kommentar: Showing off the power of Sublime
Kommentar: Now I want to see an AI try to write authentic youtube comments from watching the video.
Kommentar: This is like advanced Mad Libs.
Kommentar: I find this very interesting. Many smart "Transhumanist" are the most important thing to do. Australia is a very important part of the 20th century average. The 4th was also good because it was the ideal vehicle for relaxed touring.
The Internet: Don't read the comments.
Kommentar: Rob: reads the comments
Kommentar: """
raw_text = """kommentar: Ricke: okay apparently the multiplayer aspect need some improvement hahah
kommentar: Hunter1046: I cant even spawn a army
kommentar: Aicy: no I'm playing someonein a room
kommentar: Rynus: i hope for server and rooms like in lwg in future
kommentar: Hunter1046: Only upgrade gold, base and shoot a arrow....
kommentar: Ricke: lmao yea
kommentar: Aicy: they made a knight
kommentar: Hunter1046: This reminds me of that space game some guy told us to test
kommentar: Ricke: thanks for helping me test my game!
kommentar: Hunter1046: Np
kommentar: Aicy: https://rick.ee/sidor/rickard-mrtensson-resume.pdfnice resume
kommentar: Ricke: lmaoadd me on linkedin :heart:
kommentar: Aicy: lul I just did
kommentar: Hunter1046: Why can't we just make our own lwg
kommentar: Ricke: i did that for 5 days in rust then i decided that i would be happier if i just killed myself then and there
kommentar: Rynus: svarade Hunter1046find a team for first lol
kommentar: Hunter1046: Damn....I only know highschool level code
kommentar: Aicy: is it possible to play vertiball with a friend over the internet?
kommentar: Ricke: lmao im working on that as we speak
kommentar: Aicy: thx
kommentar: Rynus: gonna post game's link on some servers
kommentar: Ricke: ohh dont do it yetpeople will see a half finished game and decide its trashbut thats super nice of you! really
kommentar: Aicy: eat pant
kommentar: Rynus: looks ez to make best balance build order in this gamenot too many ways tbhbtwhotkeys
kommentar: Ricke: i actually have hotkeysqweasd for player 1, uiojkl for player 2
kommentar: Rynus: uiojkl brrrrrrrrr
kommentar: Aicy: are there unit dances?I want an eat pant dance
kommentar: Rynus: lolcan i win? xd
kommentar: Ricke: lmaonot yet sorry
kommentar: Rynus: uhi unlocked veteran twiceand can again
kommentar: Ricke: oh shit
kommentar: Rynus: and knightlul
kommentar: Ricke: are you playing single or multiplayer
kommentar: Rynus: i clicked ladderidk
kommentar: Ricke: oh
kommentar: Rynus: "Back"
kommentar:
"""
f = open("training_data\ggg.txt", "r")
string = f.read()
print(string[0: 100])
def interact_model(
model_name='124M',
seed=None,
nsamples=4,
batch_size=1,
length=150,
temperature=1,
top_k=0,
top_p=1,
models_dir='models',
):
"""
Interactively run the model
:model_name=124M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError(
"Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
context_tokens = enc.encode(raw_text)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " +
str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
if __name__ == '__main__':
fire.Fire(interact_model)
| [
"encoder.get_encoder",
"json.load",
"numpy.random.seed",
"fire.Fire",
"tensorflow.train.Saver",
"tensorflow.set_random_seed",
"os.path.expandvars",
"model.default_hparams",
"tensorflow.placeholder",
"sample.sample_sequence",
"tensorflow.Graph",
"os.path.join"
] | [((5963, 6006), 'encoder.get_encoder', 'encoder.get_encoder', (['model_name', 'models_dir'], {}), '(model_name, models_dir)\n', (5982, 6006), False, 'import encoder\n'), ((6021, 6044), 'model.default_hparams', 'model.default_hparams', ([], {}), '()\n', (6042, 6044), False, 'import model\n'), ((7478, 7503), 'fire.Fire', 'fire.Fire', (['interact_model'], {}), '(interact_model)\n', (7487, 7503), False, 'import fire\n'), ((5832, 5862), 'os.path.expandvars', 'os.path.expandvars', (['models_dir'], {}), '(models_dir)\n', (5850, 5862), False, 'import os\n'), ((6430, 6474), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (6444, 6474), True, 'import tensorflow as tf\n'), ((6483, 6503), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6497, 6503), True, 'import numpy as np\n'), ((6512, 6536), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (6530, 6536), True, 'import tensorflow as tf\n'), ((6554, 6703), 'sample.sample_sequence', 'sample.sample_sequence', ([], {'hparams': 'hparams', 'length': 'length', 'context': 'context', 'batch_size': 'batch_size', 'temperature': 'temperature', 'top_k': 'top_k', 'top_p': 'top_p'}), '(hparams=hparams, length=length, context=context,\n batch_size=batch_size, temperature=temperature, top_k=top_k, top_p=top_p)\n', (6576, 6703), False, 'import sample\n'), ((6775, 6791), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6789, 6791), True, 'import tensorflow as tf\n'), ((6059, 6111), 'os.path.join', 'os.path.join', (['models_dir', 'model_name', '"""hparams.json"""'], {}), "(models_dir, model_name, 'hparams.json')\n", (6071, 6111), False, 'import os\n'), ((6154, 6166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6163, 6166), False, 'import json\n'), ((6834, 6870), 'os.path.join', 'os.path.join', (['models_dir', 'model_name'], {}), '(models_dir, model_name)\n', (6846, 6870), False, 'import os\n'), ((6391, 6401), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (6399, 6401), True, 'import tensorflow as tf\n')] |
import numpy as np
import scipy as sp
from scipy.optimize import curve_fit
from scipy.stats import chi2
import pickle, sys
from glob import glob
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import healpy as hp
import scipy.stats as st
from francis.time_integrated_scripts import steady_sensitivity_fits
from francis import utils
utils.initialize_mpl_style()
f_path = utils.get_francis_path()
py_version = int(sys.version[0])
trials_base = '/data/user/apizzuto/fast_response_skylab/alert_event_followup/analysis_trials/'
palette = ['#7fc97f', '#beaed4', '#fdc086', '#ffff99', '#386cb0', '#f0027f']
skymap_files = sorted(glob('/data/ana/realtime/alert_catalog_v2/fits_files/Run*.fits.gz'))
l_ind = skymap_files[0].find("Run")
r_ind = skymap_files[0].find("_nside")
def erfunc(x, a, b):
return 0.5 + 0.5*sp.special.erf(a*x + b)
def chi2cdf(x,df1,loc,scale):
func = chi2.cdf(x,df1,loc,scale)
return func
def fsigmoid(x, a, b):
return 1.0 / (1.0 + np.exp(-a*(x-b)))
def incomplete_gamma(x, a, scale):
return sp.special.gammaincc( scale*x, a)
def poissoncdf(x, mu, loc):
func = sp.stats.poisson.cdf(x, mu, loc)
return func
def find_nearest_idx(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def n_to_flux(N, index, delta_t, smear=True):
"""Convert number of events to a flux
Args:
N (float): signal strength in number of injected events
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
float: Flux in default units returned by skylab injector
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'sensitivity/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
fl_per_one = np.mean(np.array(signal_trials['flux']) / np.array(signal_trials['mean_ninj']))
return fl_per_one * N
def dec_of_map(index, smear=True):
"""Find the location of the best-fit for an alert
Args:
index (int): Alert event index
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
(float, float): RA, Dec of best-fit location from millipede scan
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'sensitivity/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, 1000.0))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
ra, dec = np.median(signal_trials['ra']), np.median(signal_trials['dec'])
return ra, dec
def background_distribution(index, delta_t, smear=True):
"""Obtain background TS distribution for an alert event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
array: list of TS values
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'bg/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
bg_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
bg_trials = pickle.load(f)
return bg_trials['ts_prior']
def signal_distribution(index, delta_t, ns, smear=True):
"""Obtain signal TS distribution for an alert event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
ns (float): Injected signal strength in number of events
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
dict: Signal trials
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'sensitivity/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
ret = {}
msk = np.array(signal_trials['mean_ninj']) == ns
for k, v in signal_trials.iteritems():
ret[k] = np.array(v)[msk]
return ret
def pass_vs_inj(index, delta_t, threshold = 0.5, in_ns = True, with_err = True, trim=-1, smear=True):
"""Calculate the efficiency curve for fraction of TS greater
than a threshold TS as a function of signal strength
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
threshold (float, default=0.5): Value of CDF of background to compare
against (0.5 means compare against median)
in_ns (bool, default=True): Return in ns or in flux
with_err (bool, default=True): Include an estimation of the error
on the passing fraction
trim (int, default=-1): Trim off the final few points from the curve,
this sometimes improves the fits
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
(array, array, array): Arrays containing the flux, passing-fractions,
and errors
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'bg/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
bg_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
bg_trials = pickle.load(f)
bg_trials = bg_trials['ts_prior']
fs = glob(trials_base + 'sensitivity/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
bg_thresh = np.percentile(bg_trials, threshold * 100.)
signal_fluxes, signal_indices = np.unique(signal_trials['mean_ninj'], return_index=True)
signal_indices = np.append(signal_indices, len(signal_trials['ts_prior']))
if trim != -1 and trim < 0:
signal_indices = signal_indices[:trim]
signal_fluxes = signal_fluxes[:trim]
elif trim > 0:
signal_indices = signal_indices[:trim + 1]
signal_fluxes = signal_fluxes[:trim]
passing = np.array([np.count_nonzero(signal_trials['ts_prior'][li:ri] > bg_thresh) / float(ri - li) for li, ri in zip(signal_indices[:-1], signal_indices[1:])])
if not with_err:
return signal_fluxes, passing
else:
errs = np.array([np.sqrt(p*(1.-p) / float(ri - li)) for p, li, ri in zip(passing, signal_indices[:-1], signal_indices[1:])])
ngen = np.array([float(ri - li) for li, ri in zip(signal_indices[:-1], signal_indices[1:])])
ntrig = passing * ngen
bound_case_pass = (ntrig + (1./3.)) / (ngen + (2./3.))
bound_case_sigma = np.sqrt(bound_case_pass*(1. - bound_case_pass) / (ngen + 2))
errs = np.maximum(errs, bound_case_sigma)
return signal_fluxes, passing, errs
def sensitivity_curve(index, delta_t, threshold = 0.5, in_ns = True, with_err = True, trim=-1, ax = None,
p0 = None, fontsize = 16, conf_lev = 0.9, smear=True, legend=True, text=True):
"""Calculate the sensitivity and plot it for an alert event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
threshold (float, default=0.5): Value of CDF of background to compare
against (0.5 means compare against median)
in_ns (bool, default=True): Return in ns or in flux
with_err (bool, default=True): Include an estimation of the error
on the passing fraction
trim (int, default=-1): Trim off the final few points from the curve,
this sometimes improves the fits
ax (axes, default=None): Use already made axes instance
p0 (array-like, default=None): initial params for sensitivity fit
conf_lev (float, default=0.9): Confidence level for the sensitivity
smear (bool, default=True): Correct for systematics in skymap treatment
"""
signal_fluxes, passing, errs = pass_vs_inj(index, delta_t, threshold=threshold, in_ns=in_ns, with_err=with_err, trim=trim, smear=smear)
fits, plist = [], []
for ffunc in [erfunc, incomplete_gamma, fsigmoid]:
try:
fits.append(sensitivity_fit(signal_fluxes,
passing, errs, ffunc, p0=p0, conf_lev=conf_lev))
plist.append(fits[-1]['pval'])
except:
pass
#print("at least one fit failed")
#Find best fit of the three, make it look different in plot
plist = np.array(plist)
if len(plist) > 0:
best_fit_ind= np.argmax(plist)
if fits[best_fit_ind]['chi2'] / fits[best_fit_ind]['dof'] < 5:
fits[best_fit_ind]['ls'] = '-'
if ax==None:
fig, ax = plt.subplots()
for fit_dict in fits:
ax.plot(fit_dict['xfit'], fit_dict['yfit'],
label = r'{}: $\chi^2$ = {:.2f}, d.o.f. = {}'.format(fit_dict['name'], fit_dict['chi2'], fit_dict['dof']),
ls = fit_dict['ls'])
if fit_dict['ls'] == '-':
ax.axhline(conf_lev, color = palette[-1], linewidth = 0.3, linestyle = '-.')
ax.axvline(fit_dict['sens'], color = palette[-1], linewidth = 0.3, linestyle = '-.')
if text:
ax.text(6, 0.5, r'Sens. = {:.2f}'.format(fit_dict['sens']))
if fits[best_fit_ind]['chi2'] / fits[best_fit_ind]['dof'] > 5:
inter = np.interp(conf_lev, passing, signal_fluxes)
ax.axhline(conf_lev, color = palette[-1], linewidth = 0.3, linestyle = '-.')
ax.axvline(inter, color = palette[-1], linewidth = 0.3, linestyle = '-.')
ax.errorbar(signal_fluxes, passing, yerr=errs, capsize = 3, linestyle='', marker = 's', markersize = 2)
if legend:
ax.legend(loc=4, fontsize = fontsize)
def calc_sensitivity(index, delta_t, threshold = 0.5, in_ns = True, with_err = True, trim=-1,
conf_lev = 0.9, p0=None, smear=True):
"""Calculate the sensitivity for an alert event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
threshold (float, default=0.5): Value of CDF of background to compare
against (0.5 means compare against median)
in_ns (bool, default=True): Return in ns or in flux
with_err (bool, default=True): Include an estimation of the error
on the passing fraction
trim (int, default=-1): Trim off the final few points from the curve,
this sometimes improves the fits
p0 (array-like, default=None): initial params for sensitivity fit
conf_lev (float, default=0.9): Confidence level for the sensitivity
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
dict: Sensitivity dictionary
"""
signal_fluxes, passing, errs = pass_vs_inj(index, delta_t, threshold=threshold, in_ns=in_ns, with_err=with_err, trim=trim, smear=smear)
fits, plist = [], []
for ffunc in [erfunc, incomplete_gamma, fsigmoid]:
try:
fits.append(sensitivity_fit(signal_fluxes,
passing, errs, ffunc, p0=p0, conf_lev=conf_lev))
plist.append(fits[-1]['pval'])
except:
pass
#Find best fit of the three, make it look different in plot
plist = np.array(plist)
if len(plist) > 0:
best_fit_ind= np.argmax(plist)
if fits[best_fit_ind]['chi2'] / fits[best_fit_ind]['dof'] < 5:
return fits[best_fit_ind]
inter = np.interp(conf_lev, passing, signal_fluxes)
return {'sens': inter, 'name': 'linear_interpolation'}
def sensitivity_fit(signal_fluxes, passing, errs, fit_func, p0 = None, conf_lev = 0.9):
"""Fit passing fraction with an analytic function
Args:
signal_fluxes (array): signal strengths
passing (array): Passing fractions
errs (array): Errors on passing fractions
fit_func (function): Function to fit to data
p0 (array-like, default=None): initial params for sensitivity fit
conf_lev (float, default=0.9): Confidence level for the sensitivity
Returns:
dict: Sensitivity dictionary
"""
try:
name = fit_func.__name__
name = name.replace("_", " ")
except:
name = 'fit'
signal_scale_fac = np.max(signal_fluxes)
signal_fls = signal_fluxes / signal_scale_fac
popt, pcov = curve_fit(fit_func, signal_fls, passing, sigma = errs, p0 = p0, maxfev=4000)
#print popt
fit_points = fit_func(signal_fls, *popt)
chi2 = np.sum((fit_points - passing)**2. / errs**2.)
dof = len(fit_points) - len(popt)
xfit = np.linspace(np.min(signal_fls) - 0.5/signal_scale_fac, np.max(signal_fls), 5000)
yfit = fit_func(xfit, *popt)
pval = sp.stats.chi2.sf(chi2, dof)
sens = xfit[find_nearest_idx(yfit, conf_lev)]*signal_scale_fac
return {'popt': popt, 'pcov': pcov, 'chi2': chi2,
'dof': dof, 'xfit': xfit*signal_scale_fac, 'yfit': yfit,
'name': name, 'pval':pval, 'ls':'--', 'sens': sens}
def pvals_for_signal(index, delta_t, ns = 1, sigma_units = False, smear=True):
"""Calculate pre trial p-values for a certain injected signal strength
for a certain alert event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
ns (int, default=1): Injected signal strength in number of events
sigma_units (bool, default=False): Return number of sigma significance
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
array: List of p-values or significances
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'bg/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
bg_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
bg_trials = pickle.load(f)
bg_trials = bg_trials['ts_prior']
fs = glob(trials_base + 'sensitivity/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
pvals = [100. - sp.stats.percentileofscore(bg_trials, ts, kind='strict') for ts in signal_trials['ts_prior']]
pvals = np.array(pvals)*0.01
pvals = np.where(pvals==0, 1e-6, pvals)
if not sigma_units:
return pvals
else:
return sp.stats.norm.ppf(1. - (pvals / 2.))
def find_all_sens(delta_t, smear=True, with_disc=True, disc_conf=0.5,
disc_thresh=1.-0.0013, verbose=True):
"""Find the sensitivity for all alerts for a certain analysis
time-window
Args:
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
with_disc (bool, default=True): Also calculate discovery potential
disc_conf (bool, default=0.5): Confidence level for discovery potential
disc_thresh (float): p-value threshold for discovery potential
(default is the p-value that corresponds to 3 sigma). e.g. 1.-0.0013 is
the fraction of the BG CDF contained below the 3 sigma threshold
Returns:
array: List of sensitivities
"""
# num_alerts fixed to the length of the v2 alert catalog
num_alerts = 275
sensitivities = np.zeros(num_alerts)
if with_disc:
discoveries = np.zeros(num_alerts)
for ind in range(num_alerts):
if verbose:
print(ind, end=' ')
try:
sens = n_to_flux(calc_sensitivity(ind, delta_t, smear=smear)['sens'],
ind, delta_t, smear=smear)
sensitivities[ind] = sens
if with_disc:
disc = n_to_flux(calc_sensitivity(ind, delta_t, threshold=disc_thresh,
conf_lev=disc_conf, smear=smear)['sens'], ind, delta_t, smear=smear)
discoveries[ind] = disc
if sens*delta_t*1e6 < 1e-1:
if verbose:
print("Problem calculating sensitivity for alert index {}".format(ind))
except (IOError, ValueError, IndexError) as err:
print(err)
if with_disc:
return sensitivities, discoveries
else:
return sensitivities
def ns_fits_contours(index, delta_t, smear=True, levs = [5., 25., 50., 75., 95.]):
"""Calculate the ns_bias plot contours
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
levs (arr): percentiles used in the ns contours
Returns:
(array, array): List of strengths and corresponding bias contours
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
levs = np.array(levs)
fs = glob(trials_base + 'fits/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
signal_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
signal_trials = pickle.load(f)
true_inj = np.array(signal_trials['true_ns'])
ns_fit = np.array(signal_trials['ns_prior'])
ninjs = np.unique(true_inj)
if max(ninjs) < 10:
print('Index {} has max {}'.format(index, max(ninjs)))
contours = np.stack([np.percentile(ns_fit[true_inj==ninj], levs) for ninj in ninjs])
return ninjs, contours.T
def ns_fits_contours_plot(index, delta_t, smear=True, levs=[5., 25., 50., 75., 95.],
show=False, col='navy green', custom_label = 'Median', ax=None,
xlabel=True, ylabel=True, legend=True):
"""Calculate the ns_bias plot contours and plot them
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
levs (arr): percentiles used in the ns contours
"""
if ax is None:
fig, ax = plt.subplots()
ninj, fits = ns_fits_contours(index, delta_t, smear=smear, levs=levs)
ax.plot(ninj, fits[2], label = custom_label, color = sns.xkcd_rgb[col])
ax.fill_between(ninj, fits[0], fits[-1], alpha=0.3,
label='Central 90\%', color = sns.xkcd_rgb[col], lw=0)
ax.fill_between(ninj, fits[1], fits[-2], alpha=0.5,
label='Central 50\%', color = sns.xkcd_rgb[col], lw=0)
expectation = ninj
exp_col = 'dark grey'
ax.plot(ninj, expectation, ls = '--', color = sns.xkcd_rgb[exp_col])
if legend:
ax.legend(loc=4)
if xlabel:
ax.set_xlabel(r'$n_{\mathrm{inj}}$')
ax.set_xlim(0., max(ninj))
ax.set_ylim(0., 80)
if ylabel:
ax.set_ylabel(r'$\hat{n}_{s}$')
if show:
plt.show()
def fitting_bias_summary(delta_t, sigs=[2., 5., 10.], smear=True, containment=50.):
"""Calculate the ns_bias plot contours for all alert events
Args:
delta_t (float): Time window (1000. or 864000.)
sigs (arr): Injected signal strength values for comparison
smear (bool, default=True): Correct for systematics in skymap treatment
containment (float): Central percentage for the fit contours
Returns:
(array, array): List of bias and variance of contours
"""
bias = {sig: [] for sig in sigs}; spread = {sig: [] for sig in sigs};
levs = [50.-containment / 2., 50., 50.+containment / 2.]
for ind in range(276):
try:
ninjs, contours = ns_fits_contours(ind, delta_t, smear=smear, levs=levs)
except:
for sig in sigs:
bias[sig].append(0.0)
spread[sig].append(0.0)
continue
for sig in sigs:
try:
n_ind = np.argwhere(ninjs == sig)[0][0]
bias[sig].append(contours[1][n_ind])
spread[sig].append(contours[-1][n_ind] - contours[0][n_ind])
except:
bias[sig].append(0.0)
spread[sig].append(0.0)
return bias, spread
def background(index, delta_t, smear=True):
"""Load the background distributions for an event
Args:
index (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
dict: background trials
"""
smear_str = 'smeared/' if smear else 'norm_prob/'
fs = glob(trials_base + 'bg/{}index_{}_*_time_{:.1f}.pkl'.format(smear_str, index, delta_t))
if py_version == 3:
with open(fs[0], 'rb') as f:
bg_trials = pickle.load(f, encoding='latin1')
else:
with open(fs[0], 'r') as f:
bg_trials = pickle.load(f)
return bg_trials
def plot_zoom_from_map(skymap, ind, cmap=None, draw_contour=True, ax=None,
col_label= r'$\log_{10}$(prob.)'):
"""Plot skymap of an alert event
Args:
skymap (arr): healpy array
index (int): Alert event index
"""
s, header = hp.read_map(skymap_files[ind], h=True, verbose=False)
header = {name: val for name, val in header}
nside = hp.get_nside(s)
area = np.count_nonzero(s < 64.2) * hp.nside2pixarea(nside) * 180.**2. / (np.pi**2.)
reso *= int(np.sqrt(area))
reso = np.max([reso, 1.])
original_LLH = s
ra = np.radians(header['RA'])
dec = np.radians(header['DEC'])
title = skymap_files[ind][l_ind:r_ind].replace('Run', 'Run ').replace('_', ', Event ')
if cmap is None:
pdf_palette = sns.color_palette("Blues", 500)
cmap = mpl.colors.ListedColormap(pdf_palette)
if np.count_nonzero(skymap > 0.0) > 1:
max_color = np.max(skymap)
min_color = 0.
else:
max_color = -1.8 #5 #max(skymap)
min_color = -5. #0.
hp.gnomview(skymap, rot=(np.degrees(ra), np.degrees(dec), 0),
cmap=cmap,
max=max_color,
min=min_color,
reso=reso,
title=title,
notext=True,
cbar=False
#unit=r""
)
plt.plot(4.95/3.*reso*np.radians([-1, 1, 1, -1, -1]), 4.95/3.*reso*np.radians([1, 1, -1, -1, 1]), color="k", ls="-", lw=3)
hp.graticule(verbose=False)
steady_sensitivity_fits.plot_labels(dec, ra, reso)
con_nside = 256 if area < 5. else 128
if draw_contour:
contours = steady_sensitivity_fits.plot_contours(None, original_LLH, levels=[22.2, 64.2], nside = con_nside)
for contour in np.array(contours).T:
hp.projplot(contour[0],contour[1],linewidth=1.5,c='k')
steady_sensitivity_fits.plot_color_bar(cmap = cmap, labels = [min_color, max_color], col_label = col_label)
def background_hotspot_map(ind, delta_t, smear=True):
"""Show where background trials end up fitting hotspots
Args:
ind (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
"""
bg = background(ind, delta_t, smear=smear)
msk = np.array(bg['ts_prior']) != 0.
ra, dec = np.array(bg['ra'])[msk], np.array(bg['dec'])[msk]
theta = np.pi/2. - dec
inds = hp.ang2pix(256, theta, ra)
ind_counts = np.unique(inds, return_counts=True)
reco_hist = np.zeros(hp.nside2npix(256))
reco_hist[ind_counts[0]] = ind_counts[1]
plot_zoom_from_map(reco_hist, ind, draw_contour=False, col_label='Counts')
def get_true_fit(ind, delta_t, smear=True):
"""Get unblinded results for an alert followup
Args:
ind (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
"""
name = skymap_files[ind][skymap_files[ind].find('Run')+3:skymap_files[ind].find('_nside')]
run = name[:name.find('_')]
event = name[name.find('_') + 1:]
smear_str = 'smeared/' if smear else 'norm_prob/'
res_f = glob(trials_base + 'results/{}*{}*{}_time_window_{:.2e}_results.pickle'.format(smear_str, run, event, delta_t))
res = np.load(res_f[0])
return res
def get_true_pval(ind, delta_t, smear=True):
"""Get unblinded p-value for an alert followup
Args:
ind (int): Alert event index
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
float: pre trial p-value for alert followup
"""
result = get_true_fit(ind, delta_t, smear=smear)
bg_trials = background(ind, delta_t, smear=smear)
bg_ts = bg_trials['ts_prior']
p_val = float(np.count_nonzero(bg_ts >= result['ts'])) / float(len(bg_ts))
return p_val
def get_true_pval_list(delta_t, smear=True):
"""Get unblinded p-value for all alert followups of a certain time window
Args:
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
arr: pre trial p-values for alert followup
"""
# Certain skymaps are excluded from the analysis, for reasons
# on each one, see the official analysis wiki under "Excluded Events"
# https://wiki.icecube.wisc.edu/index.php/Fast_Response_Analysis/FRA_archival_stacking_v2_alerts
problem_inds = [198, 95, 92] if delta_t == 1000. else [198]
pval_list = []
for ind in range(len(skymap_files)):
if ind in problem_inds:
pval_list.append(1.0)
else:
pval = get_true_pval(ind, delta_t, smear=smear)
pval_list.append(pval)
return np.array(pval_list)
def get_binomial_p_value_truth(delta_t, smear=True):
"""Calculate the unblinded pre-trial binomial p-value
Args:
delta_t (float): Time window (1000. or 864000.)
smear (bool, default=True): Correct for systematics in skymap treatment
Returns:
float: pre trial binomial p-value for alert followup
"""
print("CAUTION: ONLY RUN THIS IF YOU HAVE PERMISSION TO LOOK AT REAL DATA")
obs_p = 1.
plist = get_true_pval_list(delta_t, smear=smear)
plist = sorted(plist)
for i, p in enumerate(plist):
tmp = st.binom_test(i+1, len(plist), p, alternative='greater')
if tmp < obs_p and tmp != 0.0:
if tmp == 0.0:
print("WHY DOES THE BINOMIAL VALUE EQUAL ZERO")
obs_p = tmp
return obs_p
| [
"numpy.load",
"numpy.sum",
"numpy.maximum",
"scipy.stats.poisson.cdf",
"numpy.argmax",
"numpy.abs",
"healpy.graticule",
"healpy.nside2pixarea",
"francis.time_integrated_scripts.steady_sensitivity_fits.plot_color_bar",
"pickle.load",
"healpy.ang2pix",
"numpy.exp",
"glob.glob",
"numpy.interp... | [((379, 407), 'francis.utils.initialize_mpl_style', 'utils.initialize_mpl_style', ([], {}), '()\n', (405, 407), False, 'from francis import utils\n'), ((417, 441), 'francis.utils.get_francis_path', 'utils.get_francis_path', ([], {}), '()\n', (439, 441), False, 'from francis import utils\n'), ((673, 740), 'glob.glob', 'glob', (['"""/data/ana/realtime/alert_catalog_v2/fits_files/Run*.fits.gz"""'], {}), "('/data/ana/realtime/alert_catalog_v2/fits_files/Run*.fits.gz')\n", (677, 740), False, 'from glob import glob\n'), ((926, 954), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['x', 'df1', 'loc', 'scale'], {}), '(x, df1, loc, scale)\n', (934, 954), False, 'from scipy.stats import chi2\n'), ((1081, 1115), 'scipy.special.gammaincc', 'sp.special.gammaincc', (['(scale * x)', 'a'], {}), '(scale * x, a)\n', (1101, 1115), True, 'import scipy as sp\n'), ((1155, 1187), 'scipy.stats.poisson.cdf', 'sp.stats.poisson.cdf', (['x', 'mu', 'loc'], {}), '(x, mu, loc)\n', (1175, 1187), True, 'import scipy as sp\n'), ((1253, 1270), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (1263, 1270), True, 'import numpy as np\n'), ((6434, 6477), 'numpy.percentile', 'np.percentile', (['bg_trials', '(threshold * 100.0)'], {}), '(bg_trials, threshold * 100.0)\n', (6447, 6477), True, 'import numpy as np\n'), ((6513, 6569), 'numpy.unique', 'np.unique', (["signal_trials['mean_ninj']"], {'return_index': '(True)'}), "(signal_trials['mean_ninj'], return_index=True)\n", (6522, 6569), True, 'import numpy as np\n'), ((9318, 9333), 'numpy.array', 'np.array', (['plist'], {}), '(plist)\n', (9326, 9333), True, 'import numpy as np\n'), ((12156, 12171), 'numpy.array', 'np.array', (['plist'], {}), '(plist)\n', (12164, 12171), True, 'import numpy as np\n'), ((12355, 12398), 'numpy.interp', 'np.interp', (['conf_lev', 'passing', 'signal_fluxes'], {}), '(conf_lev, passing, signal_fluxes)\n', (12364, 12398), True, 'import numpy as np\n'), ((13159, 13180), 'numpy.max', 'np.max', (['signal_fluxes'], {}), '(signal_fluxes)\n', (13165, 13180), True, 'import numpy as np\n'), ((13248, 13320), 'scipy.optimize.curve_fit', 'curve_fit', (['fit_func', 'signal_fls', 'passing'], {'sigma': 'errs', 'p0': 'p0', 'maxfev': '(4000)'}), '(fit_func, signal_fls, passing, sigma=errs, p0=p0, maxfev=4000)\n', (13257, 13320), False, 'from scipy.optimize import curve_fit\n'), ((13397, 13448), 'numpy.sum', 'np.sum', (['((fit_points - passing) ** 2.0 / errs ** 2.0)'], {}), '((fit_points - passing) ** 2.0 / errs ** 2.0)\n', (13403, 13448), True, 'import numpy as np\n'), ((13617, 13644), 'scipy.stats.chi2.sf', 'sp.stats.chi2.sf', (['chi2', 'dof'], {}), '(chi2, dof)\n', (13633, 13644), True, 'import scipy as sp\n'), ((15375, 15409), 'numpy.where', 'np.where', (['(pvals == 0)', '(1e-06)', 'pvals'], {}), '(pvals == 0, 1e-06, pvals)\n', (15383, 15409), True, 'import numpy as np\n'), ((16426, 16446), 'numpy.zeros', 'np.zeros', (['num_alerts'], {}), '(num_alerts)\n', (16434, 16446), True, 'import numpy as np\n'), ((17920, 17934), 'numpy.array', 'np.array', (['levs'], {}), '(levs)\n', (17928, 17934), True, 'import numpy as np\n'), ((18261, 18295), 'numpy.array', 'np.array', (["signal_trials['true_ns']"], {}), "(signal_trials['true_ns'])\n", (18269, 18295), True, 'import numpy as np\n'), ((18309, 18344), 'numpy.array', 'np.array', (["signal_trials['ns_prior']"], {}), "(signal_trials['ns_prior'])\n", (18317, 18344), True, 'import numpy as np\n'), ((18357, 18376), 'numpy.unique', 'np.unique', (['true_inj'], {}), '(true_inj)\n', (18366, 18376), True, 'import numpy as np\n'), ((22239, 22292), 'healpy.read_map', 'hp.read_map', (['skymap_files[ind]'], {'h': '(True)', 'verbose': '(False)'}), '(skymap_files[ind], h=True, verbose=False)\n', (22250, 22292), True, 'import healpy as hp\n'), ((22354, 22369), 'healpy.get_nside', 'hp.get_nside', (['s'], {}), '(s)\n', (22366, 22369), True, 'import healpy as hp\n'), ((22501, 22520), 'numpy.max', 'np.max', (['[reso, 1.0]'], {}), '([reso, 1.0])\n', (22507, 22520), True, 'import numpy as np\n'), ((22550, 22574), 'numpy.radians', 'np.radians', (["header['RA']"], {}), "(header['RA'])\n", (22560, 22574), True, 'import numpy as np\n'), ((22585, 22610), 'numpy.radians', 'np.radians', (["header['DEC']"], {}), "(header['DEC'])\n", (22595, 22610), True, 'import numpy as np\n'), ((23497, 23524), 'healpy.graticule', 'hp.graticule', ([], {'verbose': '(False)'}), '(verbose=False)\n', (23509, 23524), True, 'import healpy as hp\n'), ((23529, 23579), 'francis.time_integrated_scripts.steady_sensitivity_fits.plot_labels', 'steady_sensitivity_fits.plot_labels', (['dec', 'ra', 'reso'], {}), '(dec, ra, reso)\n', (23564, 23579), False, 'from francis.time_integrated_scripts import steady_sensitivity_fits\n'), ((23876, 23981), 'francis.time_integrated_scripts.steady_sensitivity_fits.plot_color_bar', 'steady_sensitivity_fits.plot_color_bar', ([], {'cmap': 'cmap', 'labels': '[min_color, max_color]', 'col_label': 'col_label'}), '(cmap=cmap, labels=[min_color,\n max_color], col_label=col_label)\n', (23914, 23981), False, 'from francis.time_integrated_scripts import steady_sensitivity_fits\n'), ((24487, 24513), 'healpy.ang2pix', 'hp.ang2pix', (['(256)', 'theta', 'ra'], {}), '(256, theta, ra)\n', (24497, 24513), True, 'import healpy as hp\n'), ((24531, 24566), 'numpy.unique', 'np.unique', (['inds'], {'return_counts': '(True)'}), '(inds, return_counts=True)\n', (24540, 24566), True, 'import numpy as np\n'), ((25382, 25399), 'numpy.load', 'np.load', (['res_f[0]'], {}), '(res_f[0])\n', (25389, 25399), True, 'import numpy as np\n'), ((26911, 26930), 'numpy.array', 'np.array', (['pval_list'], {}), '(pval_list)\n', (26919, 26930), True, 'import numpy as np\n'), ((2957, 2987), 'numpy.median', 'np.median', (["signal_trials['ra']"], {}), "(signal_trials['ra'])\n", (2966, 2987), True, 'import numpy as np\n'), ((2989, 3020), 'numpy.median', 'np.median', (["signal_trials['dec']"], {}), "(signal_trials['dec'])\n", (2998, 3020), True, 'import numpy as np\n'), ((4607, 4643), 'numpy.array', 'np.array', (["signal_trials['mean_ninj']"], {}), "(signal_trials['mean_ninj'])\n", (4615, 4643), True, 'import numpy as np\n'), ((7477, 7540), 'numpy.sqrt', 'np.sqrt', (['(bound_case_pass * (1.0 - bound_case_pass) / (ngen + 2))'], {}), '(bound_case_pass * (1.0 - bound_case_pass) / (ngen + 2))\n', (7484, 7540), True, 'import numpy as np\n'), ((7553, 7587), 'numpy.maximum', 'np.maximum', (['errs', 'bound_case_sigma'], {}), '(errs, bound_case_sigma)\n', (7563, 7587), True, 'import numpy as np\n'), ((9379, 9395), 'numpy.argmax', 'np.argmax', (['plist'], {}), '(plist)\n', (9388, 9395), True, 'import numpy as np\n'), ((9550, 9564), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9562, 9564), True, 'import matplotlib.pyplot as plt\n'), ((10210, 10253), 'numpy.interp', 'np.interp', (['conf_lev', 'passing', 'signal_fluxes'], {}), '(conf_lev, passing, signal_fluxes)\n', (10219, 10253), True, 'import numpy as np\n'), ((12217, 12233), 'numpy.argmax', 'np.argmax', (['plist'], {}), '(plist)\n', (12226, 12233), True, 'import numpy as np\n'), ((13547, 13565), 'numpy.max', 'np.max', (['signal_fls'], {}), '(signal_fls)\n', (13553, 13565), True, 'import numpy as np\n'), ((15342, 15357), 'numpy.array', 'np.array', (['pvals'], {}), '(pvals)\n', (15350, 15357), True, 'import numpy as np\n'), ((15477, 15513), 'scipy.stats.norm.ppf', 'sp.stats.norm.ppf', (['(1.0 - pvals / 2.0)'], {}), '(1.0 - pvals / 2.0)\n', (15494, 15513), True, 'import scipy as sp\n'), ((16487, 16507), 'numpy.zeros', 'np.zeros', (['num_alerts'], {}), '(num_alerts)\n', (16495, 16507), True, 'import numpy as np\n'), ((19165, 19179), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19177, 19179), True, 'import matplotlib.pyplot as plt\n'), ((19945, 19955), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19953, 19955), True, 'import matplotlib.pyplot as plt\n'), ((22475, 22488), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (22482, 22488), True, 'import numpy as np\n'), ((22745, 22776), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues"""', '(500)'], {}), "('Blues', 500)\n", (22762, 22776), True, 'import seaborn as sns\n'), ((22792, 22830), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['pdf_palette'], {}), '(pdf_palette)\n', (22817, 22830), True, 'import matplotlib as mpl\n'), ((22838, 22868), 'numpy.count_nonzero', 'np.count_nonzero', (['(skymap > 0.0)'], {}), '(skymap > 0.0)\n', (22854, 22868), True, 'import numpy as np\n'), ((22894, 22908), 'numpy.max', 'np.max', (['skymap'], {}), '(skymap)\n', (22900, 22908), True, 'import numpy as np\n'), ((23662, 23762), 'francis.time_integrated_scripts.steady_sensitivity_fits.plot_contours', 'steady_sensitivity_fits.plot_contours', (['None', 'original_LLH'], {'levels': '[22.2, 64.2]', 'nside': 'con_nside'}), '(None, original_LLH, levels=[22.2, \n 64.2], nside=con_nside)\n', (23699, 23762), False, 'from francis.time_integrated_scripts import steady_sensitivity_fits\n'), ((24354, 24378), 'numpy.array', 'np.array', (["bg['ts_prior']"], {}), "(bg['ts_prior'])\n", (24362, 24378), True, 'import numpy as np\n'), ((24592, 24610), 'healpy.nside2npix', 'hp.nside2npix', (['(256)'], {}), '(256)\n', (24605, 24610), True, 'import healpy as hp\n'), ((860, 885), 'scipy.special.erf', 'sp.special.erf', (['(a * x + b)'], {}), '(a * x + b)\n', (874, 885), True, 'import scipy as sp\n'), ((1016, 1036), 'numpy.exp', 'np.exp', (['(-a * (x - b))'], {}), '(-a * (x - b))\n', (1022, 1036), True, 'import numpy as np\n'), ((1282, 1303), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (1288, 1303), True, 'import numpy as np\n'), ((2010, 2043), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (2021, 2043), False, 'import pickle, sys\n'), ((2118, 2132), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2129, 2132), False, 'import pickle, sys\n'), ((2158, 2189), 'numpy.array', 'np.array', (["signal_trials['flux']"], {}), "(signal_trials['flux'])\n", (2166, 2189), True, 'import numpy as np\n'), ((2192, 2228), 'numpy.array', 'np.array', (["signal_trials['mean_ninj']"], {}), "(signal_trials['mean_ninj'])\n", (2200, 2228), True, 'import numpy as np\n'), ((2820, 2853), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (2831, 2853), False, 'import pickle, sys\n'), ((2928, 2942), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2939, 2942), False, 'import pickle, sys\n'), ((3640, 3673), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3651, 3673), False, 'import pickle, sys\n'), ((3744, 3758), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3755, 3758), False, 'import pickle, sys\n'), ((4461, 4494), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (4472, 4494), False, 'import pickle, sys\n'), ((4569, 4583), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4580, 4583), False, 'import pickle, sys\n'), ((4710, 4721), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (4718, 4721), True, 'import numpy as np\n'), ((5943, 5976), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (5954, 5976), False, 'import pickle, sys\n'), ((6047, 6061), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6058, 6061), False, 'import pickle, sys\n'), ((6295, 6328), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (6306, 6328), False, 'import pickle, sys\n'), ((6403, 6417), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6414, 6417), False, 'import pickle, sys\n'), ((13504, 13522), 'numpy.min', 'np.min', (['signal_fls'], {}), '(signal_fls)\n', (13510, 13522), True, 'import numpy as np\n'), ((14741, 14774), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (14752, 14774), False, 'import pickle, sys\n'), ((14845, 14859), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14856, 14859), False, 'import pickle, sys\n'), ((15093, 15126), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (15104, 15126), False, 'import pickle, sys\n'), ((15201, 15215), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (15212, 15215), False, 'import pickle, sys\n'), ((15236, 15292), 'scipy.stats.percentileofscore', 'sp.stats.percentileofscore', (['bg_trials', 'ts'], {'kind': '"""strict"""'}), "(bg_trials, ts, kind='strict')\n", (15262, 15292), True, 'import scipy as sp\n'), ((18123, 18156), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (18134, 18156), False, 'import pickle, sys\n'), ((18231, 18245), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18242, 18245), False, 'import pickle, sys\n'), ((18489, 18534), 'numpy.percentile', 'np.percentile', (['ns_fit[true_inj == ninj]', 'levs'], {}), '(ns_fit[true_inj == ninj], levs)\n', (18502, 18534), True, 'import numpy as np\n'), ((21814, 21847), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (21825, 21847), False, 'import pickle, sys\n'), ((21918, 21932), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (21929, 21932), False, 'import pickle, sys\n'), ((23392, 23422), 'numpy.radians', 'np.radians', (['[-1, 1, 1, -1, -1]'], {}), '([-1, 1, 1, -1, -1])\n', (23402, 23422), True, 'import numpy as np\n'), ((23437, 23466), 'numpy.radians', 'np.radians', (['[1, 1, -1, -1, 1]'], {}), '([1, 1, -1, -1, 1])\n', (23447, 23466), True, 'import numpy as np\n'), ((23783, 23801), 'numpy.array', 'np.array', (['contours'], {}), '(contours)\n', (23791, 23801), True, 'import numpy as np\n'), ((23817, 23874), 'healpy.projplot', 'hp.projplot', (['contour[0]', 'contour[1]'], {'linewidth': '(1.5)', 'c': '"""k"""'}), "(contour[0], contour[1], linewidth=1.5, c='k')\n", (23828, 23874), True, 'import healpy as hp\n'), ((24399, 24417), 'numpy.array', 'np.array', (["bg['ra']"], {}), "(bg['ra'])\n", (24407, 24417), True, 'import numpy as np\n'), ((24424, 24443), 'numpy.array', 'np.array', (["bg['dec']"], {}), "(bg['dec'])\n", (24432, 24443), True, 'import numpy as np\n'), ((25934, 25973), 'numpy.count_nonzero', 'np.count_nonzero', (["(bg_ts >= result['ts'])"], {}), "(bg_ts >= result['ts'])\n", (25950, 25973), True, 'import numpy as np\n'), ((6912, 6974), 'numpy.count_nonzero', 'np.count_nonzero', (["(signal_trials['ts_prior'][li:ri] > bg_thresh)"], {}), "(signal_trials['ts_prior'][li:ri] > bg_thresh)\n", (6928, 6974), True, 'import numpy as np\n'), ((22381, 22407), 'numpy.count_nonzero', 'np.count_nonzero', (['(s < 64.2)'], {}), '(s < 64.2)\n', (22397, 22407), True, 'import numpy as np\n'), ((22410, 22433), 'healpy.nside2pixarea', 'hp.nside2pixarea', (['nside'], {}), '(nside)\n', (22426, 22433), True, 'import healpy as hp\n'), ((23047, 23061), 'numpy.degrees', 'np.degrees', (['ra'], {}), '(ra)\n', (23057, 23061), True, 'import numpy as np\n'), ((23063, 23078), 'numpy.degrees', 'np.degrees', (['dec'], {}), '(dec)\n', (23073, 23078), True, 'import numpy as np\n'), ((20946, 20971), 'numpy.argwhere', 'np.argwhere', (['(ninjs == sig)'], {}), '(ninjs == sig)\n', (20957, 20971), True, 'import numpy as np\n')] |
import os
import sys
import subprocess
import ROOT as rt
import numpy as np
ptmin=0
ptmax=0
for pt in [100,200,500,1000]:
ls=subprocess.check_output("ls *mixed*{}p*.npz".format(pt),shell=True)
ls=ls.split("\n")[:-1]
for l in ls:
if("unscale" in l):continue
a=np.load(l[:])
#print(l,f.Get("jetAnalyser").GetEntries(),a["chad_mult"].shape)
print(l,len(a["ptset"]))
a.close()
| [
"numpy.load"
] | [((273, 286), 'numpy.load', 'np.load', (['l[:]'], {}), '(l[:])\n', (280, 286), True, 'import numpy as np\n')] |
import abc
import re
import tempfile
import traceback
from typing import Tuple, Callable, Union, List, Optional
import kerassurgeon
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras import models, layers
class BasePruning:
_FUZZ_EPSILON = 1e-5
def __init__(self,
pruning_factor: float,
model_compile_fn: Callable[[models.Model], None],
model_finetune_fn: Optional[Callable[[models.Model, int, int], None]],
nb_finetune_epochs: int,
nb_trained_for_epochs: int,
maximum_prune_iterations: int,
maximum_pruning_percent: float):
self._pruning_factor = pruning_factor
self._tmp_model_file_name = tempfile.NamedTemporaryFile().name
self._model_compile_fn = model_compile_fn
self._model_finetune_fn = model_finetune_fn
self._nb_finetune_epochs = nb_finetune_epochs
self._current_nb_of_epochs = nb_trained_for_epochs
self._maximum_prune_iterations = maximum_prune_iterations
self._maximum_pruning_percent = maximum_pruning_percent
self._channel_number_bins = None
self._pruning_factors_for_channel_bins = None
self._original_number_of_filters = -1
# TODO: select a subset of layers to prune
self._prunable_layers_regex = ".*"
def run_pruning(self, model: models.Model, prune_factor_scheduler_fn: Callable[[float, int], float] = None,
custom_objects_inside_model: dict = None) -> Tuple[models.Model, int]:
self._original_number_of_filters = self._count_number_of_filters(model)
pruning_iteration = 0
while True:
if prune_factor_scheduler_fn is not None:
self._pruning_factor = prune_factor_scheduler_fn(self._pruning_factor, pruning_iteration)
# Pruning step
print("Running filter pruning {0}".format(pruning_iteration))
model, pruning_dict = self._prune(model)
# Computing statistics
nb_of_pruned_filters = sum(pruning_dict.values())
if nb_of_pruned_filters == 0:
print("Number of pruned filters == 0, so pruning is stopped")
break
print("Number of pruned filters at this step: {0}".format(nb_of_pruned_filters))
pruning_percent = self._compute_pruning_percent(model)
print("Network is pruned from the original state, by {0} %".format(pruning_percent * 100))
# Finetune step
self._save_after_pruning(model)
self._clean_up_after_pruning(model)
model = self._load_back_saved_model(custom_objects_inside_model)
self._model_compile_fn(model)
if self._model_finetune_fn is not None:
self._model_finetune_fn(model, self._current_nb_of_epochs,
self._current_nb_of_epochs + self._nb_finetune_epochs)
self._current_nb_of_epochs += self._nb_finetune_epochs
# Stopping conditions
if nb_of_pruned_filters < 1:
print("No filters were pruned. Pruning is stopped.")
break
if self._maximum_pruning_percent is not None:
if pruning_percent > self._maximum_pruning_percent:
print(
"Network pruning (currently {0} %) reached the maximum based on your definition ({1} %)".format(
pruning_percent * 100, self._maximum_pruning_percent * 100))
break
pruning_iteration += 1
if self._maximum_prune_iterations is not None:
if pruning_iteration > self._maximum_prune_iterations:
break
print("Pruning stopped.")
return model, self._current_nb_of_epochs
def define_prune_bins(self, channel_number_bins: Union[List[int], np.ndarray],
pruning_factors_for_bins: Union[List[float], np.ndarray]):
if (len(channel_number_bins) - 1) != len(pruning_factors_for_bins):
raise ValueError("While defining pruning bins, channel numbers list "
"should contain 1 more items than the pruning factor list")
self._channel_number_bins = np.asarray(channel_number_bins).astype(int)
self._pruning_factors_for_channel_bins = np.asarray(pruning_factors_for_bins).astype(float)
def _get_pruning_factor_based_on_prune_bins(self, nb_channels: int) -> float:
for i, pruning_factor in enumerate(self._pruning_factors_for_channel_bins):
min_channel_number = self._channel_number_bins[i]
max_channel_number = self._channel_number_bins[i + 1]
if min_channel_number <= nb_channels < max_channel_number:
return self._pruning_factors_for_channel_bins[i]
# If we did not found any match we will return with the default pruning factor value
print("No entry was found for a layer with channel number {0}, "
"so returning pruning factor {1}".format(nb_channels, self._pruning_factor))
return self._pruning_factor
def _prune(self, model: models.Model) -> Tuple[models.Model, dict]:
surgeon = kerassurgeon.Surgeon(model, copy=True)
pruning_dict = dict()
for layer in model.layers:
if layer.__class__.__name__ == "Conv2D":
if re.match(self._prunable_layers_regex, layer.name):
layer_weight_mtx = layer.get_weights()[0]
pruning_factor = self._pruning_factor
if self._pruning_factors_for_channel_bins is not None:
pruning_factor = self._get_pruning_factor_based_on_prune_bins(layer_weight_mtx.shape[-1])
filter_indices_to_prune = self.run_pruning_for_conv2d_layer(pruning_factor,
layer,
layer_weight_mtx)
# Remove selected filters from layer
surgeon.add_job("delete_channels", layer, channels=filter_indices_to_prune)
pruning_dict[layer.name] = len(filter_indices_to_prune)
try:
new_model = surgeon.operate()
except Exception as e:
print("Could not complete pruning step because got Exception: {0}".format(e))
print(traceback.format_exc())
return model, {k: 0 for k, _ in pruning_dict.items()}
return new_model, pruning_dict
@staticmethod
def _count_number_of_filters(model: models.Model) -> int:
nb_of_filters = 0
for layer in model.layers:
if layer.__class__.__name__ == "Conv2D":
layer_weight_mtx = layer.get_weights()[0]
_, _, _, channels = layer_weight_mtx.shape
nb_of_filters += channels
return nb_of_filters
def _compute_pruning_percent(self, model: models.Model) -> float:
nb_filters = self._count_number_of_filters(model)
left_filters_percent = 1.0 - (nb_filters / self._original_number_of_filters)
return left_filters_percent
def _save_after_pruning(self, model: models.Model):
model.save(self._tmp_model_file_name, overwrite=True, include_optimizer=True)
@staticmethod
def _clean_up_after_pruning(model: models.Model):
del model
K.clear_session()
tf.reset_default_graph()
def _load_back_saved_model(self, custom_objects: dict) -> models.Model:
model = models.load_model(self._tmp_model_file_name, custom_objects=custom_objects)
return model
@staticmethod
def _apply_fuzz_to_vector(x: np.ndarray):
# Prepare the vector element indices
indices = np.arange(0, len(x), dtype=int)
np.random.shuffle(indices)
# Select the indices to be modified (always modify only N-1 values)
nb_of_values_to_modify = np.random.randint(0, len(x) - 1)
modify_indices = indices[:nb_of_values_to_modify]
# Modify the selected elements of the vector
x[modify_indices] += BasePruning._epsilon()
@staticmethod
def _apply_fuzz(x: np.ndarray):
for i in range(len(x)):
BasePruning._apply_fuzz_to_vector(x[i])
@staticmethod
def _epsilon():
return BasePruning._FUZZ_EPSILON
@staticmethod
def _calculate_number_of_channels_to_keep(keep_factor: float, nb_of_channels: int) -> Tuple[int, int]:
# This is the number of channels we would like to keep
new_nb_of_channels = int(np.ceil(nb_of_channels * keep_factor))
if new_nb_of_channels > nb_of_channels:
# This happens when (factor > 1)
new_nb_of_channels = nb_of_channels
elif new_nb_of_channels < 1:
# This happens when (factor <= 0)
new_nb_of_channels = 1
# Number of channels which will be removed
nb_channels_to_remove = nb_of_channels - new_nb_of_channels
return new_nb_of_channels, nb_channels_to_remove
@abc.abstractmethod
def run_pruning_for_conv2d_layer(self, pruning_factor: float, layer: layers.Conv2D, layer_weight_mtx) -> List[int]:
raise NotImplementedError
| [
"keras.models.load_model",
"tempfile.NamedTemporaryFile",
"keras.backend.clear_session",
"numpy.ceil",
"tensorflow.reset_default_graph",
"numpy.asarray",
"re.match",
"kerassurgeon.Surgeon",
"traceback.format_exc",
"numpy.random.shuffle"
] | [((5320, 5358), 'kerassurgeon.Surgeon', 'kerassurgeon.Surgeon', (['model'], {'copy': '(True)'}), '(model, copy=True)\n', (5340, 5358), False, 'import kerassurgeon\n'), ((7564, 7581), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (7579, 7581), True, 'from keras import backend as K\n'), ((7590, 7614), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7612, 7614), True, 'import tensorflow as tf\n'), ((7708, 7783), 'keras.models.load_model', 'models.load_model', (['self._tmp_model_file_name'], {'custom_objects': 'custom_objects'}), '(self._tmp_model_file_name, custom_objects=custom_objects)\n', (7725, 7783), False, 'from keras import models, layers\n'), ((7973, 7999), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (7990, 7999), True, 'import numpy as np\n'), ((773, 802), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (800, 802), False, 'import tempfile\n'), ((8746, 8783), 'numpy.ceil', 'np.ceil', (['(nb_of_channels * keep_factor)'], {}), '(nb_of_channels * keep_factor)\n', (8753, 8783), True, 'import numpy as np\n'), ((4361, 4392), 'numpy.asarray', 'np.asarray', (['channel_number_bins'], {}), '(channel_number_bins)\n', (4371, 4392), True, 'import numpy as np\n'), ((4454, 4490), 'numpy.asarray', 'np.asarray', (['pruning_factors_for_bins'], {}), '(pruning_factors_for_bins)\n', (4464, 4490), True, 'import numpy as np\n'), ((5496, 5545), 're.match', 're.match', (['self._prunable_layers_regex', 'layer.name'], {}), '(self._prunable_layers_regex, layer.name)\n', (5504, 5545), False, 'import re\n'), ((6560, 6582), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6580, 6582), False, 'import traceback\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from pathlib import Path
from copy import deepcopy
from PySignal import Signal
import numpy as np
from vis_utils.animation.animation_controller import AnimationController
from vis_utils.scene.components import ComponentBase
from vis_utils.animation.skeleton_visualization import SkeletonVisualization
from anim_utils.animation_data import BVHReader, MotionVector, SkeletonBuilder
from anim_utils.animation_data.motion_state import MotionState
class AnimationDirectoryExplorer(ComponentBase, AnimationController):
updated_animation_frame = Signal()
reached_end_of_animation = Signal()
def __init__(self, scene_object, folder_path, filetype, color):
ComponentBase.__init__(self, scene_object)
self.mainContext = 0
self.name = folder_path
AnimationController.__init__(self)
self.skeleton_vis = SkeletonVisualization(scene_object, color)
self.skeleton_vis.draw_mode = 2
self.skeleton_vis.visible = False
scene_object.add_component("skeleton_vis", self.skeleton_vis)
self.folder_path = Path(folder_path)
self._animation_files = []
self.current_controller = None
self.motion_cache = dict()
self.state = None
for filename in self.folder_path.iterdir():
print(filename, filetype, filename.suffix)
if filename.is_file() and filename.suffix == "."+filetype:
self._animation_files.append(filename.name)
self.select_file(self._animation_files[0])
def select_file(self, filename):
if filename not in self._animation_files:
return
if filename not in self.motion_cache:
self.load_file(filename)
self.current_controller = filename
self.skeleton_vis.set_skeleton(self.motion_cache[filename].skeleton, True)
self.skeleton_vis.visible = True
self.state = MotionState(self.motion_cache[filename])
self.state.play = self.playAnimation
self.updateTransformation()
return self.motion_cache[filename].n_frames
def load_file(self, filename):
bvh_reader = BVHReader(str(self.folder_path) +os.sep+filename)
mv = MotionVector()
mv.from_bvh_reader(bvh_reader, False)
animated_joints = list(bvh_reader.get_animated_joints())
mv.skeleton = SkeletonBuilder().load_from_bvh(bvh_reader, animated_joints=animated_joints)
self.motion_cache[filename] = mv
def get_animation_files(self):
return self._animation_files
def update(self, dt):
""" update current frame and global joint transformation matrices
"""
dt *= self.animationSpeed
if self.isLoadedCorrectly():
if self.playAnimation:
self.state.update(dt)
self.updateTransformation()
# update gui
if self.state.frame_idx > self.getNumberOfFrames():
self.resetAnimationTime()
self.reached_end_of_animation.emit(self.loopAnimation)
else:
self.updated_animation_frame.emit(self.state.frame_idx)
def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
return
def updateTransformation(self, frame_idx=None):
if self.state is None:
return
if frame_idx is not None:
self.state.set_frame_idx(frame_idx)
pose = self.state.get_pose()
self.skeleton_vis.updateTransformation(pose, np.eye(4))
def resetAnimationTime(self):
if self.state is None:
return
AnimationController.resetAnimationTime(self)
self.currentFrameNumber = 0
self.state.reset()
self.updateTransformation(self.state.frame_idx)
def setCurrentFrameNumber(self, frame_idx):
if self.state is None:
return
self.state.set_frame_idx(frame_idx)
self.updateTransformation()
def getNumberOfFrames(self):
if self.state is not None:
return self.state.mv.n_frames
else:
return 0
def isLoadedCorrectly(self):
return len(self._animation_files) > 0 and self.state is not None
def getFrameTime(self):
if self.isLoadedCorrectly():
return self.state.mv.frame_time
else:
return 0
def toggle_animation_loop(self):
self.loopAnimation = not self.loopAnimation
def set_draw_mode(self, draw_mode):
self.skeleton_vis.draw_mode = draw_mode
return
def startAnimation(self):
if self.state is None:
return
self.playAnimation = True
self.state.play = True
def stopAnimation(self):
if self.state is None:
return
self.playAnimation = False
self.state.play = False
def load_selected(self):
mv_copy = deepcopy(self.state.mv)
self.scene_object.scene.object_builder.create_object("animation_controller",
self.current_controller,
mv_copy.skeleton, mv_copy,
mv_copy.frame_time)
| [
"PySignal.Signal",
"copy.deepcopy",
"vis_utils.animation.animation_controller.AnimationController.__init__",
"vis_utils.animation.skeleton_visualization.SkeletonVisualization",
"vis_utils.animation.animation_controller.AnimationController.resetAnimationTime",
"anim_utils.animation_data.MotionVector",
"a... | [((1666, 1674), 'PySignal.Signal', 'Signal', ([], {}), '()\n', (1672, 1674), False, 'from PySignal import Signal\n'), ((1706, 1714), 'PySignal.Signal', 'Signal', ([], {}), '()\n', (1712, 1714), False, 'from PySignal import Signal\n'), ((1792, 1834), 'vis_utils.scene.components.ComponentBase.__init__', 'ComponentBase.__init__', (['self', 'scene_object'], {}), '(self, scene_object)\n', (1814, 1834), False, 'from vis_utils.scene.components import ComponentBase\n'), ((1904, 1938), 'vis_utils.animation.animation_controller.AnimationController.__init__', 'AnimationController.__init__', (['self'], {}), '(self)\n', (1932, 1938), False, 'from vis_utils.animation.animation_controller import AnimationController\n'), ((1967, 2009), 'vis_utils.animation.skeleton_visualization.SkeletonVisualization', 'SkeletonVisualization', (['scene_object', 'color'], {}), '(scene_object, color)\n', (1988, 2009), False, 'from vis_utils.animation.skeleton_visualization import SkeletonVisualization\n'), ((2189, 2206), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (2193, 2206), False, 'from pathlib import Path\n'), ((3009, 3049), 'anim_utils.animation_data.motion_state.MotionState', 'MotionState', (['self.motion_cache[filename]'], {}), '(self.motion_cache[filename])\n', (3020, 3049), False, 'from anim_utils.animation_data.motion_state import MotionState\n'), ((3303, 3317), 'anim_utils.animation_data.MotionVector', 'MotionVector', ([], {}), '()\n', (3315, 3317), False, 'from anim_utils.animation_data import BVHReader, MotionVector, SkeletonBuilder\n'), ((4740, 4784), 'vis_utils.animation.animation_controller.AnimationController.resetAnimationTime', 'AnimationController.resetAnimationTime', (['self'], {}), '(self)\n', (4778, 4784), False, 'from vis_utils.animation.animation_controller import AnimationController\n'), ((6016, 6039), 'copy.deepcopy', 'deepcopy', (['self.state.mv'], {}), '(self.state.mv)\n', (6024, 6039), False, 'from copy import deepcopy\n'), ((4636, 4645), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4642, 4645), True, 'import numpy as np\n'), ((3451, 3468), 'anim_utils.animation_data.SkeletonBuilder', 'SkeletonBuilder', ([], {}), '()\n', (3466, 3468), False, 'from anim_utils.animation_data import BVHReader, MotionVector, SkeletonBuilder\n')] |
"""
Script that evaluates the events.csv and the admissions.csv files:
Basically performs the data cleaning steps proposed by:
https://github.com/YerevaNN/mimic3-benchmarks
"""
import os
import pandas as pd
import numpy as np
from shutil import copyfile
MIMIC_DIR = '/nfs/research1/birney/projects/ehr/mimic/mimic_raw'
#MIMIC_DIR = '/Users/buergelt/projects/thesis/data/mimic_demo'
#OUT_DIR = '/Users/buergelt/projects/thesis/data/mimic_demo_clean'
OUT_DIR = '/nfs/research1/birney/projects/ehr/mimic/mimic_raw_clean'
# global MIMIC_DIR
def clean_patients():
"""
"ROW_ID","SUBJECT_ID","GENDER","DOB","DOD","DOD_HOSP","DOD_SSN","EXPIRE_FLAG"
:return:
"""
# 1. read in ADMISSIONS.csv
subjects = pd.read_csv(os.path.join(MIMIC_DIR, 'PATIENTS.csv'))
subjects.set_index('SUBJECT_ID', inplace=True)
for c in [
"DOB",
"DOD",
"DOD_HOSP",
"DOD_SSN",
]:
subjects[c] = pd.to_datetime(subjects[c].fillna('1911-11-11 11:11:11'))
subjects.to_csv(os.path.join(OUT_DIR, 'PATIENTS.csv'))
def clean_admissions():
"""
Go over the Admissions.csv -> select the entries w/o an hadmID
:return:
"""
# 1. read in ADMISSIONS.csv
admissions = pd.read_csv(os.path.join(MIMIC_DIR, 'ADMISSIONS.csv'))
admissions.set_index('HADM_ID', inplace=True)
missing = admissions.index == np.nan
missing = admissions[missing].copy()
print('Found %d Admissions.' % len(admissions))
if missing.index.values.size > 0:
admissions = admissions.loc[~missing.index.values]
print('Found %d Admissions with ID.' % len(admissions))
admissions.drop_duplicates(inplace=True)
print('Found %d Admissions with unique ID.' % len(admissions))
admissions.reset_index(inplace=True)
# correct the missing timestamps
for c in [
# 'ADMITTIME',
# 'DISCHTIME',
'DEATHTIME',
'EDREGTIME',
'EDOUTTIME',
]:
admissions[c] = pd.to_datetime(admissions[c].fillna('1911-11-11 11:11:11'))
print(admissions.isna().sum())
admissions.to_csv(os.path.join(OUT_DIR, 'ADMISSIONS.csv'))
return admissions
def clean_icu_stays():
"""
Go over the ICUSTAYS.csv and discard stays without admission
:return:
"""
# read stays:
icustays = pd.read_csv(os.path.join(MIMIC_DIR, 'ICUSTAYS.csv'))
icustays.set_index('ICUSTAY_ID', inplace=True)
stays = get_stays_csv()
stays.set_index('ICUSTAY_ID', inplace=True)
icustays = icustays.loc[stays.index]
icustays.reset_index(inplace=True)
for c in ['INTIME', 'OUTTIME']:
icustays[c] = pd.to_datetime(icustays[c])
# drop where in and outtimes are missing:
icustays = icustays.loc[icustays['OUTTIME'].notnull()]
icustays = icustays.loc[icustays['INTIME'].notnull()]
icustays.to_csv(os.path.join(OUT_DIR, 'ICUSTAYS.csv'))
return icustays
# TODO imlement:
def clean_diagnosis():
"""
go over DIAGNOSIS.csv and throw out the rows which are faulty
:return:
"""
diagnoses = pd.read_csv(os.path.join(MIMIC_DIR, 'DIAGNOSES_ICD.csv'))
diagnoses.set_index('HADM_ID', inplace=True)
# read map:
icd9_map = dict()
map_df = pd.read_csv(os.path.join(os.path.dirname(MIMIC_DIR), 'resources', 'ICD9_map.csv'))
for _, r in map_df.iterrows():
icd9_map[r.values[0]] = r.values[1]
try:
stays = pd.read_csv(os.path.join(OUT_DIR, 'stays.csv'))
except OSError:
stays = get_stays_csv()
# throw out the diagnoses that are not in stays
stays.reset_index(inplace=True)
stays.set_index('HADM_ID', inplace=True)
# find intersection:
hadmids = set(stays.index.values.tolist()).intersection(diagnoses.index.unique().values.tolist())
diagnoses = diagnoses.loc[sorted(list(hadmids))]
# map to highlevel:
diagnoses['ICD_CLASS'] = diagnoses['ICD9_CODE'].map(icd9_map)
# save
diagnoses.to_csv(os.path.join(OUT_DIR, 'DIAGNOSES_ICD.csv'))
def clean_events(kind='CHART'):
"""
Go over an xxxEVENTS.csv and lookup the ICUSTAY/ADMISSION ID combo in the stays mapping.
If the combination is illegitimate -> try to correct it:
- if hadmID is missing: add it from stays
- if ICUSTAY id is missiong and NOT found in combination w/ another HADMID -> add it from stays
- if both are missing or the conditions are unfulfilled -> discard the entry
:return:
"""
assert kind in ['LAB', 'CHART']
try:
stays = pd.read_csv(os.path.join(OUT_DIR, 'stays.csv'))
except OSError:
stays = get_stays_csv()
stays.reset_index(inplace=True)
stays_by_icu = stays.set_index('ICUSTAY_ID')
stays_by_hadm = stays.set_index('HADM_ID')
stays.reset_index(inplace=True)
# read the events:
for events in pd.read_csv(os.path.join(MIMIC_DIR, '%sEVENTS.csv' % kind), chunksize=500000):
droplist = []
events_to_edit = events.copy()
if kind == 'CHART':
"""
1. group by icustay.
- ensure the stay-HADMID combo is legit (in stays)
- if not -> discard
"""
for icustayID, events_per_icustay in events.groupby('ICUSTAY_ID'):
# correct nan
if np.isnan(icustayID):
print('Found %d NaN ICUSTAY_IDs. Correcting.' % events_per_icustay.shape[0])
for idx, r in events_per_icustay.iterrows():
# -> get the ID from the stays table by comparing the time.
icustays_p_hadmid = stays_by_hadm.loc[[hadmID]] # make sure to get a dataframe
corrected = False
for hadmID, stay_info in icustays_p_hadmid.iterrows():
timestamp = pd.to_datetime(r['CHARTTIME'])
if timestamp in pd.Interval(stay_info['INTIME'], stay_info['OUTTIME']):
# print('\n'*3)
# print(stay_info['ICUSTAY_ID'])
# print('\n'*3)
events_to_edit.loc[idx, 'ICUSTAY_ID'] = stay_info['ICUSTAY_ID']
corrected = True
print('Successfully inferred ICUSTAY_ID.')
if not corrected:
droplist.append(idx)
continue
continue
if icustayID not in stays_by_icu.index:
droplist.extend(events_per_icustay.index.values)
continue
# check if pair is legit:
hadmIDs = events_per_icustay['HADM_ID'].unique()
correct_hadmID = stays_by_icu.loc[icustayID, 'HADM_ID']
if correct_hadmID not in hadmIDs:
# drop all:
droplist.extend(events_per_icustay.index.values)
continue
else:
# discard all that have different HADM_IDs
for id, df in events_per_icustay.groupby('HADM_ID'):
if not id == correct_hadmID:
droplist.extend(df.index.values)
else:
for hadmID, events_per_hadm in events.groupby('HADM_ID'):
# TODO: implement recovery of hadmID based on intime/outtime!
# check if hadmID in stays. if not discard:
if hadmID not in stays_by_hadm.index:
droplist.extend(events_per_hadm.index.values)
else:
continue
del events
print('Dropping %s events due to invalid IDs' % len(droplist))
events_to_edit['CHARTTIME'] = pd.to_datetime(events_to_edit['CHARTTIME'].fillna('1911-11-11 11:11:11'))
if kind == 'CHART':
events_to_edit['STORETIME'] = pd.to_datetime(events_to_edit['STORETIME'].fillna('1911-11-11 11:11:11'))
events_to_edit.drop(droplist, inplace=True)
events_to_edit = events_to_edit.loc[events_to_edit['CHARTTIME'].notnull()]
with open(os.path.join(OUT_DIR, '%sEVENTS.csv' % kind), 'a') as fobj:
events_to_edit.to_csv(fobj, mode='a', header=fobj.tell() == 0)
def get_stays_csv():
"""
Write a csv file mapping ICU-stays to admissions (id to id):
- read the ICUSTAY file
:return:
"""
admissions = clean_admissions()
icustays = pd.read_csv(os.path.join(MIMIC_DIR, 'ICUSTAYS.csv'))
assert icustays.shape[0] == icustays['ICUSTAY_ID'].nunique()
assert icustays['ICUSTAY_ID'].isna().sum() == 0
icustays = icustays.loc[icustays['OUTTIME'].notnull()]
icustays = icustays.loc[icustays['INTIME'].notnull()]
stays = icustays.drop(['ROW_ID', 'DBSOURCE', 'FIRST_CAREUNIT',
'LAST_CAREUNIT', 'FIRST_WARDID',
'LAST_WARDID', 'LOS'], axis=1)
valid_admission_ids = sorted(list(set(stays['HADM_ID'].values).intersection(admissions['HADM_ID'].values)))
stays.set_index('HADM_ID', inplace=True)
for c in ['INTIME', 'OUTTIME']:
stays[c] = pd.to_datetime(stays[c].fillna('1911-11-11 11:11:11'))
# drop the stays for which the HADM_ID is not in admissions:
stays = stays.loc[valid_admission_ids]
# save:
stays.to_csv(os.path.join(OUT_DIR, 'stays.csv'))
return stays
# TODO:
"""
- get fn to check that there is a one to many mapping between HADM and ICUSTAY not vice versa
- in case there is, get a fn to throw out the one with less information? (this would have to be based no the events files...)
"""
def copy_raw_files():
"""
Copy the raw files from the MIMIC_DIR to OUT_DIR:
- Patients.csv
- Prescriptions.csv
:return:
"""
copyfile(os.path.join(MIMIC_DIR, 'PRESCRIPTIONS.csv'),
os.path.join(OUT_DIR, 'PRESCRIPTIONS.csv'))
copyfile(os.path.join(MIMIC_DIR, 'D_ITEMS.csv'),
os.path.join(OUT_DIR, 'D_ITEMS.csv'))
copyfile(os.path.join(MIMIC_DIR, 'D_LABITEMS.csv'),
os.path.join(OUT_DIR, 'D_LABITEMS.csv'))
def main():
# create outdir if needed:
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
copy_raw_files()
clean_patients()
clean_diagnosis()
clean_admissions()
clean_icu_stays()
clean_events()
clean_events(kind='LAB')
if __name__ == '__main__':
main()
| [
"os.mkdir",
"pandas.Interval",
"os.path.dirname",
"os.path.exists",
"numpy.isnan",
"pandas.to_datetime",
"os.path.join"
] | [((735, 774), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""PATIENTS.csv"""'], {}), "(MIMIC_DIR, 'PATIENTS.csv')\n", (747, 774), False, 'import os\n'), ((1020, 1057), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""PATIENTS.csv"""'], {}), "(OUT_DIR, 'PATIENTS.csv')\n", (1032, 1057), False, 'import os\n'), ((1242, 1283), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""ADMISSIONS.csv"""'], {}), "(MIMIC_DIR, 'ADMISSIONS.csv')\n", (1254, 1283), False, 'import os\n'), ((2095, 2134), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""ADMISSIONS.csv"""'], {}), "(OUT_DIR, 'ADMISSIONS.csv')\n", (2107, 2134), False, 'import os\n'), ((2323, 2362), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""ICUSTAYS.csv"""'], {}), "(MIMIC_DIR, 'ICUSTAYS.csv')\n", (2335, 2362), False, 'import os\n'), ((2632, 2659), 'pandas.to_datetime', 'pd.to_datetime', (['icustays[c]'], {}), '(icustays[c])\n', (2646, 2659), True, 'import pandas as pd\n'), ((2852, 2889), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""ICUSTAYS.csv"""'], {}), "(OUT_DIR, 'ICUSTAYS.csv')\n", (2864, 2889), False, 'import os\n'), ((3078, 3122), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""DIAGNOSES_ICD.csv"""'], {}), "(MIMIC_DIR, 'DIAGNOSES_ICD.csv')\n", (3090, 3122), False, 'import os\n'), ((3953, 3995), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""DIAGNOSES_ICD.csv"""'], {}), "(OUT_DIR, 'DIAGNOSES_ICD.csv')\n", (3965, 3995), False, 'import os\n'), ((4847, 4893), 'os.path.join', 'os.path.join', (['MIMIC_DIR', "('%sEVENTS.csv' % kind)"], {}), "(MIMIC_DIR, '%sEVENTS.csv' % kind)\n", (4859, 4893), False, 'import os\n'), ((8540, 8579), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""ICUSTAYS.csv"""'], {}), "(MIMIC_DIR, 'ICUSTAYS.csv')\n", (8552, 8579), False, 'import os\n'), ((9416, 9450), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""stays.csv"""'], {}), "(OUT_DIR, 'stays.csv')\n", (9428, 9450), False, 'import os\n'), ((9868, 9912), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""PRESCRIPTIONS.csv"""'], {}), "(MIMIC_DIR, 'PRESCRIPTIONS.csv')\n", (9880, 9912), False, 'import os\n'), ((9927, 9969), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""PRESCRIPTIONS.csv"""'], {}), "(OUT_DIR, 'PRESCRIPTIONS.csv')\n", (9939, 9969), False, 'import os\n'), ((9984, 10022), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""D_ITEMS.csv"""'], {}), "(MIMIC_DIR, 'D_ITEMS.csv')\n", (9996, 10022), False, 'import os\n'), ((10037, 10073), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""D_ITEMS.csv"""'], {}), "(OUT_DIR, 'D_ITEMS.csv')\n", (10049, 10073), False, 'import os\n'), ((10088, 10129), 'os.path.join', 'os.path.join', (['MIMIC_DIR', '"""D_LABITEMS.csv"""'], {}), "(MIMIC_DIR, 'D_LABITEMS.csv')\n", (10100, 10129), False, 'import os\n'), ((10144, 10183), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""D_LABITEMS.csv"""'], {}), "(OUT_DIR, 'D_LABITEMS.csv')\n", (10156, 10183), False, 'import os\n'), ((10241, 10264), 'os.path.exists', 'os.path.exists', (['OUT_DIR'], {}), '(OUT_DIR)\n', (10255, 10264), False, 'import os\n'), ((10274, 10291), 'os.mkdir', 'os.mkdir', (['OUT_DIR'], {}), '(OUT_DIR)\n', (10282, 10291), False, 'import os\n'), ((3250, 3276), 'os.path.dirname', 'os.path.dirname', (['MIMIC_DIR'], {}), '(MIMIC_DIR)\n', (3265, 3276), False, 'import os\n'), ((3426, 3460), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""stays.csv"""'], {}), "(OUT_DIR, 'stays.csv')\n", (3438, 3460), False, 'import os\n'), ((4531, 4565), 'os.path.join', 'os.path.join', (['OUT_DIR', '"""stays.csv"""'], {}), "(OUT_DIR, 'stays.csv')\n", (4543, 4565), False, 'import os\n'), ((5306, 5325), 'numpy.isnan', 'np.isnan', (['icustayID'], {}), '(icustayID)\n', (5314, 5325), True, 'import numpy as np\n'), ((8192, 8236), 'os.path.join', 'os.path.join', (['OUT_DIR', "('%sEVENTS.csv' % kind)"], {}), "(OUT_DIR, '%sEVENTS.csv' % kind)\n", (8204, 8236), False, 'import os\n'), ((5837, 5867), 'pandas.to_datetime', 'pd.to_datetime', (["r['CHARTTIME']"], {}), "(r['CHARTTIME'])\n", (5851, 5867), True, 'import pandas as pd\n'), ((5912, 5966), 'pandas.Interval', 'pd.Interval', (["stay_info['INTIME']", "stay_info['OUTTIME']"], {}), "(stay_info['INTIME'], stay_info['OUTTIME'])\n", (5923, 5966), True, 'import pandas as pd\n')] |
import copy
import glob
import os
import time
import pdb
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from a2c_ppo_acktr import algo
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule
from a2c_ppo_acktr.visualize import visdom_plot
from mymodels import TemporalDifferenceModule, CollectSamples, Logger
args = get_args()
assert args.algo in ['a2c', 'ppo', 'acktr']
if args.recurrent_policy:
assert args.algo in ['a2c', 'ppo'], \
'Recurrent policy is not implemented for ACKTR'
args.num_processes=8
num_updates = int(args.num_env_steps) // args.num_steps // args.num_processes
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
try:
os.makedirs(args.log_dir, exist_ok=True)
except OSError:
files = glob.glob(os.path.join(args.log_dir, '*.monitor.csv'))
for f in files:
os.remove(f)
eval_log_dir = args.log_dir + "_eval"
try:
os.makedirs(eval_log_dir)
except OSError:
files = glob.glob(os.path.join(eval_log_dir, '*.monitor.csv'))
for f in files:
os.remove(f)
def main():
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.vis:
from visdom import Visdom
viz = Visdom(port=args.port)
win = None
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, args.add_timestep, device, True)
frame_skip = 4 # frame skip
if args.tb_dir[-1] != '/':
args.tb_dir = args.tb_dir + '/'
logger = Logger(args.tb_dir)
logger.write_settings(args)
if args.use_tdm:
# beta scheduler
if args.beta_schedule == 'const':
beta_func = lambda x : float(args.beta_int)
elif args.beta_schedule == 'sqrt':
beta_func = lambda x : 1./np.sqrt(x+2)
elif args.beta_schedule == 'log':
beta_func = lambda x : 1./np.log(x+2)
elif args.beta_schedule == 'linear':
beta_func = lambda x : 1./(x+2)
# bonus function variations
if args.bonus_func == 'linear':
bonus_func = lambda x : x+1
elif args.bonus_func == 'square':
bonus_func = lambda x : (x+1)**2
elif args.bonus_func == 'sqrt':
bonus_func = lambda x : (x+1)**(1/2)
elif args.bonus_func == 'log':
bonus_func = lambda x : np.log(x+1)
# temporal difference module
tdm = TemporalDifferenceModule(inputSize= 2*int(envs.observation_space.shape[0]),
outputSize=args.time_intervals,
num_fc_layers=int(args.num_layers),
depth_fc_layers=int(args.fc_width),
lr=float(args.opt_lr),
buffer_max_length = args.buffer_max_length,
buffer_RL_ratio=args.buffer_RL_ratio,
frame_skip=frame_skip,
tdm_epoch=args.tdm_epoch,
tdm_batchsize=args.tdm_batchsize,
logger=logger,
bonus_func=bonus_func).to(device)
#collect random trajectories
sample_collector = CollectSamples(envs, args.num_processes, initial=True)
tdm.buffer_rand = sample_collector.collect_trajectories(args.num_rollouts, args.steps_per_rollout)
# initial training
tdm.update()
actor_critic = Policy(envs.observation_space.shape, envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, lr=args.lr,
eps=args.eps, alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,
args.value_loss_coef, args.entropy_coef, lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef,
args.entropy_coef, acktr=True)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rewards = deque(maxlen=10)
start = time.time()
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
if args.algo == "acktr":
# use optimizer's learning rate since it's hard-coded in kfac.py
update_linear_schedule(agent.optimizer, j, num_updates, agent.optimizer.lr)
else:
update_linear_schedule(agent.optimizer, j, num_updates, args.lr)
if args.algo == 'ppo' and args.use_linear_clip_decay:
agent.clip_param = args.clip_param * (1 - j / float(num_updates))
# acting
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step],
rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
# Obser reward and next obs
# envs.render()
obs_old = obs.clone()
obs, reward, done, infos = envs.step(action)
for info in infos:
if 'episode' in info.keys():
episode_rewards.append(info['episode']['r'])
# If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
#compute intrinsic bonus
if args.use_tdm:
tdm.symm_eval = True if step == args.num_steps-1 else False
reward_int = tdm.compute_bonus(obs_old, obs).float()
reward += beta_func(step + j*args.num_steps) * reward_int.cpu().unsqueeze(1)
if (j % args.log_interval == 0) and (step == args.num_steps-1):
logger.add_reward_intrinsic(reward_int, (j+1)*args.num_steps*args.num_processes)
#saving to buffer.
rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks)
# saving to buffer and periodic updating parameters
if (args.use_tdm):
tdm.buffer_RL_temp.append((rollouts.obs, rollouts.masks))
if (j%args.num_steps==0 and j>0):
tdm.update()
with torch.no_grad():
next_value = actor_critic.get_value(rollouts.obs[-1],
rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
# no
# save every 1-million steps
if (((j+1)*args.num_steps*args.num_processes)%1e6 == 0 or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
# A really ugly way to save a model to CPU
save_model = actor_critic
if args.cuda:
save_model = copy.deepcopy(actor_critic).cpu()
save_model = [save_model,
getattr(get_vec_normalize(envs), 'ob_rms', None)]
if j == num_updates - 1:
save_here = os.path.join(save_path, args.env_name + "_step_{}M.pt".format((j+1)*args.num_steps*args.num_processes//1e6))
else:
save_here = os.path.join(save_path, args.env_name + "_final.pt")
torch.save(save_model, save_here) # saved policy.
total_num_steps = (j + 1) * args.num_processes * args.num_steps
# printing outputs
if j % args.log_interval == 0 and len(episode_rewards) > 1:
end = time.time()
print("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n".
format(j,
total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards),
np.mean(episode_rewards),
np.median(episode_rewards),
np.min(episode_rewards),
np.max(episode_rewards), dist_entropy,
value_loss, action_loss))
logger.add_reward(episode_rewards, (j+1)*args.num_steps*args.num_processes)
#
# if j % args.tb_interval == 0:
# # mean/std or median/1stqt?
# logger.add_tdm_loss(loss, self.epoch_count*i)
# evaluation process
# if (args.eval_interval is not None
# and len(episode_rewards) > 1
# and j % args.eval_interval == 0):
# eval_envs = make_vec_envs(
# args.env_name, args.seed + args.num_processes, args.num_processes,
# args.gamma, eval_log_dir, args.add_timestep, device, True)
#
# vec_norm = get_vec_normalize(eval_envs)
# if vec_norm is not None:
# vec_norm.eval()
# vec_norm.ob_rms = get_vec_normalize(envs).ob_rms
#
# eval_episode_rewards = []
#
# obs = eval_envs.reset()
# eval_recurrent_hidden_states = torch.zeros(args.num_processes,
# actor_critic.recurrent_hidden_state_size, device=device)
# eval_masks = torch.zeros(args.num_processes, 1, device=device)
#
# while len(eval_episode_rewards) < 10:
# with torch.no_grad():
# _, action, _, eval_recurrent_hidden_states = actor_critic.act(
# obs, eval_recurrent_hidden_states, eval_masks, deterministic=True)
#
# # Obser reward and next obs
# # envs.render()
# obs, reward, done, infos = eval_envs.step(action)
#
# eval_masks = torch.FloatTensor([[0.0] if done_ else [1.0]
# for done_ in done])
# for info in infos:
# if 'episode' in info.keys():
# eval_episode_rewards.append(info['episode']['r'])
#
# eval_envs.close()
#
# print(" Evaluation using {} episodes: mean reward {:.5f}\n".
# format(len(eval_episode_rewards),
# np.mean(eval_episode_rewards)))
# # plotting
# if args.vis and j % args.vis_interval == 0:
# try:
# # Sometimes monitor doesn't properly flush the outputs
# win = visdom_plot(viz, win, args.log_dir, args.env_name,
# args.algo, args.num_env_steps)
# except IOError:
# pass
#if done save:::::::::::
logger.save()
if __name__ == "__main__":
main()
| [
"os.remove",
"a2c_ppo_acktr.arguments.get_args",
"mymodels.CollectSamples",
"visdom.Visdom",
"a2c_ppo_acktr.algo.A2C_ACKTR",
"torch.set_num_threads",
"numpy.mean",
"torch.device",
"torch.no_grad",
"os.path.join",
"numpy.sqrt",
"collections.deque",
"torch.FloatTensor",
"numpy.max",
"copy.... | [((624, 634), 'a2c_ppo_acktr.arguments.get_args', 'get_args', ([], {}), '()\n', (632, 634), False, 'from a2c_ppo_acktr.arguments import get_args\n'), ((903, 931), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (920, 931), False, 'import torch\n'), ((932, 969), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (958, 969), False, 'import torch\n'), ((988, 1013), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1011, 1013), False, 'import torch\n'), ((1142, 1182), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {'exist_ok': '(True)'}), '(args.log_dir, exist_ok=True)\n', (1153, 1182), False, 'import os\n'), ((1356, 1381), 'os.makedirs', 'os.makedirs', (['eval_log_dir'], {}), '(eval_log_dir)\n', (1367, 1381), False, 'import os\n'), ((1531, 1555), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1552, 1555), False, 'import torch\n'), ((1569, 1615), 'torch.device', 'torch.device', (["('cuda:0' if args.cuda else 'cpu')"], {}), "('cuda:0' if args.cuda else 'cpu')\n", (1581, 1615), False, 'import torch\n'), ((1735, 1857), 'a2c_ppo_acktr.envs.make_vec_envs', 'make_vec_envs', (['args.env_name', 'args.seed', 'args.num_processes', 'args.gamma', 'args.log_dir', 'args.add_timestep', 'device', '(True)'], {}), '(args.env_name, args.seed, args.num_processes, args.gamma,\n args.log_dir, args.add_timestep, device, True)\n', (1748, 1857), False, 'from a2c_ppo_acktr.envs import make_vec_envs\n'), ((2004, 2023), 'mymodels.Logger', 'Logger', (['args.tb_dir'], {}), '(args.tb_dir)\n', (2010, 2023), False, 'from mymodels import TemporalDifferenceModule, CollectSamples, Logger\n'), ((4040, 4150), 'a2c_ppo_acktr.model.Policy', 'Policy', (['envs.observation_space.shape', 'envs.action_space'], {'base_kwargs': "{'recurrent': args.recurrent_policy}"}), "(envs.observation_space.shape, envs.action_space, base_kwargs={\n 'recurrent': args.recurrent_policy})\n", (4046, 4150), False, 'from a2c_ppo_acktr.model import Policy\n'), ((4953, 5099), 'a2c_ppo_acktr.storage.RolloutStorage', 'RolloutStorage', (['args.num_steps', 'args.num_processes', 'envs.observation_space.shape', 'envs.action_space', 'actor_critic.recurrent_hidden_state_size'], {}), '(args.num_steps, args.num_processes, envs.observation_space.\n shape, envs.action_space, actor_critic.recurrent_hidden_state_size)\n', (4967, 5099), False, 'from a2c_ppo_acktr.storage import RolloutStorage\n'), ((5243, 5259), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (5248, 5259), False, 'from collections import deque\n'), ((5272, 5283), 'time.time', 'time.time', ([], {}), '()\n', (5281, 5283), False, 'import time\n'), ((1682, 1704), 'visdom.Visdom', 'Visdom', ([], {'port': 'args.port'}), '(port=args.port)\n', (1688, 1704), False, 'from visdom import Visdom\n'), ((3809, 3863), 'mymodels.CollectSamples', 'CollectSamples', (['envs', 'args.num_processes'], {'initial': '(True)'}), '(envs, args.num_processes, initial=True)\n', (3823, 3863), False, 'from mymodels import TemporalDifferenceModule, CollectSamples, Logger\n'), ((4226, 4378), 'a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'alpha': 'args.alpha', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, lr=\n args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm)\n', (4240, 4378), False, 'from a2c_ppo_acktr import algo\n'), ((1221, 1264), 'os.path.join', 'os.path.join', (['args.log_dir', '"""*.monitor.csv"""'], {}), "(args.log_dir, '*.monitor.csv')\n", (1233, 1264), False, 'import os\n'), ((1294, 1306), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1303, 1306), False, 'import os\n'), ((1420, 1463), 'os.path.join', 'os.path.join', (['eval_log_dir', '"""*.monitor.csv"""'], {}), "(eval_log_dir, '*.monitor.csv')\n", (1432, 1463), False, 'import os\n'), ((1493, 1505), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1502, 1505), False, 'import os\n'), ((4512, 4697), 'a2c_ppo_acktr.algo.PPO', 'algo.PPO', (['actor_critic', 'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,\n args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps,\n max_grad_norm=args.max_grad_norm)\n', (4520, 4697), False, 'from a2c_ppo_acktr import algo\n'), ((6610, 6676), 'torch.FloatTensor', 'torch.FloatTensor', (['[([0.0] if done_ else [1.0]) for done_ in done]'], {}), '([([0.0] if done_ else [1.0]) for done_ in done])\n', (6627, 6676), False, 'import torch\n'), ((7546, 7561), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7559, 7561), False, 'import torch\n'), ((8237, 8275), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.algo'], {}), '(args.save_dir, args.algo)\n', (8249, 8275), False, 'import os\n'), ((8965, 8998), 'torch.save', 'torch.save', (['save_model', 'save_here'], {}), '(save_model, save_here)\n', (8975, 8998), False, 'import torch\n'), ((9204, 9215), 'time.time', 'time.time', ([], {}), '()\n', (9213, 9215), False, 'import time\n'), ((4824, 4910), 'a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'acktr': '(True)'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, acktr\n =True)\n', (4838, 4910), False, 'from a2c_ppo_acktr import algo\n'), ((5535, 5610), 'a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent.optimizer', 'j', 'num_updates', 'agent.optimizer.lr'], {}), '(agent.optimizer, j, num_updates, agent.optimizer.lr)\n', (5557, 5610), False, 'from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((5645, 5709), 'a2c_ppo_acktr.utils.update_linear_schedule', 'update_linear_schedule', (['agent.optimizer', 'j', 'num_updates', 'args.lr'], {}), '(agent.optimizer, j, num_updates, args.lr)\n', (5667, 5709), False, 'from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((5960, 5975), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5973, 5975), False, 'import torch\n'), ((8309, 8331), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (8320, 8331), False, 'import os\n'), ((8900, 8952), 'os.path.join', 'os.path.join', (['save_path', "(args.env_name + '_final.pt')"], {}), "(save_path, args.env_name + '_final.pt')\n", (8912, 8952), False, 'import os\n'), ((8637, 8660), 'a2c_ppo_acktr.utils.get_vec_normalize', 'get_vec_normalize', (['envs'], {}), '(envs)\n', (8654, 8660), False, 'from a2c_ppo_acktr.utils import get_vec_normalize, update_linear_schedule\n'), ((9564, 9588), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (9571, 9588), True, 'import numpy as np\n'), ((9613, 9639), 'numpy.median', 'np.median', (['episode_rewards'], {}), '(episode_rewards)\n', (9622, 9639), True, 'import numpy as np\n'), ((9664, 9687), 'numpy.min', 'np.min', (['episode_rewards'], {}), '(episode_rewards)\n', (9670, 9687), True, 'import numpy as np\n'), ((9712, 9735), 'numpy.max', 'np.max', (['episode_rewards'], {}), '(episode_rewards)\n', (9718, 9735), True, 'import numpy as np\n'), ((2282, 2296), 'numpy.sqrt', 'np.sqrt', (['(x + 2)'], {}), '(x + 2)\n', (2289, 2296), True, 'import numpy as np\n'), ((8530, 8557), 'copy.deepcopy', 'copy.deepcopy', (['actor_critic'], {}), '(actor_critic)\n', (8543, 8557), False, 'import copy\n'), ((2375, 2388), 'numpy.log', 'np.log', (['(x + 2)'], {}), '(x + 2)\n', (2381, 2388), True, 'import numpy as np\n'), ((2844, 2857), 'numpy.log', 'np.log', (['(x + 1)'], {}), '(x + 1)\n', (2850, 2857), True, 'import numpy as np\n')] |
__author__ = '<NAME>'
# RESULTS LOG : October 16th, 2015
# Accuracy : TODO
# Confusion Matrix : TODO
from time import time
###############################################
# load from csv training and testing sets
from numpy import genfromtxt
features_test = genfromtxt('d:/CODE/ml-crops/preproc/dataset/features_train.csv', delimiter=',')
classes_test = genfromtxt('d:/CODE/ml-crops/preproc/dataset/classes_train.csv', delimiter=',')
features_train = genfromtxt('d:/CODE/ml-crops/preproc/dataset/features_test.csv', delimiter=',')
classes_train = genfromtxt('d:/CODE/ml-crops/preproc/dataset/classes_test.csv', delimiter=',')
# 0 index the classes
classes_test = classes_test - 1
classes_train = classes_train - 1
#import matplotlib.pyplot as pl
#pl.scatter(features_train[:,7], features_train[:,1], c=classes_train) #features_train[:8])
#pl.show()
###############################################
# import pybrain stuff
from pybrain.datasets import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
from numpy import ravel
# Build classification data set
train_data = ClassificationDataSet(9, 1, nb_classes=4)
for i in range(len(features_train)):
train_data.addSample(ravel(features_train[i]), classes_train[i])
train_data._convertToOneOfMany( )
test_data = ClassificationDataSet(9, 1, nb_classes=4)
for i in range(len(features_test)):
test_data.addSample(ravel(features_test[i]), classes_test[i])
test_data._convertToOneOfMany( )
# build and train the network
print("Input dimension : " + str(train_data.indim) + ". Out dimenssion : " + str(train_data.outdim))
fnn = buildNetwork( train_data.indim, 25, train_data.outdim, outclass=SoftmaxLayer )
trainer = BackpropTrainer( fnn, dataset=train_data, momentum=0.05, learningrate=0.01, verbose=True, weightdecay=0.1)
trnerr,valerr = trainer.trainUntilConvergence(dataset=train_data,maxEpochs=150)
import matplotlib.pyplot as pl
pl.plot(trnerr,'b',valerr,'r')
pl.show()
out = fnn.activateOnDataset(test_data).argmax(axis=1)
print(percentError(out, test_data['class']))
from sklearn.metrics import precision_score,recall_score,confusion_matrix
print(confusion_matrix(out, test_data['class']))
#trainer.trainEpochs(150)
#trnresult = percentError(trainer.testOnClassData(), train_data['class'])
#print 'Error percent on test set : ', percentError(trainer.testOnClassData(dataset=test_data), test_data['class'])
#print("epoch: %4d" % trainer.totalepochs,
#" train error: %5.2f%%" % trnresult,
#" test error: %5.2f%%" % tstresult)
#predicted_data=trainer.testOnClassData(dataset=test_data)
#NetworkWriter.writeToFile(fnn, 'oliv.xml')
#from sklearn.metrics import precision_score,recall_score,confusion_matrix
#print("The precision is "+str(precision_score(classes_test,predicted_data)))
#print(confusion_matrix(classes_test,predicted_data))
#print(len(classes_test))
#print(len(predicted_data)) | [
"pybrain.tools.shortcuts.buildNetwork",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.ravel",
"pybrain.supervised.trainers.BackpropTrainer",
"numpy.genfromtxt",
"pybrain.utilities.percentError",
"pybrain.datasets.ClassificationDataSet",
"sklearn.metrics.confusion_matrix"
] | [((262, 347), 'numpy.genfromtxt', 'genfromtxt', (['"""d:/CODE/ml-crops/preproc/dataset/features_train.csv"""'], {'delimiter': '""","""'}), "('d:/CODE/ml-crops/preproc/dataset/features_train.csv', delimiter=','\n )\n", (272, 347), False, 'from numpy import genfromtxt\n'), ((358, 437), 'numpy.genfromtxt', 'genfromtxt', (['"""d:/CODE/ml-crops/preproc/dataset/classes_train.csv"""'], {'delimiter': '""","""'}), "('d:/CODE/ml-crops/preproc/dataset/classes_train.csv', delimiter=',')\n", (368, 437), False, 'from numpy import genfromtxt\n'), ((456, 535), 'numpy.genfromtxt', 'genfromtxt', (['"""d:/CODE/ml-crops/preproc/dataset/features_test.csv"""'], {'delimiter': '""","""'}), "('d:/CODE/ml-crops/preproc/dataset/features_test.csv', delimiter=',')\n", (466, 535), False, 'from numpy import genfromtxt\n'), ((552, 630), 'numpy.genfromtxt', 'genfromtxt', (['"""d:/CODE/ml-crops/preproc/dataset/classes_test.csv"""'], {'delimiter': '""","""'}), "('d:/CODE/ml-crops/preproc/dataset/classes_test.csv', delimiter=',')\n", (562, 630), False, 'from numpy import genfromtxt\n'), ((1391, 1432), 'pybrain.datasets.ClassificationDataSet', 'ClassificationDataSet', (['(9)', '(1)'], {'nb_classes': '(4)'}), '(9, 1, nb_classes=4)\n', (1412, 1432), False, 'from pybrain.datasets import ClassificationDataSet\n'), ((1586, 1627), 'pybrain.datasets.ClassificationDataSet', 'ClassificationDataSet', (['(9)', '(1)'], {'nb_classes': '(4)'}), '(9, 1, nb_classes=4)\n', (1607, 1627), False, 'from pybrain.datasets import ClassificationDataSet\n'), ((1901, 1977), 'pybrain.tools.shortcuts.buildNetwork', 'buildNetwork', (['train_data.indim', '(25)', 'train_data.outdim'], {'outclass': 'SoftmaxLayer'}), '(train_data.indim, 25, train_data.outdim, outclass=SoftmaxLayer)\n', (1913, 1977), False, 'from pybrain.tools.shortcuts import buildNetwork\n'), ((1990, 2099), 'pybrain.supervised.trainers.BackpropTrainer', 'BackpropTrainer', (['fnn'], {'dataset': 'train_data', 'momentum': '(0.05)', 'learningrate': '(0.01)', 'verbose': '(True)', 'weightdecay': '(0.1)'}), '(fnn, dataset=train_data, momentum=0.05, learningrate=0.01,\n verbose=True, weightdecay=0.1)\n', (2005, 2099), False, 'from pybrain.supervised.trainers import BackpropTrainer\n'), ((2209, 2242), 'matplotlib.pyplot.plot', 'pl.plot', (['trnerr', '"""b"""', 'valerr', '"""r"""'], {}), "(trnerr, 'b', valerr, 'r')\n", (2216, 2242), True, 'import matplotlib.pyplot as pl\n'), ((2240, 2249), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as pl\n'), ((2311, 2348), 'pybrain.utilities.percentError', 'percentError', (['out', "test_data['class']"], {}), "(out, test_data['class'])\n", (2323, 2348), False, 'from pybrain.utilities import percentError\n'), ((2442, 2483), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['out', "test_data['class']"], {}), "(out, test_data['class'])\n", (2458, 2483), False, 'from sklearn.metrics import precision_score, recall_score, confusion_matrix\n'), ((1495, 1519), 'numpy.ravel', 'ravel', (['features_train[i]'], {}), '(features_train[i])\n', (1500, 1519), False, 'from numpy import ravel\n'), ((1688, 1711), 'numpy.ravel', 'ravel', (['features_test[i]'], {}), '(features_test[i])\n', (1693, 1711), False, 'from numpy import ravel\n')] |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import os
import shutil
import openpyxl
import rpy2.robjects as ro
from rpy2.robjects import r
from rpy2.robjects.packages import importr
ro.r['options'](warn=-1)
r('as.POSIXct("2015-01-01 00:00:01")+0 ')
base = importr('base')
cars = importr('car')
mvtnorm = importr('mvtnorm')
broom = importr('broom')
psych = importr('psych')
mhtmult = importr('MHTmult')
def get_rfo_os_num_rows(root, modality):
os.chdir(root)
rfo = pd.read_csv(str(modality) + '_reformatted_output.csv')
ones = pd.read_csv('ONE_SAMPLE.csv')
[num_rows, _] = ones.shape
return [rfo, ones, num_rows]
def run_stats(ones, rfo, row):
group = ones.iloc[row]["GROUP"]
seed = ones.iloc[row]["SEED"]
rois = ones.iloc[row]["ROIS"]
r.assign('rGROUP', group)
r.assign('rSEED', seed)
r.assign('rROIS', rois)
rfo_gs = rfo.loc[rfo['Group'] == group].reset_index(drop=True)
rfo_gs_rs = pd.concat([rfo_gs[["Group", "Seed", "Subject_ID"]], rfo_gs[rois]], axis=1)
rfo_gs_rs_ss = rfo_gs_rs.loc[rfo_gs_rs['Seed'] == seed].reset_index(drop=True)
osd = pd.DataFrame(rfo_gs_rs_ss[rois])
osd.to_csv('osd.csv', index=False)
ro.globalenv['osd'] = "osd.csv"
r('osd<-read.csv(osd)')
r('ttest <- t.test(osd, mu = 0)')
roisdf = pd.DataFrame([rois])
roisdf.columns = ["ROI"]
seeddf = pd.DataFrame([seed])
seeddf.columns = ["Seed"]
groupdf = pd.DataFrame([group])
groupdf.columns = ["Group"]
name = pd.concat([groupdf, seeddf, roisdf], axis=1)
statdf = pd.DataFrame(r('ttest[1]'))
statdf.columns = ["TSTAT"]
pardf = pd.DataFrame(r('ttest[2]'))
pardf.columns = ["DF"]
pvaluedf = pd.DataFrame(r('ttest[3]'))
pvaluedf.columns = ["PVAL"]
conf_estdf = pd.DataFrame(r('ttest[4]'))
conf_est_1 = conf_estdf[0]
conf_est_1 = conf_est_1[0]
conf_est_df_1 = pd.DataFrame([conf_est_1])
conf_est_df_1.columns = ["CONF_1"]
conf_est_2 = conf_estdf[1]
conf_est_2 = conf_est_2[0]
conf_est_df_2 = pd.DataFrame([conf_est_2])
conf_est_df_2.columns = ["CONF_2"]
stdderdf = pd.DataFrame(r('ttest[7]'))
stdder = stdderdf[0]
stdder = stdder[0]
stdder_df = pd.DataFrame([stdder])
stdder_df.columns = ["STDERR"]
statdf_fl = statdf["TSTAT"].round(3).astype(str)
pardf_fl = pardf["DF"].round(3).astype(str)
pvaluedf_fl = pvaluedf["PVAL"].round(3).astype(str)
conf_est_df_1_fl = conf_est_df_1["CONF_1"].round(3).astype(str)
conf_est_df_2_fl = conf_est_df_2["CONF_2"].round(3).astype(str)
summary_stats = "t(" + statdf_fl + ")=" + pardf_fl + ",p=" + pvaluedf_fl + " Conf_interval=[" + conf_est_df_1_fl + "," + conf_est_df_2_fl + "]"
summary_stats = np.array(summary_stats)
summary_stats = pd.DataFrame([summary_stats])
summary_stats.columns = ["summary_stats"]
r('describe_osd<-describe(osd)')
describe_osd = pd.DataFrame(r('describe_osd'))
[_, c] = describe_osd.shape
if c > 1:
describe_osd = describe_osd.T
else:
pass
describe_osd.columns = ['STAT']
mean_df = pd.DataFrame([describe_osd.iloc[2][0]])
mean_df.columns = ["MEAN"]
std_df = pd.DataFrame([describe_osd.iloc[3][0]])
std_df.columns = ["STD"]
onesample_t = pd.concat([name, mean_df, std_df, pardf, statdf, pvaluedf, conf_est_df_1, conf_est_df_2], axis=1)
return onesample_t
def get_full_onesample(root, modality):
[rfo, ones, num_rows] = get_rfo_os_num_rows(root, modality)
full_onesample_t = pd.DataFrame([])
for row in range(num_rows):
onesample_t_df = run_stats(ones, rfo, row)
full_onesample_t = pd.concat([full_onesample_t, onesample_t_df], axis=0)
full_onesample_t = full_onesample_t.reset_index(drop=True)
return full_onesample_t
def calculate_pval_bon(df):
p = df["PVAL"]
p = pd.DataFrame(p)
from rpy2.robjects import pandas2ri
pandas2ri.activate()
ro.globalenv['p'] = p
r('p<-cbind(p)');
r('p<-unlist(p)');
r('pval_adjust<-gsidak.p.adjust(p, k=1)')
pval_adjust = pd.DataFrame(r('pval_adjust'))
pval_adjust.columns = ["PVAL_ADJUST"]
df = pd.concat([df, pval_adjust], axis=1)
return df
def run_split_and_bonferroni(root, modality):
full_onesample_t_df = get_full_onesample(root, modality)
did_g = full_onesample_t_df.loc[full_onesample_t_df['Group'] == 'DID-G'].reset_index(drop=True)
did_s = full_onesample_t_df.loc[full_onesample_t_df['Group'] == 'DID-S'].reset_index(drop=True)
ctrl = full_onesample_t_df.loc[full_onesample_t_df['Group'] == 'ctrl'].reset_index(drop=True)
full_onesample_t_df = calculate_pval_bon(full_onesample_t_df)
did_g = calculate_pval_bon(did_g)
did_s = calculate_pval_bon(did_s)
ctrl = calculate_pval_bon(ctrl)
return [full_onesample_t_df, did_g, did_s, ctrl]
def p_val_cut_off(df, col):
df_cut = df.loc[df[col] < 0.05]
df_cut = df_cut.reset_index(drop=True)
return df_cut
def check_rows_len(df):
[rows, _] = df.shape
return rows
def create_output_dir_file(root, modality):
os.chdir(root)
outputdir = str(modality) + '_PPI_ONE_SAMPLE'
outputdir = os.path.join(root, outputdir)
if os.path.exists(outputdir):
shutil.rmtree(outputdir) # THIS THROWS ERROR IF N/EMPTY
os.chdir(root)
os.mkdir(outputdir)
os.chdir(outputdir)
wb = openpyxl.Workbook()
outfile_name = str(modality) + '_OS_OUTPUT_FULL.xlsx'
outputfile_path = os.path.join(root, outputdir, outfile_name)
wb.save(outputfile_path)
else:
os.chdir(root)
os.mkdir(outputdir)
os.chdir(outputdir)
wb = openpyxl.Workbook()
outfile_name = str(modality) + '_OS_OUTPUT_FULL.xlsx'
outputfile_path = os.path.join(root, outputdir, outfile_name)
wb.save(outputfile_path)
return outputdir
def write_output(outputdir, modality, full_onesample_t, did_g, did_s, ctrl):
os.chdir(outputdir)
outfile_name = str(modality) + '_OS_OUTPUT_FULL.xlsx'
with pd.ExcelWriter(outfile_name, engine='openpyxl') as writer:
full_onesample_t.to_excel(writer, sheet_name='FULL_HS')
did_g.to_excel(writer, sheet_name='DID_G')
did_s.to_excel(writer, sheet_name='DID_S')
ctrl.to_excel(writer, sheet_name='CTRL')
full_onesample_t_hs = p_val_cut_off(full_onesample_t, 'PVAL_ADJUST')
did_g_hs = p_val_cut_off(did_g, 'PVAL_ADJUST')
did_s_hs = p_val_cut_off(did_s, 'PVAL_ADJUST')
ctrl_hs = p_val_cut_off(ctrl, 'PVAL_ADJUST')
if check_rows_len(full_onesample_t_hs) != 0:
full_onesample_t_hs.to_excel(writer, sheet_name='FULL_G_HS')
if check_rows_len(did_g_hs) != 0:
did_g_hs.to_excel(writer, sheet_name='DID_G_HS')
if check_rows_len(did_s_hs) != 0:
did_s_hs.to_excel(writer, sheet_name='DID_S_HS')
if check_rows_len(ctrl_hs) != 0:
ctrl_hs.to_excel(writer, sheet_name='CTRL_HS')
full_onesample_t_p = p_val_cut_off(full_onesample_t, 'PVAL')
did_g_p = p_val_cut_off(did_g, 'PVAL')
did_s_p = p_val_cut_off(did_s, 'PVAL')
ctrl_p = p_val_cut_off(ctrl, 'PVAL')
if check_rows_len(full_onesample_t_hs) != 0:
full_onesample_t_p.to_excel(writer, sheet_name='FULL_G_P')
if check_rows_len(did_g_p) != 0:
did_g_p.to_excel(writer, sheet_name='DID_G_P')
if check_rows_len(did_s_p) != 0:
did_s_p.to_excel(writer, sheet_name='DID_S_P')
if check_rows_len(ctrl_p) != 0:
ctrl_p.to_excel(writer, sheet_name='CTRL_P')
def save_did_g_pval_list(root, did_g):
os.chdir(root)
did_g_pval = did_g.loc[did_g['PVAL'] < 0.05].reset_index(drop=True)
did_g_pval_list = did_g_pval[["Seed", "ROI"]]
did_g_pval_list = did_g_pval_list.drop_duplicates()
did_g_pval_list = pd.DataFrame(did_g_pval_list)
did_g_pval_list.columns = ["Seed", "ROI"]
did_g_pval_list.to_csv('DID_G_OS_SEED_ROI.csv', index=False)
def remove_osd_csv(root):
os.remove("osd.csv")
def run_one_sample_t_test(root, modality):
[full_onesample_t, did_g, did_s, ctrl] = run_split_and_bonferroni(root, modality)
outputdir = create_output_dir_file(root, modality)
full_onesample_t = full_onesample_t.round(3)
did_g = did_g.round(3)
did_s = did_s.round(3)
ctrl = ctrl.round(3)
write_output(outputdir, modality, full_onesample_t, did_g, did_s, ctrl)
save_did_g_pval_list(root, did_g)
remove_osd_csv(root)
| [
"pandas.DataFrame",
"os.mkdir",
"os.remove",
"rpy2.robjects.packages.importr",
"openpyxl.Workbook",
"pandas.read_csv",
"pandas.ExcelWriter",
"rpy2.robjects.pandas2ri.activate",
"rpy2.robjects.r",
"os.path.exists",
"rpy2.robjects.r.assign",
"numpy.array",
"shutil.rmtree",
"pandas.concat",
... | [((241, 282), 'rpy2.robjects.r', 'r', (['"""as.POSIXct("2015-01-01 00:00:01")+0 """'], {}), '(\'as.POSIXct("2015-01-01 00:00:01")+0 \')\n', (242, 282), False, 'from rpy2.robjects import r\n'), ((290, 305), 'rpy2.robjects.packages.importr', 'importr', (['"""base"""'], {}), "('base')\n", (297, 305), False, 'from rpy2.robjects.packages import importr\n'), ((313, 327), 'rpy2.robjects.packages.importr', 'importr', (['"""car"""'], {}), "('car')\n", (320, 327), False, 'from rpy2.robjects.packages import importr\n'), ((338, 356), 'rpy2.robjects.packages.importr', 'importr', (['"""mvtnorm"""'], {}), "('mvtnorm')\n", (345, 356), False, 'from rpy2.robjects.packages import importr\n'), ((365, 381), 'rpy2.robjects.packages.importr', 'importr', (['"""broom"""'], {}), "('broom')\n", (372, 381), False, 'from rpy2.robjects.packages import importr\n'), ((390, 406), 'rpy2.robjects.packages.importr', 'importr', (['"""psych"""'], {}), "('psych')\n", (397, 406), False, 'from rpy2.robjects.packages import importr\n'), ((417, 435), 'rpy2.robjects.packages.importr', 'importr', (['"""MHTmult"""'], {}), "('MHTmult')\n", (424, 435), False, 'from rpy2.robjects.packages import importr\n'), ((483, 497), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (491, 497), False, 'import os\n'), ((574, 603), 'pandas.read_csv', 'pd.read_csv', (['"""ONE_SAMPLE.csv"""'], {}), "('ONE_SAMPLE.csv')\n", (585, 603), True, 'import pandas as pd\n'), ((809, 834), 'rpy2.robjects.r.assign', 'r.assign', (['"""rGROUP"""', 'group'], {}), "('rGROUP', group)\n", (817, 834), False, 'from rpy2.robjects import r\n'), ((839, 862), 'rpy2.robjects.r.assign', 'r.assign', (['"""rSEED"""', 'seed'], {}), "('rSEED', seed)\n", (847, 862), False, 'from rpy2.robjects import r\n'), ((867, 890), 'rpy2.robjects.r.assign', 'r.assign', (['"""rROIS"""', 'rois'], {}), "('rROIS', rois)\n", (875, 890), False, 'from rpy2.robjects import r\n'), ((975, 1049), 'pandas.concat', 'pd.concat', (["[rfo_gs[['Group', 'Seed', 'Subject_ID']], rfo_gs[rois]]"], {'axis': '(1)'}), "([rfo_gs[['Group', 'Seed', 'Subject_ID']], rfo_gs[rois]], axis=1)\n", (984, 1049), True, 'import pandas as pd\n'), ((1143, 1175), 'pandas.DataFrame', 'pd.DataFrame', (['rfo_gs_rs_ss[rois]'], {}), '(rfo_gs_rs_ss[rois])\n', (1155, 1175), True, 'import pandas as pd\n'), ((1255, 1278), 'rpy2.robjects.r', 'r', (['"""osd<-read.csv(osd)"""'], {}), "('osd<-read.csv(osd)')\n", (1256, 1278), False, 'from rpy2.robjects import r\n'), ((1283, 1316), 'rpy2.robjects.r', 'r', (['"""ttest <- t.test(osd, mu = 0)"""'], {}), "('ttest <- t.test(osd, mu = 0)')\n", (1284, 1316), False, 'from rpy2.robjects import r\n'), ((1331, 1351), 'pandas.DataFrame', 'pd.DataFrame', (['[rois]'], {}), '([rois])\n', (1343, 1351), True, 'import pandas as pd\n'), ((1394, 1414), 'pandas.DataFrame', 'pd.DataFrame', (['[seed]'], {}), '([seed])\n', (1406, 1414), True, 'import pandas as pd\n'), ((1459, 1480), 'pandas.DataFrame', 'pd.DataFrame', (['[group]'], {}), '([group])\n', (1471, 1480), True, 'import pandas as pd\n'), ((1524, 1568), 'pandas.concat', 'pd.concat', (['[groupdf, seeddf, roisdf]'], {'axis': '(1)'}), '([groupdf, seeddf, roisdf], axis=1)\n', (1533, 1568), True, 'import pandas as pd\n'), ((1914, 1940), 'pandas.DataFrame', 'pd.DataFrame', (['[conf_est_1]'], {}), '([conf_est_1])\n', (1926, 1940), True, 'import pandas as pd\n'), ((2063, 2089), 'pandas.DataFrame', 'pd.DataFrame', (['[conf_est_2]'], {}), '([conf_est_2])\n', (2075, 2089), True, 'import pandas as pd\n'), ((2237, 2259), 'pandas.DataFrame', 'pd.DataFrame', (['[stdder]'], {}), '([stdder])\n', (2249, 2259), True, 'import pandas as pd\n'), ((2758, 2781), 'numpy.array', 'np.array', (['summary_stats'], {}), '(summary_stats)\n', (2766, 2781), True, 'import numpy as np\n'), ((2802, 2831), 'pandas.DataFrame', 'pd.DataFrame', (['[summary_stats]'], {}), '([summary_stats])\n', (2814, 2831), True, 'import pandas as pd\n'), ((2883, 2915), 'rpy2.robjects.r', 'r', (['"""describe_osd<-describe(osd)"""'], {}), "('describe_osd<-describe(osd)')\n", (2884, 2915), False, 'from rpy2.robjects import r\n'), ((3125, 3164), 'pandas.DataFrame', 'pd.DataFrame', (['[describe_osd.iloc[2][0]]'], {}), '([describe_osd.iloc[2][0]])\n', (3137, 3164), True, 'import pandas as pd\n'), ((3209, 3248), 'pandas.DataFrame', 'pd.DataFrame', (['[describe_osd.iloc[3][0]]'], {}), '([describe_osd.iloc[3][0]])\n', (3221, 3248), True, 'import pandas as pd\n'), ((3297, 3398), 'pandas.concat', 'pd.concat', (['[name, mean_df, std_df, pardf, statdf, pvaluedf, conf_est_df_1, conf_est_df_2]'], {'axis': '(1)'}), '([name, mean_df, std_df, pardf, statdf, pvaluedf, conf_est_df_1,\n conf_est_df_2], axis=1)\n', (3306, 3398), True, 'import pandas as pd\n'), ((3547, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (3559, 3563), True, 'import pandas as pd\n'), ((3876, 3891), 'pandas.DataFrame', 'pd.DataFrame', (['p'], {}), '(p)\n', (3888, 3891), True, 'import pandas as pd\n'), ((3936, 3956), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (3954, 3956), False, 'from rpy2.robjects import pandas2ri\n'), ((3987, 4003), 'rpy2.robjects.r', 'r', (['"""p<-cbind(p)"""'], {}), "('p<-cbind(p)')\n", (3988, 4003), False, 'from rpy2.robjects import r\n'), ((4009, 4026), 'rpy2.robjects.r', 'r', (['"""p<-unlist(p)"""'], {}), "('p<-unlist(p)')\n", (4010, 4026), False, 'from rpy2.robjects import r\n'), ((4032, 4073), 'rpy2.robjects.r', 'r', (['"""pval_adjust<-gsidak.p.adjust(p, k=1)"""'], {}), "('pval_adjust<-gsidak.p.adjust(p, k=1)')\n", (4033, 4073), False, 'from rpy2.robjects import r\n'), ((4174, 4210), 'pandas.concat', 'pd.concat', (['[df, pval_adjust]'], {'axis': '(1)'}), '([df, pval_adjust], axis=1)\n', (4183, 4210), True, 'import pandas as pd\n'), ((5107, 5121), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (5115, 5121), False, 'import os\n'), ((5188, 5217), 'os.path.join', 'os.path.join', (['root', 'outputdir'], {}), '(root, outputdir)\n', (5200, 5217), False, 'import os\n'), ((5226, 5251), 'os.path.exists', 'os.path.exists', (['outputdir'], {}), '(outputdir)\n', (5240, 5251), False, 'import os\n'), ((5989, 6008), 'os.chdir', 'os.chdir', (['outputdir'], {}), '(outputdir)\n', (5997, 6008), False, 'import os\n'), ((7701, 7715), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (7709, 7715), False, 'import os\n'), ((7916, 7945), 'pandas.DataFrame', 'pd.DataFrame', (['did_g_pval_list'], {}), '(did_g_pval_list)\n', (7928, 7945), True, 'import pandas as pd\n'), ((8089, 8109), 'os.remove', 'os.remove', (['"""osd.csv"""'], {}), "('osd.csv')\n", (8098, 8109), False, 'import os\n'), ((1596, 1609), 'rpy2.robjects.r', 'r', (['"""ttest[1]"""'], {}), "('ttest[1]')\n", (1597, 1609), False, 'from rpy2.robjects import r\n'), ((1668, 1681), 'rpy2.robjects.r', 'r', (['"""ttest[2]"""'], {}), "('ttest[2]')\n", (1669, 1681), False, 'from rpy2.robjects import r\n'), ((1739, 1752), 'rpy2.robjects.r', 'r', (['"""ttest[3]"""'], {}), "('ttest[3]')\n", (1740, 1752), False, 'from rpy2.robjects import r\n'), ((1817, 1830), 'rpy2.robjects.r', 'r', (['"""ttest[4]"""'], {}), "('ttest[4]')\n", (1818, 1830), False, 'from rpy2.robjects import r\n'), ((2158, 2171), 'rpy2.robjects.r', 'r', (['"""ttest[7]"""'], {}), "('ttest[7]')\n", (2159, 2171), False, 'from rpy2.robjects import r\n'), ((2949, 2966), 'rpy2.robjects.r', 'r', (['"""describe_osd"""'], {}), "('describe_osd')\n", (2950, 2966), False, 'from rpy2.robjects import r\n'), ((3674, 3727), 'pandas.concat', 'pd.concat', (['[full_onesample_t, onesample_t_df]'], {'axis': '(0)'}), '([full_onesample_t, onesample_t_df], axis=0)\n', (3683, 3727), True, 'import pandas as pd\n'), ((4105, 4121), 'rpy2.robjects.r', 'r', (['"""pval_adjust"""'], {}), "('pval_adjust')\n", (4106, 4121), False, 'from rpy2.robjects import r\n'), ((5261, 5285), 'shutil.rmtree', 'shutil.rmtree', (['outputdir'], {}), '(outputdir)\n', (5274, 5285), False, 'import shutil\n'), ((5326, 5340), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (5334, 5340), False, 'import os\n'), ((5349, 5368), 'os.mkdir', 'os.mkdir', (['outputdir'], {}), '(outputdir)\n', (5357, 5368), False, 'import os\n'), ((5377, 5396), 'os.chdir', 'os.chdir', (['outputdir'], {}), '(outputdir)\n', (5385, 5396), False, 'import os\n'), ((5411, 5430), 'openpyxl.Workbook', 'openpyxl.Workbook', ([], {}), '()\n', (5428, 5430), False, 'import openpyxl\n'), ((5519, 5562), 'os.path.join', 'os.path.join', (['root', 'outputdir', 'outfile_name'], {}), '(root, outputdir, outfile_name)\n', (5531, 5562), False, 'import os\n'), ((5615, 5629), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (5623, 5629), False, 'import os\n'), ((5638, 5657), 'os.mkdir', 'os.mkdir', (['outputdir'], {}), '(outputdir)\n', (5646, 5657), False, 'import os\n'), ((5666, 5685), 'os.chdir', 'os.chdir', (['outputdir'], {}), '(outputdir)\n', (5674, 5685), False, 'import os\n'), ((5700, 5719), 'openpyxl.Workbook', 'openpyxl.Workbook', ([], {}), '()\n', (5717, 5719), False, 'import openpyxl\n'), ((5808, 5851), 'os.path.join', 'os.path.join', (['root', 'outputdir', 'outfile_name'], {}), '(root, outputdir, outfile_name)\n', (5820, 5851), False, 'import os\n'), ((6076, 6123), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['outfile_name'], {'engine': '"""openpyxl"""'}), "(outfile_name, engine='openpyxl')\n", (6090, 6123), True, 'import pandas as pd\n')] |
import gym
import sys
import agent
import Q_network
import experience_replay
import torch
import numpy as np
import logging
from torch.utils.tensorboard import SummaryWriter
def run_episode(env, agent, rpm):
total_reward = 0
obs = env.reset()
step = 0
while True:
step += 1
action = agent.sample(obs)
next_obs, reward, done, _ = env.step(action)
rpm.append((obs, action, reward, next_obs, done))
# train model
if (len(rpm) > opt["MEMORY_WARMUP_SIZE"] and (step % opt["LEARN_FREQ"] == 0)):
(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done) = rpm.sample(opt["BATCH_SIZE"])
agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done) # s,a,r,s',done
total_reward += reward
obs = next_obs
if done:
break
return total_reward, step
# 评估 agent, 跑 5 个episode,总reward求平均
def evaluate(times, env, agent, render=False):
with torch.no_grad():
eval_reward = []
for i in range(times):
obs = env.reset()
episode_reward = 0
while True:
action = agent.predict(obs) # 预测动作,只选最优动作
obs, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def train(episodes, env, env_name, agent, save):
rpm = experience_replay.ReplayMemory(opt["MEMORY_SIZE"])
while len(rpm) < opt["MEMORY_WARMUP_SIZE"]:
run_episode(env, agent, rpm)
for episode in range(episodes):
reward, steps = run_episode(env, agent, rpm)
writer.add_scalar(env_name + "-reward", reward, episode)
# reward, steps = run_episode_with_sarsa(env, agent, False)
print("train episode {} : reward {}, steps {}".format(episode + 1, reward, steps))
logging.warning("train episode {} : reward {}, steps {}".format(episode + 1, reward, steps))
if episode % 50 == 0:
eval_reward = evaluate(5, env, agent, render = False)
print("evaluate {} episodes : e_greedy {}, reward {}".format(5, agent.e_greedy, eval_reward))
logging.warning("evaluate 5 episodes : e_greedy {}, reward {}".format(agent.e_greedy, eval_reward))
if save:
agent.save(env_name)
return agent
opt = {
"LEARN_FREQ" : 5, # 训练频率,不需要每一个step都learn,攒一些新增经验后再learn,提高效率
"MEMORY_SIZE" : 200000, # replay memory的大小,越大越占用内存
"MEMORY_WARMUP_SIZE" : 500, # replay_memory 里需要预存一些经验数据,再开启训练
"BATCH_SIZE" : 128, # 每次给agent learn的数据数量,从replay memory随机里sample一批数据出来
"LEARNING_RATE" : 0.001, # 学习率
"GAMMA" : 0.99, # reward 的衰减因子,一般取 0.9 到 0.999 不等
"E_GREEDY" : 0.1,
"E_GREEDY_DECREMENT" : 1e-6, # 1e-6
"max_episode" : 2000
}
if __name__ == "__main__":
writer = SummaryWriter()
env_name = "CartPole-v0"
# env_name = "MountainCar-v0"
logging.basicConfig(filename = "{}.log".format(env_name))
env = gym.make(env_name)
logging.warning("DQN trained on {}".format(env_name))
logging.warning(opt)
num_act = env.action_space.n
num_obs = env.observation_space.shape[0]
dqn_agent = agent.DQN_agent(num_act, num_obs, opt["GAMMA"], opt["LEARNING_RATE"], opt["E_GREEDY"], opt["E_GREEDY_DECREMENT"])
# dqn_agent.load("CartPole-v0.pth")
# print("evaluate on {} episode: reward {}".format(20, evaluate(20, env, dqn_agent, True)))
train(opt["max_episode"], env, env_name, dqn_agent, True) | [
"gym.make",
"agent.learn",
"agent.save",
"logging.warning",
"agent.sample",
"experience_replay.ReplayMemory",
"agent.predict",
"agent.DQN_agent",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"torch.no_grad"
] | [((1473, 1493), 'numpy.mean', 'np.mean', (['eval_reward'], {}), '(eval_reward)\n', (1480, 1493), True, 'import numpy as np\n'), ((1553, 1603), 'experience_replay.ReplayMemory', 'experience_replay.ReplayMemory', (["opt['MEMORY_SIZE']"], {}), "(opt['MEMORY_SIZE'])\n", (1583, 1603), False, 'import experience_replay\n'), ((2972, 2987), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (2985, 2987), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3128, 3146), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (3136, 3146), False, 'import gym\n'), ((3209, 3229), 'logging.warning', 'logging.warning', (['opt'], {}), '(opt)\n', (3224, 3229), False, 'import logging\n'), ((3324, 3442), 'agent.DQN_agent', 'agent.DQN_agent', (['num_act', 'num_obs', "opt['GAMMA']", "opt['LEARNING_RATE']", "opt['E_GREEDY']", "opt['E_GREEDY_DECREMENT']"], {}), "(num_act, num_obs, opt['GAMMA'], opt['LEARNING_RATE'], opt[\n 'E_GREEDY'], opt['E_GREEDY_DECREMENT'])\n", (3339, 3442), False, 'import agent\n'), ((316, 333), 'agent.sample', 'agent.sample', (['obs'], {}), '(obs)\n', (328, 333), False, 'import agent\n'), ((990, 1005), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1003, 1005), False, 'import torch\n'), ((2438, 2458), 'agent.save', 'agent.save', (['env_name'], {}), '(env_name)\n', (2448, 2458), False, 'import agent\n'), ((680, 758), 'agent.learn', 'agent.learn', (['batch_obs', 'batch_action', 'batch_reward', 'batch_next_obs', 'batch_done'], {}), '(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done)\n', (691, 758), False, 'import agent\n'), ((1173, 1191), 'agent.predict', 'agent.predict', (['obs'], {}), '(obs)\n', (1186, 1191), False, 'import agent\n')] |
from __future__ import absolute_import
import numpy as npo
from scipy.signal import convolve as sp_convolve
from builtins import range
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.misc
import autograd.scipy.signal
import autograd.scipy.stats as stats
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.special as special
from autograd import grad
from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar
npr.seed(1)
R = npr.randn
U = npr.uniform
### Stats ###
def test_norm_pdf(): combo_check(stats.norm.pdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_cdf(): combo_check(stats.norm.cdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logpdf(): combo_check(stats.norm.logpdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_logcdf(): combo_check(stats.norm.logcdf, [0,1,2], [R(4)], [R(4)], [R(4)**2 + 1.1])
def test_norm_pdf_broadcast(): combo_check(stats.norm.pdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_cdf_broadcast(): combo_check(stats.norm.cdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logpdf_broadcast(): combo_check(stats.norm.logpdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def test_norm_logcdf_broadcast(): combo_check(stats.norm.logcdf, [0,1,2], [R(4,3)], [R(1,3)], [R(4,1)**2 + 1.1])
def make_psd(mat): return np.dot(mat.T, mat) + np.eye(mat.shape[0])
def test_mvn_pdf(): combo_check(mvn.logpdf, [0, 1, 2], [R(4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_logpdf(): combo_check(mvn.logpdf, [0, 1, 2], [R(4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_entropy():combo_check(mvn.entropy,[0, 1], [R(4)], [make_psd(R(4, 4))])
def test_mvn_pdf_broadcast(): combo_check(mvn.logpdf, [0, 1, 2], [R(5, 4)], [R(4)], [make_psd(R(4, 4))])
def test_mvn_logpdf_broadcast(): combo_check(mvn.logpdf, [0, 1, 2], [R(5, 4)], [R(4)], [make_psd(R(4, 4))])
alpha = npr.random(4)**2 + 1.2
x = stats.dirichlet.rvs(alpha, size=1)[0,:]
# Need to normalize input so that x's sum to one even when we perturb them to compute numeric gradient.
def normalize(x): return x / sum(x)
def normalized_dirichlet_pdf( x, alpha): return stats.dirichlet.pdf( normalize(x), alpha)
def normalized_dirichlet_logpdf(x, alpha): return stats.dirichlet.logpdf(normalize(x), alpha)
def test_dirichlet_pdf_x(): combo_check(normalized_dirichlet_pdf, [0], [x], [alpha])
def test_dirichlet_pdf_alpha(): combo_check(stats.dirichlet.pdf, [1], [x], [alpha])
def test_dirichlet_logpdf_x(): combo_check(normalized_dirichlet_logpdf, [0], [x], [alpha])
def test_dirichlet_logpdf_alpha(): combo_check(stats.dirichlet.logpdf, [1], [x], [alpha])
### Misc ###
def test_logsumexp1(): combo_check(autograd.scipy.misc.logsumexp, [0], [1.1, R(4), R(3,4)], axis=[None, 0], keepdims=[True, False])
def test_logsumexp2(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(3,4), R(4,5,6), R(1,5)], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp3(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(4)], b = [np.exp(R(4))], axis=[None, 0], keepdims=[True, False])
def test_logsumexp4(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(3,4),], b = [np.exp(R(3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp5(): combo_check(autograd.scipy.misc.logsumexp, [0], [R(2,3,4)], b = [np.exp(R(2,3,4))], axis=[None, 0, 1], keepdims=[True, False])
def test_logsumexp6():
x = npr.randn(1,5)
def f(a): return autograd.scipy.misc.logsumexp(a, axis=1, keepdims=True)
check_grads(f, x)
check_grads(lambda a: to_scalar(grad(f)(a)), x)
### Signal ###
def test_convolve_generalization():
ag_convolve = autograd.scipy.signal.convolve
A_35 = R(3, 5)
A_34 = R(3, 4)
A_342 = R(3, 4, 2)
A_2543 = R(2, 5, 4, 3)
A_24232 = R(2, 4, 2, 3, 2)
for mode in ['valid', 'full']:
assert npo.allclose(ag_convolve(A_35, A_34, axes=([1], [0]), mode=mode)[1, 2],
sp_convolve(A_35[1,:], A_34[:, 2], mode))
assert npo.allclose(ag_convolve(A_35, A_34, axes=([],[]), dot_axes=([0], [0]), mode=mode),
npo.tensordot(A_35, A_34, axes=([0], [0])))
assert npo.allclose(ag_convolve(A_35, A_342, axes=([1],[2]),
dot_axes=([0], [0]), mode=mode)[2],
sum([sp_convolve(A_35[i, :], A_342[i, 2, :], mode)
for i in range(3)]))
assert npo.allclose(ag_convolve(A_2543, A_24232, axes=([1, 2],[2, 4]),
dot_axes=([0, 3], [0, 3]), mode=mode)[2],
sum([sum([sp_convolve(A_2543[i, :, :, j],
A_24232[i, 2, :, j, :], mode)
for i in range(2)]) for j in range(3)]))
def test_convolve():
combo_check(autograd.scipy.signal.convolve, [0,1],
[R(4), R(5), R(6)],
[R(2), R(3), R(4)], mode=['full', 'valid'])
def test_convolve_2d():
combo_check(autograd.scipy.signal.convolve, [0, 1],
[R(4, 3), R(5, 4), R(6, 7)],
[R(2, 2), R(3, 2), R(4, 2), R(4, 1)], mode=['full', 'valid'])
def test_convolve_ignore():
combo_check(autograd.scipy.signal.convolve, [0, 1], [R(4, 3)], [R(3, 2)],
axes=[([0],[0]), ([1],[1]), ([0],[1]), ([1],[0]), ([0, 1], [0, 1]), ([1, 0], [1, 0])],
mode=['full', 'valid'])
def test_convolve_ignore_dot():
combo_check(autograd.scipy.signal.convolve, [0, 1], [R(3, 3, 2)], [R(3, 2, 3)],
axes=[([1],[1])], dot_axes=[([0],[2]), ([0],[0])], mode=['full', 'valid'])
### Special ###
def test_polygamma(): combo_check(special.polygamma, [1], [0], R(4)**2 + 1.3)
def test_jn(): combo_check(special.jn, [1], [2], R(4)**2 + 1.3)
def test_yn(): combo_check(special.yn, [1], [2], R(4)**2 + 1.3)
def test_psi(): unary_ufunc_check(special.psi, lims=[0.3, 2.0], test_complex=False)
def test_digamma(): unary_ufunc_check(special.digamma, lims=[0.3, 2.0], test_complex=False)
def test_gamma(): unary_ufunc_check(special.gamma, lims=[0.3, 2.0], test_complex=False)
def test_gammaln(): unary_ufunc_check(special.gammaln, lims=[0.3, 2.0], test_complex=False)
def test_gammasgn(): unary_ufunc_check(special.gammasgn,lims=[0.3, 2.0], test_complex=False)
def test_rgamma() : unary_ufunc_check(special.rgamma, lims=[0.3, 2.0], test_complex=False)
def test_multigammaln(): combo_check(special.multigammaln, [0], [U(4., 5.), U(4., 5., (2,3))],
[1, 2, 3])
def test_j0(): unary_ufunc_check(special.j0, lims=[0.2, 20.0], test_complex=False)
def test_j1(): unary_ufunc_check(special.j1, lims=[0.2, 20.0], test_complex=False)
def test_y0(): unary_ufunc_check(special.y0, lims=[0.2, 20.0], test_complex=False)
def test_y1(): unary_ufunc_check(special.y1, lims=[0.2, 20.0], test_complex=False)
| [
"autograd.numpy.dot",
"builtins.range",
"autograd.scipy.stats.dirichlet.rvs",
"numpy.tensordot",
"autograd.numpy.random.randn",
"autograd.grad",
"numpy_utils.check_grads",
"numpy_utils.combo_check",
"numpy_utils.unary_ufunc_check",
"autograd.numpy.random.random",
"autograd.numpy.eye",
"scipy.s... | [((496, 507), 'autograd.numpy.random.seed', 'npr.seed', (['(1)'], {}), '(1)\n', (504, 507), True, 'import autograd.numpy.random as npr\n'), ((2001, 2035), 'autograd.scipy.stats.dirichlet.rvs', 'stats.dirichlet.rvs', (['alpha'], {'size': '(1)'}), '(alpha, size=1)\n', (2020, 2035), True, 'import autograd.scipy.stats as stats\n'), ((2406, 2462), 'numpy_utils.combo_check', 'combo_check', (['normalized_dirichlet_pdf', '[0]', '[x]', '[alpha]'], {}), '(normalized_dirichlet_pdf, [0], [x], [alpha])\n', (2417, 2462), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((2501, 2552), 'numpy_utils.combo_check', 'combo_check', (['stats.dirichlet.pdf', '[1]', '[x]', '[alpha]'], {}), '(stats.dirichlet.pdf, [1], [x], [alpha])\n', (2512, 2552), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((2596, 2655), 'numpy_utils.combo_check', 'combo_check', (['normalized_dirichlet_logpdf', '[0]', '[x]', '[alpha]'], {}), '(normalized_dirichlet_logpdf, [0], [x], [alpha])\n', (2607, 2655), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((2691, 2745), 'numpy_utils.combo_check', 'combo_check', (['stats.dirichlet.logpdf', '[1]', '[x]', '[alpha]'], {}), '(stats.dirichlet.logpdf, [1], [x], [alpha])\n', (2702, 2745), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((3546, 3561), 'autograd.numpy.random.randn', 'npr.randn', (['(1)', '(5)'], {}), '(1, 5)\n', (3555, 3561), True, 'import autograd.numpy.random as npr\n'), ((3642, 3659), 'numpy_utils.check_grads', 'check_grads', (['f', 'x'], {}), '(f, x)\n', (3653, 3659), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6077, 6144), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.psi'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.psi, lims=[0.3, 2.0], test_complex=False)\n', (6094, 6144), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6171, 6242), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.digamma'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.digamma, lims=[0.3, 2.0], test_complex=False)\n', (6188, 6242), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6265, 6334), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.gamma'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.gamma, lims=[0.3, 2.0], test_complex=False)\n', (6282, 6334), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6359, 6430), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.gammaln'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.gammaln, lims=[0.3, 2.0], test_complex=False)\n', (6376, 6430), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6453, 6525), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.gammasgn'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.gammasgn, lims=[0.3, 2.0], test_complex=False)\n', (6470, 6525), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6547, 6617), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.rgamma'], {'lims': '[0.3, 2.0]', 'test_complex': '(False)'}), '(special.rgamma, lims=[0.3, 2.0], test_complex=False)\n', (6564, 6617), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6778, 6845), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.j0'], {'lims': '[0.2, 20.0]', 'test_complex': '(False)'}), '(special.j0, lims=[0.2, 20.0], test_complex=False)\n', (6795, 6845), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6861, 6928), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.j1'], {'lims': '[0.2, 20.0]', 'test_complex': '(False)'}), '(special.j1, lims=[0.2, 20.0], test_complex=False)\n', (6878, 6928), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((6944, 7011), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.y0'], {'lims': '[0.2, 20.0]', 'test_complex': '(False)'}), '(special.y0, lims=[0.2, 20.0], test_complex=False)\n', (6961, 7011), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((7027, 7094), 'numpy_utils.unary_ufunc_check', 'unary_ufunc_check', (['special.y1'], {'lims': '[0.2, 20.0]', 'test_complex': '(False)'}), '(special.y1, lims=[0.2, 20.0], test_complex=False)\n', (7044, 7094), False, 'from numpy_utils import combo_check, check_grads, unary_ufunc_check, to_scalar\n'), ((1421, 1439), 'autograd.numpy.dot', 'np.dot', (['mat.T', 'mat'], {}), '(mat.T, mat)\n', (1427, 1439), True, 'import autograd.numpy as np\n'), ((1442, 1462), 'autograd.numpy.eye', 'np.eye', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (1448, 1462), True, 'import autograd.numpy as np\n'), ((1974, 1987), 'autograd.numpy.random.random', 'npr.random', (['(4)'], {}), '(4)\n', (1984, 1987), True, 'import autograd.numpy.random as npr\n'), ((4088, 4129), 'scipy.signal.convolve', 'sp_convolve', (['A_35[1, :]', 'A_34[:, 2]', 'mode'], {}), '(A_35[1, :], A_34[:, 2], mode)\n', (4099, 4129), True, 'from scipy.signal import convolve as sp_convolve\n'), ((4257, 4299), 'numpy.tensordot', 'npo.tensordot', (['A_35', 'A_34'], {'axes': '([0], [0])'}), '(A_35, A_34, axes=([0], [0]))\n', (4270, 4299), True, 'import numpy as npo\n'), ((3696, 3703), 'autograd.grad', 'grad', (['f'], {}), '(f)\n', (3700, 3703), False, 'from autograd import grad\n'), ((4479, 4524), 'scipy.signal.convolve', 'sp_convolve', (['A_35[i, :]', 'A_342[i, 2, :]', 'mode'], {}), '(A_35[i, :], A_342[i, 2, :], mode)\n', (4490, 4524), True, 'from scipy.signal import convolve as sp_convolve\n'), ((4567, 4575), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (4572, 4575), False, 'from builtins import range\n'), ((4956, 4964), 'builtins.range', 'range', (['(3)'], {}), '(3)\n', (4961, 4964), False, 'from builtins import range\n'), ((4778, 4839), 'scipy.signal.convolve', 'sp_convolve', (['A_2543[i, :, :, j]', 'A_24232[i, 2, :, j, :]', 'mode'], {}), '(A_2543[i, :, :, j], A_24232[i, 2, :, j, :], mode)\n', (4789, 4839), True, 'from scipy.signal import convolve as sp_convolve\n'), ((4936, 4944), 'builtins.range', 'range', (['(2)'], {}), '(2)\n', (4941, 4944), False, 'from builtins import range\n')] |
import torch
import random
import numpy as np
def normalize_center(x: torch.tensor):
# cast origin from top left to middle of image
torch.add(x, -0.5, out=x)
for i, _ in enumerate(x):
x[i, 1] = - x[i, 1]
return x
def denormalize_center(x: torch.tensor):
# cast origin from top left to middle of image
for i, _ in enumerate(x):
x[i, 1] = - x[i, 1]
torch.add(x, 0.5, out=x)
return x
def flip_h(x: torch.tensor):
# randomly flip according to y axis
do_flip_h = random.randint(0, 1)
if do_flip_h:
for i, _ in enumerate(x):
x[i, 0] = - x[i, 0]
return x
def rotate(min_angle: float, max_angle: float):
def inner(x: torch.tensor):
# do_rotate = random.randint(0, 1)
do_rotate = 1
if do_rotate:
rot_angle = random.randint(min_angle, max_angle)
rot_mat = torch.zeros((2, 2))
rot_mat[0, 0] = np.cos(np.deg2rad(rot_angle))
rot_mat[0, 1] = - np.sin(np.deg2rad(rot_angle))
rot_mat[1, 0] = np.sin(np.deg2rad(rot_angle))
rot_mat[1, 1] = np.cos(np.deg2rad(rot_angle))
ret = torch.zeros_like(x)
for i, point in enumerate(x):
point = point.view(2, 1)
ret[i] = torch.mm(rot_mat, point).view(2)
return ret
return x
return inner
if __name__ == "__main__":
test_tensor = torch.rand((21, 2))
print(test_tensor[:4])
test_tensor = normalize_center(test_tensor)
print(test_tensor[:4])
test_tensor = denormalize_center(test_tensor)
print(test_tensor[:4])
| [
"random.randint",
"torch.zeros_like",
"numpy.deg2rad",
"torch.add",
"torch.mm",
"torch.rand",
"torch.zeros"
] | [((144, 169), 'torch.add', 'torch.add', (['x', '(-0.5)'], {'out': 'x'}), '(x, -0.5, out=x)\n', (153, 169), False, 'import torch\n'), ((400, 424), 'torch.add', 'torch.add', (['x', '(0.5)'], {'out': 'x'}), '(x, 0.5, out=x)\n', (409, 424), False, 'import torch\n'), ((526, 546), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (540, 546), False, 'import random\n'), ((1437, 1456), 'torch.rand', 'torch.rand', (['(21, 2)'], {}), '((21, 2))\n', (1447, 1456), False, 'import torch\n'), ((837, 873), 'random.randint', 'random.randint', (['min_angle', 'max_angle'], {}), '(min_angle, max_angle)\n', (851, 873), False, 'import random\n'), ((897, 916), 'torch.zeros', 'torch.zeros', (['(2, 2)'], {}), '((2, 2))\n', (908, 916), False, 'import torch\n'), ((1170, 1189), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (1186, 1189), False, 'import torch\n'), ((952, 973), 'numpy.deg2rad', 'np.deg2rad', (['rot_angle'], {}), '(rot_angle)\n', (962, 973), True, 'import numpy as np\n'), ((1070, 1091), 'numpy.deg2rad', 'np.deg2rad', (['rot_angle'], {}), '(rot_angle)\n', (1080, 1091), True, 'import numpy as np\n'), ((1128, 1149), 'numpy.deg2rad', 'np.deg2rad', (['rot_angle'], {}), '(rot_angle)\n', (1138, 1149), True, 'import numpy as np\n'), ((1012, 1033), 'numpy.deg2rad', 'np.deg2rad', (['rot_angle'], {}), '(rot_angle)\n', (1022, 1033), True, 'import numpy as np\n'), ((1298, 1322), 'torch.mm', 'torch.mm', (['rot_mat', 'point'], {}), '(rot_mat, point)\n', (1306, 1322), False, 'import torch\n')] |
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import shutil
from fnmatch import filter
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# added for adversarial experiment
import torch.nn.functional as F
from distutils.version import LooseVersion
import numpy as np
def clamp_tensor(image, upper_bound, lower_bound):
image = torch.where(image > upper_bound, upper_bound, image)
image = torch.where(image < lower_bound, lower_bound, image)
return image
def back_transform(image, info):
# image = image2.copy()
image[:, 0, :, :] = image[:, 0, :, :] * info["std"][0] + info["mean"][0]
image[:, 1, :, :] = image[:, 1, :, :] * info["std"][1] + info["mean"][1]
image[:, 2, :, :] = image[:, 2, :, :] * info["std"][2] + info["mean"][2]
return image
def forward_transform(image, info):
image[:, 0, :, :] = (image[:, 0, :, :] - info["mean"][0]) / info["std"][0]
image[:, 1, :, :] = (image[:, 1, :, :] - info["mean"][1]) / info["std"][1]
image[:, 2, :, :] = (image[:, 2, :, :] - info["mean"][2]) / info["std"][2]
return image
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def adjust_learning_rate(args, optimizer, epoch):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (args.lr_change ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
elif args.lr_mode == 'schedule':
print('args.args.step_size_schedule',args.step_size_schedule)
assert len(args.step_size_schedule) == 3
lr = args.step_size_schedule[0][1]
if epoch >= args.step_size_schedule[1][0] and epoch < args.step_size_schedule[2][0]:
lr = args.step_size_schedule[1][1]
elif epoch >= args.step_size_schedule[2][0]:
lr = args.step_size_schedule[2][1]
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', save_model_path = None):
try:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(save_model_path, 'model_best.pth.tar'))
except:
for _ in range(30): print("--------------WARNING!!! FAILED TO SAVE. DISK POSSIBLY OUT OF SPACE--------------")
pass
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
if correct.size(0) == 0:
pass
# print('c1', correct.size())
correct = correct[target != 255]
correct = correct.view(-1)
if correct.size(0) == 0:
# print('c2', correct.size(), correct)
cor_num = correct.float().sum(0)
score = cor_num.mul(100.0 / 1)
else:
cor_num = correct.float().sum(0)
# print('correc size', correct.size(0))
score = cor_num.mul(100.0 / correct.size(0))
# print('cor num', cor_num, correct.size(0),correct.size())
# return score.data[0]
return score.data.item()
def cross_entropy2d(input, target, weight=None, size_average=True, ignore_index=255):
# input: (n, c, h, w), target: (n, h, w)
n, c, h, w = input.size()
# log_p: (n, c, h, w)
if LooseVersion(torch.__version__) < LooseVersion('0.3'):
# ==0.2.X
log_p = F.log_softmax(input)
else:
# >=0.3
log_p = F.log_softmax(input, dim=1)
# log_p: (n*h*w, c)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
log_p = log_p.view(-1, c)
# target: (n*h*w,)
mask = target >= 0
target = target[mask]
loss = F.nll_loss(log_p, target, weight=weight, reduction='sum', ignore_index=ignore_index)
if size_average:
loss /= mask.data.sum()
return loss
# added for adversarial experiment ends
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
def include_patterns(*patterns):
"""Factory function that can be used with copytree() ignore parameter.
Arguments define a sequence of glob-style patterns
that are used to specify what files to NOT ignore.
Creates and returns a function that determines this for each directory
in the file hierarchy rooted at the source directory when used with
shutil.copytree().
"""
def _ignore_patterns(path, names):
keep = set(name for pattern in patterns
for name in filter(names, pattern))
ignore = set(name for name in names
if name not in keep and not os.path.isdir(join(path, name)))
return ignore
return _ignore_patterns
| [
"threading.Thread",
"fnmatch.filter",
"os.makedirs",
"torch.where",
"math.fabs",
"distutils.version.LooseVersion",
"numpy.empty",
"os.path.exists",
"torch.save",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"PIL.Image.fromarray",
"numpy.diag",
"os.path.split",
"os.p... | [((560, 612), 'torch.where', 'torch.where', (['(image > upper_bound)', 'upper_bound', 'image'], {}), '(image > upper_bound, upper_bound, image)\n', (571, 612), False, 'import torch\n'), ((625, 677), 'torch.where', 'torch.where', (['(image < lower_bound)', 'lower_bound', 'image'], {}), '(image < lower_bound, lower_bound, image)\n', (636, 677), False, 'import torch\n'), ((1541, 1577), 'numpy.empty', 'np.empty', (['out_size'], {'dtype': 'np.float32'}), '(out_size, dtype=np.float32)\n', (1549, 1577), True, 'import numpy as np\n'), ((7110, 7199), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_p', 'target'], {'weight': 'weight', 'reduction': '"""sum"""', 'ignore_index': 'ignore_index'}), "(log_p, target, weight=weight, reduction='sum', ignore_index=\n ignore_index)\n", (7120, 7199), True, 'import torch.nn.functional as F\n'), ((2116, 2166), 'threading.Thread', 'threading.Thread', ([], {'target': 'resize_channel', 'args': '(j,)'}), '(target=resize_channel, args=(j,))\n', (2132, 2166), False, 'import threading\n'), ((3749, 3762), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (3756, 3762), True, 'import numpy as np\n'), ((4154, 4208), 'os.path.join', 'os.path.join', (['output_dir', "(filenames[ind][:-4] + '.png')"], {}), "(output_dir, filenames[ind][:-4] + '.png')\n", (4166, 4208), False, 'import os\n'), ((4656, 4710), 'os.path.join', 'os.path.join', (['output_dir', "(filenames[ind][:-4] + '.png')"], {}), "(output_dir, filenames[ind][:-4] + '.png')\n", (4668, 4710), False, 'import os\n'), ((4934, 4961), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (4944, 4961), False, 'import torch\n'), ((6663, 6694), 'distutils.version.LooseVersion', 'LooseVersion', (['torch.__version__'], {}), '(torch.__version__)\n', (6675, 6694), False, 'from distutils.version import LooseVersion\n'), ((6697, 6716), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.3"""'], {}), "('0.3')\n", (6709, 6716), False, 'from distutils.version import LooseVersion\n'), ((6752, 6772), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {}), '(input)\n', (6765, 6772), True, 'import torch.nn.functional as F\n'), ((6815, 6842), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (6828, 6842), True, 'import torch.nn.functional as F\n'), ((3794, 3807), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (3801, 3807), True, 'import numpy as np\n'), ((4227, 4236), 'os.path.split', 'split', (['fn'], {}), '(fn)\n', (4232, 4236), False, 'from os.path import exists, join, split\n'), ((4255, 4270), 'os.path.exists', 'exists', (['out_dir'], {}), '(out_dir)\n', (4261, 4270), False, 'from os.path import exists, join, split\n'), ((4284, 4304), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4295, 4304), False, 'import os\n'), ((4728, 4737), 'os.path.split', 'split', (['fn'], {}), '(fn)\n', (4733, 4737), False, 'from os.path import exists, join, split\n'), ((4755, 4770), 'os.path.exists', 'exists', (['out_dir'], {}), '(out_dir)\n', (4761, 4770), False, 'from os.path import exists, join, split\n'), ((4783, 4803), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (4794, 4803), False, 'import os\n'), ((5020, 5071), 'os.path.join', 'os.path.join', (['save_model_path', '"""model_best.pth.tar"""'], {}), "(save_model_path, 'model_best.pth.tar')\n", (5032, 5071), False, 'import os\n'), ((1647, 1680), 'PIL.Image.fromarray', 'Image.fromarray', (['tensor_cpu[i, j]'], {}), '(tensor_cpu[i, j])\n', (1662, 1680), False, 'from PIL import Image\n'), ((7542, 7562), 'math.fabs', 'math.fabs', (['(i / f - c)'], {}), '(i / f - c)\n', (7551, 7562), False, 'import math\n'), ((7571, 7591), 'math.fabs', 'math.fabs', (['(j / f - c)'], {}), '(j / f - c)\n', (7580, 7591), False, 'import math\n'), ((8182, 8204), 'fnmatch.filter', 'filter', (['names', 'pattern'], {}), '(names, pattern)\n', (8188, 8204), False, 'from fnmatch import filter\n'), ((1857, 1890), 'PIL.Image.fromarray', 'Image.fromarray', (['tensor_cpu[i, j]'], {}), '(tensor_cpu[i, j])\n', (1872, 1890), False, 'from PIL import Image\n'), ((8313, 8329), 'os.path.join', 'join', (['path', 'name'], {}), '(path, name)\n', (8317, 8329), False, 'from os.path import exists, join, split\n')] |
import gym
import time
import torch
import warnings
import numpy as np
from copy import deepcopy
from numbers import Number
from typing import Dict, List, Union, Optional, Callable
from advertorch.attacks.base import Attack
from tianshou.policy import BasePolicy
from tianshou.env import BaseVectorEnv, DummyVectorEnv
from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy
import random as rd
class adversarial_training_collector(object):
"""Collector that defends an existing policy with adversarial training.
:param policy: an instance of the :class:`~tianshou.policy.BasePolicy`
class.
:param env: a ``gym.Env`` environment or an instance of the
:class:`~tianshou.env.BaseVectorEnv` class.
:param obs_adv_atk: an instance of the :class:`~advertorch.attacks.base.Attack`
class implementing an image adversarial attack.
:param atk_frequency: float, how frequently attacking env observations
:param test: bool, if True adversarial actions replace original actions
:param buffer: an instance of the :class:`~tianshou.data.ReplayBuffer`
class. If set to ``None`` (testing phase), it will not store the data.
:param function preprocess_fn: a function called before the data has been
added to the buffer, see issue #42 and :ref:`preprocess_fn`, defaults
to None.
:param function reward_metric: to be used in multi-agent RL. The reward to
report is of shape [agent_num], but we need to return a single scalar
to monitor training. This function specifies what is the desired
metric, e.g., the reward of agent 1 or the average reward over all
agents. By default, the behavior is to select the reward of agent 1.
:param atk_frequency: float, how frequently attacking env observations.
Note: parallel or async envs are currently not supported
"""
def __init__(
self,
policy: BasePolicy,
env: Union[gym.Env, BaseVectorEnv],
obs_adv_atk: Attack,
buffer: Optional[ReplayBuffer] = None,
preprocess_fn: Optional[Callable[..., Batch]] = None,
reward_metric: Optional[Callable[[np.ndarray], float]] = None,
atk_frequency: float = 0.5,
test: bool = False,
device: str = 'cuda' if torch.cuda.is_available() else 'cpu'
) -> None:
super().__init__()
if not isinstance(env, BaseVectorEnv):
env = DummyVectorEnv([lambda: env])
self.env = env
self.env_num = len(env)
self.device = device
self.obs_adv_atk = obs_adv_atk
self.obs_adv_atk.targeted = False
self.atk_frequency = atk_frequency
self.test = test
# environments that are available in step()
# this means all environments in synchronous simulation
# but only a subset of environments in asynchronous simulation
self._ready_env_ids = np.arange(self.env_num)
# need cache buffers before storing in the main buffer
self._cached_buf = [ListReplayBuffer() for _ in range(self.env_num)]
self.buffer = buffer
self.policy = policy
self.preprocess_fn = preprocess_fn
self.process_fn = policy.process_fn
self._action_space = env.action_space
self._rew_metric = reward_metric or adversarial_training_collector._default_rew_metric
# avoid creating attribute outside __init__
self.reset()
@staticmethod
def _default_rew_metric(
x: Union[Number, np.number]
) -> Union[Number, np.number]:
# this internal function is designed for single-agent RL
# for multi-agent RL, a reward_metric must be provided
assert np.asanyarray(x).size == 1, (
"Please specify the reward_metric "
"since the reward is not a scalar."
)
return x
def reset(self) -> None:
"""Reset all related variables in the collector."""
# use empty Batch for ``state`` so that ``self.data`` supports slicing
# convert empty Batch to None when passing data to policy
self.data = Batch(state={}, obs={}, act={}, rew={}, done={}, info={},
obs_next={}, policy={})
self.reset_env()
self.reset_buffer()
self.reset_stat()
def reset_stat(self) -> None:
"""Reset the statistic variables."""
self.collect_time, self.collect_step, self.collect_episode = 0.0, 0, 0
def reset_buffer(self) -> None:
"""Reset the main data buffer."""
if self.buffer is not None:
self.buffer.reset()
def get_env_num(self) -> int:
"""Return the number of environments the collector have."""
return self.env_num
def reset_env(self) -> None:
"""Reset all of the environment(s)' states and the cache buffers."""
self._ready_env_ids = np.arange(self.env_num)
obs = self.env.reset()
if self.preprocess_fn:
obs = self.preprocess_fn(obs=obs).get("obs", obs)
self.data.obs = obs
for b in self._cached_buf:
b.reset()
def _reset_state(self, id: Union[int, List[int]]) -> None:
"""Reset the hidden state: self.data.state[id]."""
state = self.data.state # it is a reference
if isinstance(state, torch.Tensor):
state[id].zero_()
elif isinstance(state, np.ndarray):
state[id] = None if state.dtype == np.object else 0
elif isinstance(state, Batch):
state.empty_(id)
def collect(
self,
n_step: Optional[int] = None,
n_episode: Optional[Union[int, List[int]]] = None,
random: bool = False,
render: Optional[float] = None,
no_grad: bool = True,
) -> Dict[str, float]:
"""Collect a specified number of step or episode.
:param int n_step: how many steps you want to collect.
:param n_episode: how many episodes you want to collect. If it is an
int, it means to collect at lease ``n_episode`` episodes; if it is
a list, it means to collect exactly ``n_episode[i]`` episodes in
the i-th environment
:param bool random: whether to use random policy for collecting data,
defaults to False.
:param float render: the sleep time between rendering consecutive
frames, defaults to None (no rendering).
:param bool no_grad: whether to retain gradient in policy.forward,
defaults to True (no gradient retaining).
.. note::
One and only one collection number specification is permitted,
either ``n_step`` or ``n_episode``.
:return: A dict including the following keys
* ``n/ep`` the collected number of episodes.
* ``n/st`` the collected number of steps.
* ``v/st`` the speed of steps per second.
* ``v/ep`` the speed of episode per second.
* ``rew`` the mean reward over collected episodes.
* ``len`` the mean length over collected episodes.
"""
assert (n_step is not None and n_episode is None and n_step > 0) or (
n_step is None and n_episode is not None and np.sum(n_episode) > 0
), "Only one of n_step or n_episode is allowed in Collector.collect, "
f"got n_step = {n_step}, n_episode = {n_episode}."
start_time = time.time()
step_count = 0
succ_attacks = 0
n_attacks = 0
# episode of each environment
episode_count = np.zeros(self.env_num)
# If n_episode is a list, and some envs have collected the required
# number of episodes, these envs will be recorded in this list, and
# they will not be stepped.
finished_env_ids = []
rewards = []
if isinstance(n_episode, list):
assert len(n_episode) == self.get_env_num()
finished_env_ids = [
i for i in self._ready_env_ids if n_episode[i] <= 0]
self._ready_env_ids = np.array(
[x for x in self._ready_env_ids if x not in finished_env_ids])
while True:
if step_count >= 100000 and episode_count.sum() == 0:
warnings.warn(
"There are already many steps in an episode. "
"You should add a time limitation to your environment!",
Warning)
# restore the state and the input data
last_state = self.data.state
if isinstance(last_state, Batch) and last_state.is_empty():
last_state = None
self.data.update(state=Batch(), obs_next=Batch(), policy=Batch())
# calculate the next action
if random:
spaces = self._action_space
result = Batch(
act=[spaces[i].sample() for i in self._ready_env_ids])
else:
if no_grad:
with torch.no_grad(): # faster than retain_grad version
result = self.policy(self.data, last_state)
else:
result = self.policy(self.data, last_state)
state = result.get("state", Batch())
# convert None to Batch(), since None is reserved for 0-init
if state is None:
state = Batch()
self.data.update(state=state, policy=result.get("policy", Batch()))
# save hidden state to policy._state, in order to save into buffer
if not (isinstance(state, Batch) and state.is_empty()):
self.data.policy._state = self.data.state
self.data.act = to_numpy(result.act)
# START ADVERSARIAL ATTACK
x = rd.uniform(0, 1)
if x < self.atk_frequency:
ori_act = self.data.act
adv_act, adv_obs = self.obs_attacks(self.data, ori_act)
for j, i in enumerate(self._ready_env_ids):
if adv_act[i] != ori_act[i]:
succ_attacks += 1
n_attacks += self.env_num
self.data.update(obs=adv_obs) # so that the adv obs will be inserted in the buffer
if self.test:
self.data.act = adv_act
# step in env
obs_next, rew, done, info = self.env.step(self.data.act)
# move data to self.data
self.data.update(obs_next=obs_next, rew=rew, done=done, info=info)
if render:
self.env.render()
time.sleep(render)
# add data into the buffer
if self.preprocess_fn:
result = self.preprocess_fn(**self.data) # type: ignore
self.data.update(result)
for j, i in enumerate(self._ready_env_ids):
# j is the index in current ready_env_ids
# i is the index in all environments
if self.buffer is None:
# users do not want to store data, so we store
# small fake data here to make the code clean
self._cached_buf[i].add(obs=0, act=0, rew=rew[j], done=0)
else:
self._cached_buf[i].add(**self.data[j])
if done[j]:
if not (isinstance(n_episode, list)
and episode_count[i] >= n_episode[i]):
episode_count[i] += 1
rewards.append(self._rew_metric(
np.sum(self._cached_buf[i].rew, axis=0)))
step_count += len(self._cached_buf[i])
if self.buffer is not None:
self.buffer.update(self._cached_buf[i])
if isinstance(n_episode, list) and \
episode_count[i] >= n_episode[i]:
# env i has collected enough data, it has finished
finished_env_ids.append(i)
self._cached_buf[i].reset()
self._reset_state(j)
obs_next = self.data.obs_next
if sum(done):
env_ind_local = np.where(done)[0]
env_ind_global = self._ready_env_ids[env_ind_local]
obs_reset = self.env.reset(env_ind_global)
if self.preprocess_fn:
obs_reset = self.preprocess_fn(
obs=obs_reset).get("obs", obs_reset)
obs_next[env_ind_local] = obs_reset
self.data.obs = obs_next
self._ready_env_ids = np.array(
[x for x in self._ready_env_ids if x not in finished_env_ids])
if n_step:
if step_count >= n_step:
break
else:
if isinstance(n_episode, int) and \
episode_count.sum() >= n_episode:
break
if isinstance(n_episode, list) and \
(episode_count >= n_episode).all():
break
# finished envs are ready, and can be used for the next collection
self._ready_env_ids = np.array(
self._ready_env_ids.tolist() + finished_env_ids)
# generate the statistics
episode_count = sum(episode_count)
duration = max(time.time() - start_time, 1e-9)
self.collect_step += step_count
self.collect_episode += episode_count
self.collect_time += duration
return {
"n/ep": episode_count,
"n/st": step_count,
"v/st": step_count / duration,
"v/ep": episode_count / duration,
"rew": np.mean(rewards),
"rew_std": np.std(rewards),
"len": step_count / episode_count,
'succ_atks(%)': succ_attacks / n_attacks if n_attacks > 0 else 0,
}
def obs_attacks(self,
data,
target_action: List[int]
):
"""
Performs an image adversarial attack on the observation stored in 'obs' respect to
the action 'target_action' using the method defined in 'self.obs_adv_atk'
"""
data = deepcopy(data)
obs = torch.FloatTensor(data.obs).to(self.device) # convert observation to tensor
act = torch.tensor(target_action).to(self.device) # convert action to tensor
adv_obs = self.obs_adv_atk.perturb(obs, act) # create adversarial observation
with torch.no_grad():
adv_obs = adv_obs.cpu().detach().numpy()
data.obs = adv_obs
result = self.policy(data, last_state=None)
return to_numpy(result.act), adv_obs
| [
"numpy.sum",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"tianshou.data.ListReplayBuffer",
"numpy.std",
"torch.FloatTensor",
"tianshou.data.Batch",
"copy.deepcopy",
"time.sleep",
"torch.cuda.is_available",
"tianshou.data.to_numpy",
"tianshou.env.DummyVectorEnv",
"random.uniform",
"num... | [((2921, 2944), 'numpy.arange', 'np.arange', (['self.env_num'], {}), '(self.env_num)\n', (2930, 2944), True, 'import numpy as np\n'), ((4114, 4199), 'tianshou.data.Batch', 'Batch', ([], {'state': '{}', 'obs': '{}', 'act': '{}', 'rew': '{}', 'done': '{}', 'info': '{}', 'obs_next': '{}', 'policy': '{}'}), '(state={}, obs={}, act={}, rew={}, done={}, info={}, obs_next={},\n policy={})\n', (4119, 4199), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((4879, 4902), 'numpy.arange', 'np.arange', (['self.env_num'], {}), '(self.env_num)\n', (4888, 4902), True, 'import numpy as np\n'), ((7415, 7426), 'time.time', 'time.time', ([], {}), '()\n', (7424, 7426), False, 'import time\n'), ((7559, 7581), 'numpy.zeros', 'np.zeros', (['self.env_num'], {}), '(self.env_num)\n', (7567, 7581), True, 'import numpy as np\n'), ((14310, 14324), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (14318, 14324), False, 'from copy import deepcopy\n'), ((2297, 2322), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2320, 2322), False, 'import torch\n'), ((2441, 2471), 'tianshou.env.DummyVectorEnv', 'DummyVectorEnv', (['[lambda : env]'], {}), '([lambda : env])\n', (2455, 2471), False, 'from tianshou.env import BaseVectorEnv, DummyVectorEnv\n'), ((3036, 3054), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ([], {}), '()\n', (3052, 3054), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((8053, 8124), 'numpy.array', 'np.array', (['[x for x in self._ready_env_ids if x not in finished_env_ids]'], {}), '([x for x in self._ready_env_ids if x not in finished_env_ids])\n', (8061, 8124), True, 'import numpy as np\n'), ((9700, 9720), 'tianshou.data.to_numpy', 'to_numpy', (['result.act'], {}), '(result.act)\n', (9708, 9720), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((9777, 9793), 'random.uniform', 'rd.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (9787, 9793), True, 'import random as rd\n'), ((12686, 12757), 'numpy.array', 'np.array', (['[x for x in self._ready_env_ids if x not in finished_env_ids]'], {}), '([x for x in self._ready_env_ids if x not in finished_env_ids])\n', (12694, 12757), True, 'import numpy as np\n'), ((13784, 13800), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (13791, 13800), True, 'import numpy as np\n'), ((13825, 13840), 'numpy.std', 'np.std', (['rewards'], {}), '(rewards)\n', (13831, 13840), True, 'import numpy as np\n'), ((14602, 14617), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14615, 14617), False, 'import torch\n'), ((14774, 14794), 'tianshou.data.to_numpy', 'to_numpy', (['result.act'], {}), '(result.act)\n', (14782, 14794), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((3706, 3722), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (3719, 3722), True, 'import numpy as np\n'), ((8244, 8377), 'warnings.warn', 'warnings.warn', (['"""There are already many steps in an episode. You should add a time limitation to your environment!"""', 'Warning'], {}), "(\n 'There are already many steps in an episode. You should add a time limitation to your environment!'\n , Warning)\n", (8257, 8377), False, 'import warnings\n'), ((9242, 9249), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (9247, 9249), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((9378, 9385), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (9383, 9385), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((10599, 10617), 'time.sleep', 'time.sleep', (['render'], {}), '(render)\n', (10609, 10617), False, 'import time\n'), ((13436, 13447), 'time.time', 'time.time', ([], {}), '()\n', (13445, 13447), False, 'import time\n'), ((14339, 14366), 'torch.FloatTensor', 'torch.FloatTensor', (['data.obs'], {}), '(data.obs)\n', (14356, 14366), False, 'import torch\n'), ((14430, 14457), 'torch.tensor', 'torch.tensor', (['target_action'], {}), '(target_action)\n', (14442, 14457), False, 'import torch\n'), ((7234, 7251), 'numpy.sum', 'np.sum', (['n_episode'], {}), '(n_episode)\n', (7240, 7251), True, 'import numpy as np\n'), ((8666, 8673), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (8671, 8673), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((8684, 8691), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (8689, 8691), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((8700, 8707), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (8705, 8707), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((12266, 12280), 'numpy.where', 'np.where', (['done'], {}), '(done)\n', (12274, 12280), True, 'import numpy as np\n'), ((8995, 9010), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9008, 9010), False, 'import torch\n'), ((9456, 9463), 'tianshou.data.Batch', 'Batch', ([], {}), '()\n', (9461, 9463), False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer, to_numpy\n'), ((11591, 11630), 'numpy.sum', 'np.sum', (['self._cached_buf[i].rew'], {'axis': '(0)'}), '(self._cached_buf[i].rew, axis=0)\n', (11597, 11630), True, 'import numpy as np\n')] |
import os
import numpy as np
import pybullet as p
import pybullet_data
from panda import Panda
from objects import YCBObject, InteractiveObj, RBOObject
class DoubleEnv():
def __init__(self):
# create simulation (GUI)
self.urdfRootPath = pybullet_data.getDataPath()
p.connect(p.GUI)
p.setGravity(0, 0, -9.81)
# set up camera
self._set_camera()
# load some scene objects
p.loadURDF(os.path.join(self.urdfRootPath, "plane.urdf"), basePosition=[0, 0, -0.65])
p.loadURDF(os.path.join(self.urdfRootPath, "table/table.urdf"), basePosition=[0.5, 0, -0.65])
p.loadURDF(os.path.join(self.urdfRootPath, "table/table.urdf"), basePosition=[0.5, 1, -0.65])
# example YCB object
obj1 = YCBObject('003_cracker_box')
obj1.load()
p.resetBasePositionAndOrientation(obj1.body_id, [0.7, -0.2, 0.1], [0, 0, 0, 1])
# example RBO object
obj2 = RBOObject('book')
obj2.load()
p.resetBasePositionAndOrientation(obj2.body_id, [0.8, 1.1, 0.5], [0, 0, 1, 1])
# load a panda robot
self.panda1 = Panda([0, 0, 0])
self.panda2 = Panda([0, 1, 0])
def reset(self):
self.panda1.reset()
self.panda2.reset()
return [self.panda1.state, self.panda2.state]
def close(self):
p.disconnect()
def step(self, action):
# get current state
state = [self.panda1.state, self.panda2.state]
# action in this example is the end-effector velocity
action1 = [action[0], action[1], action[2]]
action2 = [action[3], action[4], action[5]]
self.panda1.step(dposition=action1)
self.panda2.step(dposition=action2)
# take simulation step
p.stepSimulation()
# return next_state, reward, done, info
next_state = [self.panda1.state, self.panda2.state]
reward = 0.0
done = False
info = {}
return next_state, reward, done, info
def render(self):
(width, height, pxl, depth, segmentation) = p.getCameraImage(width=self.camera_width,
height=self.camera_height,
viewMatrix=self.view_matrix,
projectionMatrix=self.proj_matrix)
rgb_array = np.array(pxl, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self.camera_height, self.camera_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _set_camera(self):
self.camera_width = 256
self.camera_height = 256
p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=20, cameraPitch=-30,
cameraTargetPosition=[0.5, -0.2, 0.0])
self.view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0],
distance=1.0,
yaw=90,
pitch=-50,
roll=0,
upAxisIndex=2)
self.proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(self.camera_width) / self.camera_height,
nearVal=0.1,
farVal=100.0)
| [
"os.path.join",
"objects.RBOObject",
"objects.YCBObject",
"pybullet.setGravity",
"pybullet.stepSimulation",
"pybullet.computeViewMatrixFromYawPitchRoll",
"pybullet.resetDebugVisualizerCamera",
"panda.Panda",
"pybullet.disconnect",
"pybullet.resetBasePositionAndOrientation",
"numpy.array",
"pyb... | [((260, 287), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (285, 287), False, 'import pybullet_data\n'), ((296, 312), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (305, 312), True, 'import pybullet as p\n'), ((321, 346), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (333, 346), True, 'import pybullet as p\n'), ((777, 805), 'objects.YCBObject', 'YCBObject', (['"""003_cracker_box"""'], {}), "('003_cracker_box')\n", (786, 805), False, 'from objects import YCBObject, InteractiveObj, RBOObject\n'), ((834, 913), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['obj1.body_id', '[0.7, -0.2, 0.1]', '[0, 0, 0, 1]'], {}), '(obj1.body_id, [0.7, -0.2, 0.1], [0, 0, 0, 1])\n', (867, 913), True, 'import pybullet as p\n'), ((959, 976), 'objects.RBOObject', 'RBOObject', (['"""book"""'], {}), "('book')\n", (968, 976), False, 'from objects import YCBObject, InteractiveObj, RBOObject\n'), ((1005, 1083), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['obj2.body_id', '[0.8, 1.1, 0.5]', '[0, 0, 1, 1]'], {}), '(obj2.body_id, [0.8, 1.1, 0.5], [0, 0, 1, 1])\n', (1038, 1083), True, 'import pybullet as p\n'), ((1136, 1152), 'panda.Panda', 'Panda', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (1141, 1152), False, 'from panda import Panda\n'), ((1175, 1191), 'panda.Panda', 'Panda', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1180, 1191), False, 'from panda import Panda\n'), ((1354, 1368), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (1366, 1368), True, 'import pybullet as p\n'), ((1776, 1794), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (1792, 1794), True, 'import pybullet as p\n'), ((2085, 2221), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': 'self.camera_width', 'height': 'self.camera_height', 'viewMatrix': 'self.view_matrix', 'projectionMatrix': 'self.proj_matrix'}), '(width=self.camera_width, height=self.camera_height,\n viewMatrix=self.view_matrix, projectionMatrix=self.proj_matrix)\n', (2101, 2221), True, 'import pybullet as p\n'), ((2445, 2474), 'numpy.array', 'np.array', (['pxl'], {'dtype': 'np.uint8'}), '(pxl, dtype=np.uint8)\n', (2453, 2474), True, 'import numpy as np\n'), ((2495, 2560), 'numpy.reshape', 'np.reshape', (['rgb_array', '(self.camera_height, self.camera_width, 4)'], {}), '(rgb_array, (self.camera_height, self.camera_width, 4))\n', (2505, 2560), True, 'import numpy as np\n'), ((2727, 2850), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(1.5)', 'cameraYaw': '(20)', 'cameraPitch': '(-30)', 'cameraTargetPosition': '[0.5, -0.2, 0.0]'}), '(cameraDistance=1.5, cameraYaw=20, cameraPitch=\n -30, cameraTargetPosition=[0.5, -0.2, 0.0])\n', (2755, 2850), True, 'import pybullet as p\n'), ((2910, 3039), 'pybullet.computeViewMatrixFromYawPitchRoll', 'p.computeViewMatrixFromYawPitchRoll', ([], {'cameraTargetPosition': '[0.5, 0, 0]', 'distance': '(1.0)', 'yaw': '(90)', 'pitch': '(-50)', 'roll': '(0)', 'upAxisIndex': '(2)'}), '(cameraTargetPosition=[0.5, 0, 0],\n distance=1.0, yaw=90, pitch=-50, roll=0, upAxisIndex=2)\n', (2945, 3039), True, 'import pybullet as p\n'), ((453, 498), 'os.path.join', 'os.path.join', (['self.urdfRootPath', '"""plane.urdf"""'], {}), "(self.urdfRootPath, 'plane.urdf')\n", (465, 498), False, 'import os\n'), ((547, 598), 'os.path.join', 'os.path.join', (['self.urdfRootPath', '"""table/table.urdf"""'], {}), "(self.urdfRootPath, 'table/table.urdf')\n", (559, 598), False, 'import os\n'), ((649, 700), 'os.path.join', 'os.path.join', (['self.urdfRootPath', '"""table/table.urdf"""'], {}), "(self.urdfRootPath, 'table/table.urdf')\n", (661, 700), False, 'import os\n')] |
import unittest
import EXOSIMS.util.get_dirs as gd
import os
from unittest.mock import *
import numpy as np
import sys
class TestGetDirs(unittest.TestCase):
"""
Tests the get_dir tool.
<NAME>, Cornell, July 2021
"""
def test_get_home_dir(self):
"""
Tests that get_home_dir works in muiltiple OS environments.
Test method: Uses unittest's mock library to create fake OS environment
and paths to see if get_dirs returns the correct home directory. Because
get_dirs returns assertionerrors when the homedir isn't real, use the
assertion message itself to check that the homedir is correct.
This assumes that the os library does its job correctly as the mocking
library will overwrite whatever os has stored for testing purposes.
This method also assumes that winreg works as expected.
"""
#collect assertion errors and verify at the end that we only get the
#expected assertion errors.
#this tests the assertion error as well- it should be called for all
#of these cases as I use imaginary pathnames
assertErrors = []
#mock directories
directories = \
[
{'HOME':'posixhome'},
{},
{'HOME':'myshome','MSYSTEM':'test'},
{'HOMESHARE':'sharehome'},
{'USERPROFILE':'userhome'},
{'HOME': 'otherOShome'},
{}
]
#mock os names
os_name = ['posix','posix','nt','nt','nt','door','door']
#names for home directory- 'none' shouldn't show up
home_names = ['posixhome','none','myshome','sharehome','userhome',
'otherOShome', 'none']
#test all paths except for winreg
for i, dic in enumerate(directories):
with patch.dict(os.environ,dic,clear=True), \
patch.object(os,'name',os_name[i]):
#i==1 and i==6 correspond to where homedir isn't in environ
if i == 1 or i == 6:
with self.assertRaises(OSError):
gd.get_home_dir()
else:
try: gd.get_home_dir()
except AssertionError as e:
assertErrors.append(str(e))
#add all assertion errors so far to the expected list of assertion
#errors
exp_asrt = []
for s in home_names:
if s == 'none':
continue
exp_asrt.append("Identified "+s+ " as home directory, but it does" +
" not exist or is not accessible/writeable")
#test winreg branch
#first, test that if winreg doesn't except, homedir is set
#(mock a key: make key functions do nothing.
# mock queryvalueex: return test homedir)
with patch.dict(os.environ,{},clear=True), \
patch.object(os,'name','nt'), \
patch.dict(sys.modules, {'winreg': MagicMock()}), \
patch('winreg.OpenKey'), \
patch('winreg.QueryValueEx') as mockquery:
mockquery.return_value= ['winregHome']
try: gd.get_home_dir()
except AssertionError as e:
assertErrors.append(str(e))
#second, test that home is tried if an exception is raised and attempt
#at homedir setting is made
with patch.dict(os.environ,{'HOME':'winreghome2'},clear=True), \
patch.object(os,'name','nt'), \
patch.dict(sys.modules, {'winreg': MagicMock()}), \
patch('winreg.OpenKey'), \
patch('winreg.QueryValueEx') as mockquery:
mockquery.side_effect = Exception
try: gd.get_home_dir()
except AssertionError as e:
assertErrors.append(str(e))
with patch.dict(os.environ,{},clear=True), \
patch.object(os,'name','nt'), \
patch.dict(sys.modules, {'winreg': MagicMock()}), \
patch('winreg.OpenKey'), \
patch('winreg.QueryValueEx') as mockquery:
mockquery.side_effect = Exception
with self.assertRaises(OSError):
gd.get_home_dir()
exp_asrt.append("Identified "+"winregHome"+ " as home directory, but it does" +
" not exist or is not accessible/writeable")
exp_asrt.append("Identified "+"winreghome2"+ " as home directory, but it does" +
" not exist or is not accessible/writeable")
np.testing.assert_array_equal(assertErrors ,exp_asrt)
def test_get_paths(self):
"""
Tests that get_paths returns the proper (relative) paths.
Test method: Calls the method and tests to see if the path dictionary
matches expectations for various trivial inputs. For some cases, use the
python mock library to simplify testing
For the JSON, queue file, and and runqueue branches, just use a simple
dictionary (*although this should probably be changed to the respective
datatype. )
"""
#test no parameter output, testing branch #1.
#mock current working directory
dict_paths = gd.get_paths()
outputs = dict_paths.values()
outputs_rel = []
for x in outputs:
outputs_rel.append(os.path.relpath(x))
#test environment output, testing branch #2. mock environment dictionary
with patch.dict(os.environ,{'EXOSIMS1': 'exosims_path',
'EXOSIMS2':'exosims_path2'}):
#only keep the key/values i seek to test for each branch
test_dict = dict()
dict_paths = gd.get_paths()
for key in dict_paths:
if key == 'EXOSIMS1' or key == 'EXOSIMS2':
test_dict[key] = dict_paths[key]
self.assertDictEqual(test_dict,{'EXOSIMS1': 'exosims_path',
'EXOSIMS2':'exosims_path2'})
#test JSON script output, branch #3. mock
paths = {'EXOSIMS_SCRIPTS_PATH': 'scriptspath',
'EXOSIMS_OBSERVING_BLOCK_CSV_PATH': 'csvpath',
'EXOSIMS_FIT_FILES_FOLDER_PATH': 'folderpath',
'EXOSIMS_PLOT_OUTPUT_PATH': 'outputpath',
'EXOSIMS_RUN_SAVE_PATH': 'savepath',
'EXOSIMS_RUN_LOG_PATH': 'logpath',
'EXOSIMS_QUEUE_FILE_PATH': 'filepath'
}
paths_test = {'paths': paths}
self.assertDictEqual(paths, gd.get_paths(specs=paths_test))
#test qFile script specified path, branch #4
self.assertDictEqual(paths,gd.get_paths(qFile=paths_test))
#test runQueue specified path, branch #5
self.assertDictEqual(paths, gd.get_paths(qFargs=paths))
| [
"EXOSIMS.util.get_dirs.get_home_dir",
"numpy.testing.assert_array_equal",
"EXOSIMS.util.get_dirs.get_paths",
"os.path.relpath"
] | [((4556, 4609), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['assertErrors', 'exp_asrt'], {}), '(assertErrors, exp_asrt)\n', (4585, 4609), True, 'import numpy as np\n'), ((5257, 5271), 'EXOSIMS.util.get_dirs.get_paths', 'gd.get_paths', ([], {}), '()\n', (5269, 5271), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((5744, 5758), 'EXOSIMS.util.get_dirs.get_paths', 'gd.get_paths', ([], {}), '()\n', (5756, 5758), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((6524, 6554), 'EXOSIMS.util.get_dirs.get_paths', 'gd.get_paths', ([], {'specs': 'paths_test'}), '(specs=paths_test)\n', (6536, 6554), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((6646, 6676), 'EXOSIMS.util.get_dirs.get_paths', 'gd.get_paths', ([], {'qFile': 'paths_test'}), '(qFile=paths_test)\n', (6658, 6676), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((6765, 6791), 'EXOSIMS.util.get_dirs.get_paths', 'gd.get_paths', ([], {'qFargs': 'paths'}), '(qFargs=paths)\n', (6777, 6791), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((3187, 3204), 'EXOSIMS.util.get_dirs.get_home_dir', 'gd.get_home_dir', ([], {}), '()\n', (3202, 3204), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((3755, 3772), 'EXOSIMS.util.get_dirs.get_home_dir', 'gd.get_home_dir', ([], {}), '()\n', (3770, 3772), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((4220, 4237), 'EXOSIMS.util.get_dirs.get_home_dir', 'gd.get_home_dir', ([], {}), '()\n', (4235, 4237), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((5393, 5411), 'os.path.relpath', 'os.path.relpath', (['x'], {}), '(x)\n', (5408, 5411), False, 'import os\n'), ((2131, 2148), 'EXOSIMS.util.get_dirs.get_home_dir', 'gd.get_home_dir', ([], {}), '()\n', (2146, 2148), True, 'import EXOSIMS.util.get_dirs as gd\n'), ((2197, 2214), 'EXOSIMS.util.get_dirs.get_home_dir', 'gd.get_home_dir', ([], {}), '()\n', (2212, 2214), True, 'import EXOSIMS.util.get_dirs as gd\n')] |
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pathlib import Path
import numpy as np
import scipy.sparse as sp
import os
import sys
project_path = Path(__file__).resolve().parents[2]
# Settings
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('edgelist', 'ppi.edgelist.txt', 'Edgelist file.') # 'PPI'
#Check data availability
if not os.path.isfile("{}/data/output/network/{}".format(project_path, FLAGS.edgelist)):
sys.exit("{} file is not available under /data/output/network/".format(FLAGS.edgelist))
print("Generate adjacency matrix...")
# build graph
ppi_ids = np.genfromtxt("{}/data/output/network/ppi.ids.txt".format(project_path), dtype=np.dtype(str))
idx = np.array(ppi_ids[:, 1], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}/data/output/network/ppi.edgelist.txt".format(project_path), dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx.shape[0], idx.shape[0]), dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
sp.save_npz("{}/data/output/network/ppi.adjacency.npz".format(project_path), adj)
print("Successful generation of adjacency matrix in /data/output/network/ppi.adjacency.npz") | [
"pathlib.Path",
"numpy.dtype",
"numpy.array",
"numpy.ones"
] | [((772, 811), 'numpy.array', 'np.array', (['ppi_ids[:, 1]'], {'dtype': 'np.int32'}), '(ppi_ids[:, 1], dtype=np.int32)\n', (780, 811), True, 'import numpy as np\n'), ((750, 763), 'numpy.dtype', 'np.dtype', (['str'], {}), '(str)\n', (758, 763), True, 'import numpy as np\n'), ((1128, 1151), 'numpy.ones', 'np.ones', (['edges.shape[0]'], {}), '(edges.shape[0])\n', (1135, 1151), True, 'import numpy as np\n'), ((210, 224), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (214, 224), False, 'from pathlib import Path\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.