code stringlengths 189 49.4k | apis sequence | extract_api stringlengths 107 64.3k |
|---|---|---|
import fastai
from neptune.new.integrations.fastai import NeptuneCallback
from fastai.vision.all import *
import neptune.new as neptune
run = neptune.init(
project="common/fastai-integration", api_token="<PASSWORD>", tags="basic"
)
path = untar_data(URLs.MNIST_TINY)
dls = ImageDataLoaders.from_csv(path)
# Log all training phases of the learner
learn = cnn_learner(dls, resnet18, cbs=[NeptuneCallback(run=run, base_namespace="experiment")])
learn.fit_one_cycle(2)
learn.fit_one_cycle(1)
run.stop()
| [
"neptune.new.integrations.fastai.NeptuneCallback",
"neptune.new.init"
] | [((143, 234), 'neptune.new.init', 'neptune.init', ([], {'project': '"""common/fastai-integration"""', 'api_token': '"""<PASSWORD>"""', 'tags': '"""basic"""'}), "(project='common/fastai-integration', api_token='<PASSWORD>',\n tags='basic')\n", (155, 234), True, 'import neptune.new as neptune\n'), ((393, 446), 'neptune.new.integrations.fastai.NeptuneCallback', 'NeptuneCallback', ([], {'run': 'run', 'base_namespace': '"""experiment"""'}), "(run=run, base_namespace='experiment')\n", (408, 446), False, 'from neptune.new.integrations.fastai import NeptuneCallback\n')] |
import neptune
from tensorflow.keras.callbacks import BaseLogger
class NeptuneMonitor(BaseLogger):
def __init__(self, name, api_token, prj_name, params: tuple = None):
assert api_token is not None
assert prj_name is not None
super(BaseLogger, self).__init__()
self.my_name = name
self.stateful_metrics = set(['loss'] or [])
neptune.init(
api_token=api_token,
project_qualified_name=prj_name)
self.experiment = neptune.create_experiment(name=self.my_name, params=params)
self.log_neptune = True
def on_epoch_end(self, epoch, logs={}):
#acc = logs['acc']
loss = logs['loss']
if self.log_neptune:
self.experiment.append_tag(self.my_name)
#self.experiment.send_metric('acc', acc)
self.experiment.send_metric('loss', loss)
#self.experiment.send_metric('epoch', epoch)
def on_train_end(self, logs={}):
if self.log_neptune:
self.experiment.stop() | [
"neptune.create_experiment",
"neptune.init"
] | [((379, 445), 'neptune.init', 'neptune.init', ([], {'api_token': 'api_token', 'project_qualified_name': 'prj_name'}), '(api_token=api_token, project_qualified_name=prj_name)\n', (391, 445), False, 'import neptune\n'), ((497, 556), 'neptune.create_experiment', 'neptune.create_experiment', ([], {'name': 'self.my_name', 'params': 'params'}), '(name=self.my_name, params=params)\n', (522, 556), False, 'import neptune\n')] |
'''Train DCENet with PyTorch'''
# from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import os
import json
import neptune
import argparse
import numpy as np
from loader import *
from utils.plots import *
from utils.utils import *
from utils.collision import *
from utils.datainfo import DataInfo
from utils.ranking import gauss_rank
from models import DCENet
from loss import DCENetLoss
def main():
# ================= Arguments ================ #
parser = argparse.ArgumentParser(description='PyTorch Knowledge Distillation')
parser.add_argument('--gpu', type=str, default="4", help='gpu id')
parser.add_argument('--config', type=str, default="config", help='.json')
args = parser.parse_args()
# ================= Device Setup ================ #
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# ================= Config Load ================ #
with open('config/' + args.config) as config_file:
config = json.load(config_file)
# ================= Neptune Setup ================ #
if config['neptune']:
neptune.init('seongjulee/DCENet', api_token=config["neptune_token"]) # username/project-name, api_token=token from neptune
neptune.create_experiment(name='EXP', params=config) # name=project name (anything is ok), params=parameter list (json format)
neptune.append_tag(args.config) # neptune tag (str or string list)
# ================= Model Setup ================ #
model = nn.DataParallel(DCENet(config)).to(device) if len(args.gpu.split(',')) > 1 else DCENet(config).to(device)
# ================= Loss Function ================ #
criterion = DCENetLoss(config)
# ================= Optimizer Setup ================ #
optimizer = optim.Adam(model.parameters(), lr=config['lr'], betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-6, amsgrad=False)
# ================= Data Loader ================ #
datalist = DataInfo()
train_datalist = datalist.train_merged
print('Train data list', train_datalist)
test_datalist = datalist.train_biwi
print('Test data list', test_datalist)
np.random.seed(10)
offsets, traj_data, occupancy = load_data(config, train_datalist, datatype="train")
trainval_split = np.random.rand(len(offsets)) < config['split']
train_x = offsets[trainval_split, :config['obs_seq'] - 1, 4:6]
train_occu = occupancy[trainval_split, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
train_y = offsets[trainval_split, config['obs_seq'] - 1:, 4:6]
train_y_occu = occupancy[trainval_split, config['obs_seq'] - 1:, ..., :config['enviro_pdim'][-1]]
val_x = offsets[~trainval_split, :config['obs_seq'] - 1, 4:6]
val_occu = occupancy[~trainval_split, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
val_y = offsets[~trainval_split, config['obs_seq'] - 1:, 4:6]
val_y_occu = occupancy[~trainval_split, config['obs_seq'] - 1:, ..., :config['enviro_pdim'][-1]]
print("%.0f trajectories for training\n %.0f trajectories for valiadation" %(train_x.shape[0], val_x.shape[0]))
test_offsets, test_trajs, test_occupancy = load_data(config, test_datalist, datatype="test")
test_x = test_offsets[:, :config['obs_seq'] - 1, 4:6]
test_occu = test_occupancy[:, :config['obs_seq'] - 1, ..., :config['enviro_pdim'][-1]]
last_obs_test = test_offsets[:, config['obs_seq'] - 2, 2:4]
y_truth = test_offsets[:, config['obs_seq'] - 1:, :4]
xy_truth = test_offsets[:, :, :4]
print('test_trajs', test_trajs.shape)
print("%.0f trajectories for testing" % (test_x.shape[0]))
train_dataset = TrajDataset(x=train_x, x_occu=train_occu, y=train_y, y_occu=train_y_occu, mode='train')
train_loader = DataLoader(dataset=train_dataset, batch_size=config["batch_size"], shuffle=True, num_workers=4)
val_dataset = TrajDataset(x=val_x, x_occu=val_occu, y=val_y, y_occu=val_y_occu, mode='val')
val_loader = DataLoader(dataset=val_dataset, batch_size=config["batch_size"], shuffle=False, num_workers=4)
# test_dataset = TrajDataset(x=test_x, x_occu=test_occu, y=y_truth, y_occu=None, mode='test')
# test_loader = DataLoader(dataset=test_dataset, batch_size=config["batch_size"], shuffle=False, num_workers=4)
# ================= Training Loop ================ #
early_stopping = EarlyStopping(patience=config['patience'], verbose=True, filename=args.config.split('/')[-1].replace('.json', '.pth'))
for epoch in range(config['max_epochs']):
train_one_epoch(config, epoch, device, model, optimizer, criterion, train_loader)
val_loss = evaluate(config, device, model, optimizer, criterion, val_loader)
early_stopping(val_loss, model)
if early_stopping.early_stop:
print("Early stopping")
break
# ================= Test ================ #
model.load_state_dict(torch.load(os.path.join('checkpoints', args.config.split('/')[-1].replace('.json', '.pth'))))
model.eval()
with torch.no_grad():
test_x, test_occu = input2tensor(test_x, test_occu, device)
x_latent = model.encoder_x(test_x, test_occu)
predictions = []
for i, x_ in enumerate(x_latent):
last_pos = last_obs_test[i]
x_ = x_.view(1, -1)
for i in range(config['num_pred']):
y_p = model.decoder(x_, train=False)
y_p_ = np.concatenate(([last_pos], np.squeeze(y_p.cpu().numpy())), axis=0)
y_p_sum = np.cumsum(y_p_, axis=0)
predictions.append(y_p_sum[1:, :])
predictions = np.reshape(predictions, [-1, config['num_pred'], config['pred_seq'], 2])
print('Predicting done!')
print(predictions.shape)
plot_pred(xy_truth, predictions)
# Get the errors for ADE, DEF, Hausdorff distance, speed deviation, heading error
print("\nEvaluation results @top%.0f" % config['num_pred'])
errors = get_errors(y_truth, predictions)
check_collision(y_truth)
## Get the first time prediction by g
ranked_prediction = []
for prediction in predictions:
ranks = gauss_rank(prediction)
ranked_prediction.append(prediction[np.argmax(ranks)])
ranked_prediction = np.reshape(ranked_prediction, [-1, 1, config['pred_seq'], 2])
print("\nEvaluation results for most-likely predictions")
ranked_errors = get_errors(y_truth, ranked_prediction)
# Function for one epoch training
def train_one_epoch(config, epoch, device, model, optimizer, criterion, loader):
print('\nEpoch: %d' % epoch)
model.train()
train_total, train_loss = 0, 0
for batch_idx, (x, x_occu, y, y_occu) in enumerate(loader):
x, x_occu, y, y_occu = x.to(device), x_occu.to(device), y.to(device), y_occu.to(device)
optimizer.zero_grad()
y_pred, mu, log_var = model(x, x_occu, y, y_occu, train=True)
loss = criterion(mu, log_var, y_pred, y)
loss.backward()
optimizer.step()
# train_ade += ade * x.size(0)
# train_fde += fde * x.size(0)
train_total += x.size(0)
train_loss += loss.item() * x.size(0)
if config['neptune']:
# neptune.log_metric('train_batch_ADE', ade)
# neptune.log_metric('train_batch_FDE', fde)
neptune.log_metric('train_batch_Loss', loss.item())
# progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f | ADE[m]: %.3f | FDE[m]: %.3f'
# % (get_lr(optimizer), train_loss / train_total, train_ade / train_total, train_fde / train_total))
progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f' % (get_lr(optimizer), train_loss / train_total))
# Function for validation
@torch.no_grad()
def evaluate(config, device, model, optimizer, criterion, loader):
model.eval()
# eval_ade, eval_fde, eval_total = 0, 0, 0
eval_total, eval_loss = 0, 0
for batch_idx, (x, x_occu, y, y_occu) in enumerate(loader):
x, x_occu, y, y_occu = x.to(device), x_occu.to(device), y.to(device), y_occu.to(device)
y_pred, mu, log_var = model(x, x_occu, y, y_occu, train=True)
loss = criterion(mu, log_var, y_pred, y)
eval_total += x.size(0)
eval_loss += loss.item() * x.size(0)
progress_bar(batch_idx, len(loader), 'Lr: %.4e | Loss: %.3f' % (get_lr(optimizer), eval_loss / eval_total))
# progress_bar(batch_idx, len(loader), 'Lr: %.4e | ADE[m]: %.3f | FDE[m]: %.3f'
# % (get_lr(optimizer), eval_ade / eval_total, eval_fde / eval_total))
if config['neptune']:
neptune.log_metric('val_Loss', eval_loss / eval_total)
# neptune.log_metric('{}_ADE'.format(loader.dataset.mode), eval_ade / eval_total)
# neptune.log_metric('{}_FDE'.format(loader.dataset.mode), eval_fde / eval_total)
return eval_loss / eval_total
if __name__ == "__main__":
main()
| [
"neptune.init",
"neptune.log_metric",
"neptune.create_experiment",
"neptune.append_tag"
] | [((7920, 7935), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7933, 7935), False, 'import torch\n'), ((564, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Knowledge Distillation"""'}), "(description='PyTorch Knowledge Distillation')\n", (587, 633), False, 'import argparse\n'), ((1884, 1902), 'loss.DCENetLoss', 'DCENetLoss', (['config'], {}), '(config)\n', (1894, 1902), False, 'from loss import DCENetLoss\n'), ((2162, 2172), 'utils.datainfo.DataInfo', 'DataInfo', ([], {}), '()\n', (2170, 2172), False, 'from utils.datainfo import DataInfo\n'), ((2350, 2368), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (2364, 2368), True, 'import numpy as np\n'), ((3954, 4054), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': "config['batch_size']", 'shuffle': '(True)', 'num_workers': '(4)'}), "(dataset=train_dataset, batch_size=config['batch_size'], shuffle=\n True, num_workers=4)\n", (3964, 4054), False, 'from torch.utils.data import DataLoader\n'), ((4164, 4263), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_dataset', 'batch_size': "config['batch_size']", 'shuffle': '(False)', 'num_workers': '(4)'}), "(dataset=val_dataset, batch_size=config['batch_size'], shuffle=\n False, num_workers=4)\n", (4174, 4263), False, 'from torch.utils.data import DataLoader\n'), ((5818, 5890), 'numpy.reshape', 'np.reshape', (['predictions', "[-1, config['num_pred'], config['pred_seq'], 2]"], {}), "(predictions, [-1, config['num_pred'], config['pred_seq'], 2])\n", (5828, 5890), True, 'import numpy as np\n'), ((6444, 6505), 'numpy.reshape', 'np.reshape', (['ranked_prediction', "[-1, 1, config['pred_seq'], 2]"], {}), "(ranked_prediction, [-1, 1, config['pred_seq'], 2])\n", (6454, 6505), True, 'import numpy as np\n'), ((1177, 1199), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (1186, 1199), False, 'import json\n'), ((1292, 1360), 'neptune.init', 'neptune.init', (['"""seongjulee/DCENet"""'], {'api_token': "config['neptune_token']"}), "('seongjulee/DCENet', api_token=config['neptune_token'])\n", (1304, 1360), False, 'import neptune\n'), ((1425, 1477), 'neptune.create_experiment', 'neptune.create_experiment', ([], {'name': '"""EXP"""', 'params': 'config'}), "(name='EXP', params=config)\n", (1450, 1477), False, 'import neptune\n'), ((1561, 1592), 'neptune.append_tag', 'neptune.append_tag', (['args.config'], {}), '(args.config)\n', (1579, 1592), False, 'import neptune\n'), ((5228, 5243), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5241, 5243), False, 'import torch\n'), ((6334, 6356), 'utils.ranking.gauss_rank', 'gauss_rank', (['prediction'], {}), '(prediction)\n', (6344, 6356), False, 'from utils.ranking import gauss_rank\n'), ((8786, 8840), 'neptune.log_metric', 'neptune.log_metric', (['"""val_Loss"""', '(eval_loss / eval_total)'], {}), "('val_Loss', eval_loss / eval_total)\n", (8804, 8840), False, 'import neptune\n'), ((1011, 1036), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1034, 1036), False, 'import torch\n'), ((1780, 1794), 'models.DCENet', 'DCENet', (['config'], {}), '(config)\n', (1786, 1794), False, 'from models import DCENet\n'), ((5724, 5747), 'numpy.cumsum', 'np.cumsum', (['y_p_'], {'axis': '(0)'}), '(y_p_, axis=0)\n', (5733, 5747), True, 'import numpy as np\n'), ((6401, 6417), 'numpy.argmax', 'np.argmax', (['ranks'], {}), '(ranks)\n', (6410, 6417), True, 'import numpy as np\n'), ((1716, 1730), 'models.DCENet', 'DCENet', (['config'], {}), '(config)\n', (1722, 1730), False, 'from models import DCENet\n')] |
import gym, torch, random, copy
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torch.nn.functional as F
import neptune.new as neptune
# initialize policy & value network
class PolicyNetwork(nn.Module):
def __init__(self, beta):
super().__init__()
self.model = nn.Sequential(nn.Linear(111, 400), nn.ReLU(),
nn.Linear(400, 300), nn.ReLU(),
nn.Linear(300, 8))
self.beta = beta
def forward(self, x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x).float()
return torch.tanh(self.model(x))
def explore(self, x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x).float()
return self.forward(x) + self.beta * torch.randn(8)
class ValueNetwork(nn.Module):
def __init__(self):
super().__init__()
self.model = nn.Sequential(nn.Linear(119, 400), nn.ReLU(),
nn.Linear(400, 300), nn.ReLU(),
nn.Linear(300, 1))
def forward(self, x):
return self.model(x)
class Buffer():
def __init__(self, max_size, mini_batch):
self.buffer = []
self.max_size = max_size
self.batch_size = mini_batch
def add(self, x):
self.buffer.append(x)
if len(self.buffer) > self.max_size:
del self.buffer[0]
def mini_batch(self):
return random.sample(self.buffer, self.batch_size)
# This is messy
def extract(x):
f_tensor = []
for i in range(5):
list = [e[i] for e in x]
if isinstance(list[0], np.ndarray):
list = [torch.from_numpy(a).float() for a in list]
if i != 2 and i!=4:
tensor = torch.stack(tuple(b for b in list))
elif i == 4:
tensor = torch.Tensor(list).float().unsqueeze(1)
else:
tensor = torch.unsqueeze(torch.tensor(list).float(), 1)
f_tensor.append(tensor)
return f_tensor[0], f_tensor[1], f_tensor[2], f_tensor[3], f_tensor[4]
class TrainingLoop():
def __init__(self, policy_net, value_net, buffer, value_optim, policy_optim, episodes = 1000, gamma = 0.99, loss_fn = nn.MSELoss(), env=gym.make('Ant-v2'), k = 2, min_buffer_size = 1000, tau = 0.0005):
self.policy_net = policy_net
self.value_net = value_net
self.target_policy = copy.deepcopy(self.policy_net)
self.target_value = copy.deepcopy(self.value_net)
self.buffer = buffer
self.value_optim = value_optim
self.policy_optim = policy_optim
self.episodes = episodes
self.gamma = gamma
self.loss_fn = loss_fn
self.extract_fn = extract
self.episode_reward = 0
self.env = env
self.k = k
self.run = neptune.init(project = "jaltermain/DDPGAntv-2")
self.initial_obs = None
self.val_losses = []
self.pol_losses = []
self.min_buffer_size = min_buffer_size
self.done = False
self.steps = 0
self.tau = tau
def interact(self):
action = (self.policy_net.explore(self.initial_obs)).detach()
obs, reward, done, _ = self.env.step(action.numpy()) # make sure the done flag is not time out
self.episode_reward += reward
self.buffer.add((self.initial_obs, action, reward, obs, done)) # (s, a, r, s')
self.initial_obs = obs
self.done = done
def get_batch(self):
samples = self.buffer.mini_batch()
return self.extract_fn(samples)
def train_value(self):
initial_states, actions, rewards, states, finished = self.get_batch()
q_model = value(torch.cat((initial_states, actions), 1))
q_bellman = rewards + self.gamma * self.target_value(torch.cat((states, self.target_policy(states)),1)) * (1-finished)
loss_value = self.loss_fn(q_model, q_bellman.detach())
self.val_losses.append(loss_value.detach())
# run['train/value_loss'].log(loss_value)
loss_value.backward()
# print([i.grad for i in target_value.parameters()])
self.value_optim.step()
self.value_optim.zero_grad()
self.policy_optim.zero_grad()
def train_policy(self):
initial_states, actions, rewards, states, finished = self.get_batch()
loss_policy = -(self.value_net(torch.cat((initial_states, self.policy_net.explore(initial_states)),1)).mean())
self.pol_losses.append(loss_policy.detach())
# run['train/policy_loss'].log(loss_policy)
loss_policy.backward()
self.policy_optim.step()
self.value_optim.zero_grad()
self.policy_optim.zero_grad()
def polyak_averaging(self):
params1 = self.value_net.named_parameters()
params2 = self.target_value.named_parameters()
dict_params2 = dict(params2)
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].data.copy_(self.tau*param1.data + (1-self.tau)*dict_params2[name1].data)
if self.steps % self.k == 0:
params1 = self.policy_net.named_parameters()
params2 = self.target_policy.named_parameters()
dict_params2 = dict(params2)
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].data.copy_(self.tau*param1.data + (1-self.tau)*dict_params2[name1].data)
def init_log(self):
network_hyperparams = {'Optimizer': 'Adam','Value-learning_rate': 0.0001, 'Policy-learning_rate': 0.0001, 'loss_fn_value': 'MSE'}
self.run["network_hyperparameters"] = network_hyperparams
network_sizes = {'ValueNet_size': '(119,400,300,1)', 'PolicyNet_size': '(111,400,300,8)'}
self.run["network_sizes"] = network_sizes
buffer_params = {'buffer_maxsize': self.buffer.max_size, 'batch_size': self.buffer.batch_size, 'min_size_train': self.min_buffer_size}
self.run['buffer_parameters'] = buffer_params
policy_params = {'exploration_noise': policy.beta, 'policy_smoothing': policy.beta}
self.run['policy_parameters'] = policy_params
environment_params = {'gamma': self.gamma, 'env_name': 'Ant-v2', 'episodes': self.episodes}
self.run['environment_parameters'] = environment_params
self.run['environment_parameters'] = environment_params
def log(self):
self.run['train/episode_reward'].log(self.episode_reward)
if not self.val_losses:
self.run['train/mean_episodic_value_loss'].log(0)
self.run['train/mean_episodic_policy_loss'].log(0)
else:
mean_val_loss = sum(self.val_losses) / len(self.val_losses)
mean_pol_loss = sum(self.pol_losses) / len(self.pol_losses)
self.run['train/mean_episodic_value_loss'].log(mean_val_loss)
self.run['train/mean_episodic_policy_loss'].log(mean_pol_loss)
def evaluation_loop(self):
self.episode_reward = 0
self.done = False
self.initial_obs = self.env.reset()
while not self.done:
action = (self.policy_net(self.initial_obs)).detach()
obs, reward, done, _ = self.env.step(action.numpy()) # make sure the done flag is not time out
self.episode_reward += reward
self.initial_obs = obs
self.done = done
self.run['validation/episode_reward'].log(self.episode_reward)
def training_loop(self):
self.init_log()
for e in range(self.episodes):
self.episode_reward = 0
self.done = False
self.val_losses, self.pol_losses = [], []
self.initial_obs = self.env.reset()
while not self.done:
self.interact()
if len(self.buffer.buffer) > self.min_buffer_size:
self.train_value()
if self.steps % self.k == 0:
self.train_policy()
self.polyak_averaging()
self.steps += 1
self.log()
self.evaluation_loop()
self.run.stop()
policy = PolicyNetwork(0.1)
value = ValueNetwork()
buffer = Buffer(15000, 128)
value_optim = optim.Adam(value.parameters(), lr = 0.0001)
policy_optim = optim.Adam(policy.parameters(), lr = 0.0001)
training_loop = TrainingLoop(policy, value, buffer, value_optim, policy_optim)
training_loop.training_loop()
| [
"neptune.new.init"
] | [((1496, 1539), 'random.sample', 'random.sample', (['self.buffer', 'self.batch_size'], {}), '(self.buffer, self.batch_size)\n', (1509, 1539), False, 'import gym, torch, random, copy\n'), ((2259, 2271), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2269, 2271), True, 'import torch.nn as nn\n'), ((2277, 2295), 'gym.make', 'gym.make', (['"""Ant-v2"""'], {}), "('Ant-v2')\n", (2285, 2295), False, 'import gym, torch, random, copy\n'), ((2444, 2474), 'copy.deepcopy', 'copy.deepcopy', (['self.policy_net'], {}), '(self.policy_net)\n', (2457, 2474), False, 'import gym, torch, random, copy\n'), ((2503, 2532), 'copy.deepcopy', 'copy.deepcopy', (['self.value_net'], {}), '(self.value_net)\n', (2516, 2532), False, 'import gym, torch, random, copy\n'), ((2860, 2905), 'neptune.new.init', 'neptune.init', ([], {'project': '"""jaltermain/DDPGAntv-2"""'}), "(project='jaltermain/DDPGAntv-2')\n", (2872, 2905), True, 'import neptune.new as neptune\n'), ((325, 344), 'torch.nn.Linear', 'nn.Linear', (['(111)', '(400)'], {}), '(111, 400)\n', (334, 344), True, 'import torch.nn as nn\n'), ((346, 355), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (353, 355), True, 'import torch.nn as nn\n'), ((393, 412), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (402, 412), True, 'import torch.nn as nn\n'), ((414, 423), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (421, 423), True, 'import torch.nn as nn\n'), ((461, 478), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(8)'], {}), '(300, 8)\n', (470, 478), True, 'import torch.nn as nn\n'), ((954, 973), 'torch.nn.Linear', 'nn.Linear', (['(119)', '(400)'], {}), '(119, 400)\n', (963, 973), True, 'import torch.nn as nn\n'), ((975, 984), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (982, 984), True, 'import torch.nn as nn\n'), ((1022, 1041), 'torch.nn.Linear', 'nn.Linear', (['(400)', '(300)'], {}), '(400, 300)\n', (1031, 1041), True, 'import torch.nn as nn\n'), ((1043, 1052), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1050, 1052), True, 'import torch.nn as nn\n'), ((1090, 1107), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(1)'], {}), '(300, 1)\n', (1099, 1107), True, 'import torch.nn as nn\n'), ((3735, 3774), 'torch.cat', 'torch.cat', (['(initial_states, actions)', '(1)'], {}), '((initial_states, actions), 1)\n', (3744, 3774), False, 'import gym, torch, random, copy\n'), ((820, 834), 'torch.randn', 'torch.randn', (['(8)'], {}), '(8)\n', (831, 834), False, 'import gym, torch, random, copy\n'), ((591, 610), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (607, 610), False, 'import gym, torch, random, copy\n'), ((747, 766), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (763, 766), False, 'import gym, torch, random, copy\n'), ((1714, 1733), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (1730, 1733), False, 'import gym, torch, random, copy\n'), ((1975, 1993), 'torch.tensor', 'torch.tensor', (['list'], {}), '(list)\n', (1987, 1993), False, 'import gym, torch, random, copy\n'), ((1884, 1902), 'torch.Tensor', 'torch.Tensor', (['list'], {}), '(list)\n', (1896, 1902), False, 'import gym, torch, random, copy\n')] |
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=redefined-outer-name
import os
import random
import string
import threading
import uuid
from random import randint
import pytest
import neptune.new.sync
from neptune.new.constants import OFFLINE_DIRECTORY
from neptune.new.exceptions import ProjectNotFound
from neptune.new.internal.backends.api_model import Project
from neptune.new.internal.container_type import ContainerType
from neptune.new.internal.containers.disk_queue import DiskQueue
from neptune.new.internal.operation import Operation
from neptune.new.internal.utils.sync_offset_file import SyncOffsetFile
from neptune.new.sync import (
ApiRun,
get_project,
get_qualified_name,
sync_all_runs,
sync_selected_runs,
synchronization_status,
)
def a_run():
return ApiRun(
str(uuid.uuid4()), "RUN-{}".format(randint(42, 12342)), "org", "proj", False
)
def a_project():
return ApiRun(
str(uuid.uuid4()),
"".join((random.choice(string.ascii_letters).upper() for _ in range(3))),
"org",
"proj",
False,
)
def generate_get_run_impl(registered_experiments):
def get_run_impl(run_id):
"""This function will return run as well as projects. Will be cleaned in ModelRegistry"""
for exp in registered_experiments:
if run_id in (str(exp.id), get_qualified_name(exp)):
return exp
return get_run_impl
def prepare_projects(path):
unsync_project = a_project()
sync_project = a_project()
registered_projects = (unsync_project, sync_project)
execution_id = "exec-0"
for project in registered_projects:
project_path = path / "async" / str(project.id) / execution_id
project_path.mkdir(parents=True)
queue = DiskQueue(
project_path,
lambda x: x,
lambda x: x,
threading.RLock(),
ContainerType.PROJECT,
)
queue.put("op-proj-0")
queue.put("op-proj-1")
SyncOffsetFile(
path / "async" / str(unsync_project.id) / execution_id / "last_ack_version"
).write(1)
SyncOffsetFile(
path / "async" / str(unsync_project.id) / execution_id / "last_put_version"
).write(2)
SyncOffsetFile(
path / "async" / str(sync_project.id) / execution_id / "last_ack_version"
).write(2)
SyncOffsetFile(
path / "async" / str(sync_project.id) / execution_id / "last_put_version"
).write(2)
return unsync_project, sync_project, generate_get_run_impl(registered_projects)
def prepare_runs(path):
unsync_exp = a_run()
sync_exp = a_run()
registered_runs = (unsync_exp, sync_exp)
execution_id = "exec-0"
for exp in registered_runs:
exp_path = path / "async" / str(exp.id) / execution_id
exp_path.mkdir(parents=True)
queue = DiskQueue(
exp_path, lambda x: x, lambda x: x, threading.RLock(), ContainerType.RUN
)
queue.put("op-0")
queue.put("op-1")
SyncOffsetFile(
path / "async" / str(unsync_exp.id) / execution_id / "last_ack_version"
).write(1)
SyncOffsetFile(
path / "async" / str(unsync_exp.id) / execution_id / "last_put_version"
).write(2)
SyncOffsetFile(
path / "async" / str(sync_exp.id) / execution_id / "last_ack_version"
).write(2)
SyncOffsetFile(
path / "async" / str(sync_exp.id) / execution_id / "last_put_version"
).write(2)
return unsync_exp, sync_exp, generate_get_run_impl(registered_runs)
def prepare_offline_run(path):
offline_exp_uuid = str(uuid.uuid4())
offline_exp_path = path / OFFLINE_DIRECTORY / offline_exp_uuid
offline_exp_path.mkdir(parents=True)
queue = DiskQueue(
offline_exp_path, lambda x: x, lambda x: x, threading.RLock(), ContainerType.RUN
)
queue.put("op-0")
queue.put("op-1")
SyncOffsetFile(
path / OFFLINE_DIRECTORY / offline_exp_uuid / "last_put_version"
).write(2)
return offline_exp_uuid
def test_list_projects(tmp_path, mocker, capsys):
"""TODO: we're mentioning projects as runs, will be improved with ModelRegistry"""
# given
unsync_proj, sync_proj, get_exp_impl = prepare_projects(tmp_path)
offline_exp_uuid = prepare_offline_run(tmp_path)
# and
mocker.patch.object(neptune.new.sync, "get_run", get_exp_impl)
mocker.patch.object(Operation, "from_dict")
# when
synchronization_status(tmp_path)
# then
captured = capsys.readouterr()
assert captured.err == ""
assert (
"Synchronized runs:\n- {}".format(get_qualified_name(sync_proj)) in captured.out
)
assert (
"Unsynchronized runs:\n- {}".format(get_qualified_name(unsync_proj))
in captured.out
)
assert (
"Unsynchronized offline runs:\n- offline/{}".format(offline_exp_uuid)
in captured.out
)
def test_list_runs(tmp_path, mocker, capsys):
# given
unsync_exp, sync_exp, get_run_impl = prepare_runs(tmp_path)
offline_exp_uuid = prepare_offline_run(tmp_path)
# and
mocker.patch.object(neptune.new.sync, "get_run", get_run_impl)
mocker.patch.object(Operation, "from_dict")
# when
synchronization_status(tmp_path)
# then
captured = capsys.readouterr()
assert captured.err == ""
assert (
"Synchronized runs:\n- {}".format(get_qualified_name(sync_exp)) in captured.out
)
assert (
"Unsynchronized runs:\n- {}".format(get_qualified_name(unsync_exp))
in captured.out
)
assert (
"Unsynchronized offline runs:\n- offline/{}".format(offline_exp_uuid)
in captured.out
)
def test_list_runs_when_no_run(tmp_path, capsys):
(tmp_path / "async").mkdir()
# when
with pytest.raises(SystemExit):
synchronization_status(tmp_path)
# then
captured = capsys.readouterr()
assert captured.err == ""
assert "There are no Neptune runs" in captured.out
def test_sync_all_runs(tmp_path, mocker, capsys):
# given
unsync_proj, sync_proj, _ = prepare_projects(tmp_path)
unsync_exp, sync_exp, _ = prepare_runs(tmp_path)
get_run_impl = generate_get_run_impl((unsync_proj, sync_proj, unsync_exp, sync_exp))
offline_exp_uuid = prepare_offline_run(tmp_path)
registered_offline_run = a_run()
# and
mocker.patch.object(neptune.new.sync, "get_run", get_run_impl)
mocker.patch.object(neptune.new.sync, "backend")
mocker.patch.object(neptune.new.sync.backend, "execute_operations")
mocker.patch.object(
neptune.new.sync.backend,
"get_project",
lambda _: Project(str(uuid.uuid4()), "project", "workspace"),
)
mocker.patch.object(
neptune.new.sync,
"register_offline_run",
lambda project, container_type: (registered_offline_run, True),
)
mocker.patch.object(Operation, "from_dict", lambda x: x)
neptune.new.sync.backend.execute_operations.return_value = (1, [])
# when
sync_all_runs(tmp_path, "foo")
# then
captured = capsys.readouterr()
assert captured.err == ""
assert (
"Offline run {} registered as {}".format(
offline_exp_uuid, get_qualified_name(registered_offline_run)
)
) in captured.out
assert "Synchronising {}".format(get_qualified_name(unsync_exp)) in captured.out
assert "Synchronising {}".format(get_qualified_name(unsync_proj)) in captured.out
assert (
"Synchronization of run {} completed.".format(get_qualified_name(unsync_exp))
in captured.out
)
assert (
"Synchronization of project {} completed.".format(
get_qualified_name(unsync_proj)
)
in captured.out
)
assert "Synchronising {}".format(get_qualified_name(sync_exp)) not in captured.out
assert "Synchronising {}".format(get_qualified_name(sync_proj)) not in captured.out
# and
# pylint: disable=no-member
neptune.new.sync.backend.execute_operations.has_calls(
[
mocker.call(unsync_exp.id, ContainerType.RUN, ["op-1"]),
mocker.call(registered_offline_run.id, ContainerType.RUN, ["op-1"]),
mocker.call(unsync_proj.id, ContainerType.PROJECT, ["op-proj-1"]),
],
any_order=True,
)
def test_sync_selected_runs(tmp_path, mocker, capsys):
# given
unsync_exp, sync_exp, get_run_impl = prepare_runs(tmp_path)
offline_exp_uuid = prepare_offline_run(tmp_path)
registered_offline_exp = a_run()
def get_run_impl_(run_id: str):
if run_id in (
str(registered_offline_exp.id),
get_qualified_name(registered_offline_exp),
):
return registered_offline_exp
else:
return get_run_impl(run_id)
# and
mocker.patch.object(neptune.new.sync, "get_run", get_run_impl_)
mocker.patch.object(neptune.new.sync, "backend")
mocker.patch.object(neptune.new.sync.backend, "execute_operations")
mocker.patch.object(
neptune.new.sync.backend,
"get_project",
lambda _: Project(str(uuid.uuid4()), "project", "workspace"),
)
mocker.patch.object(
neptune.new.sync,
"register_offline_run",
lambda project, container_type: (registered_offline_exp, True),
)
mocker.patch.object(Operation, "from_dict", lambda x: x)
neptune.new.sync.backend.execute_operations.return_value = (2, [])
# when
sync_selected_runs(
tmp_path,
"some-name",
[get_qualified_name(sync_exp), "offline/" + offline_exp_uuid],
)
# then
captured = capsys.readouterr()
assert captured.err == ""
assert "Synchronising {}".format(get_qualified_name(sync_exp)) in captured.out
assert (
"Synchronization of run {} completed.".format(get_qualified_name(sync_exp))
in captured.out
)
assert (
"Synchronising {}".format(get_qualified_name(registered_offline_exp))
in captured.out
)
assert (
"Synchronization of run {} completed.".format(
get_qualified_name(registered_offline_exp)
)
in captured.out
)
assert "Synchronising {}".format(get_qualified_name(unsync_exp)) not in captured.out
# and
# pylint: disable=no-member
neptune.new.sync.backend.execute_operations.assert_called_with(
registered_offline_exp.id, ContainerType.RUN, operations=["op-0", "op-1"]
)
def test_get_project_no_name_set(mocker):
# given
mocker.patch.object(os, "getenv")
os.getenv.return_value = None
# expect
assert get_project(None) is None
def test_get_project_project_not_found(mocker):
# given
mocker.patch.object(neptune.new.sync, "backend")
mocker.patch.object(neptune.new.sync.backend, "get_project")
neptune.new.sync.backend.get_project.side_effect = ProjectNotFound("foo")
# expect
assert get_project("foo") is None
def test_sync_non_existent_run(tmp_path, mocker, capsys):
# given
mocker.patch.object(neptune.new.sync, "get_project")
mocker.patch.object(neptune.new.sync, "get_run")
neptune.new.sync.get_run.return_value = a_run()
# when
sync_selected_runs(tmp_path, "foo", ["bar"])
# then
captured = capsys.readouterr()
assert "Warning: Run 'bar' does not exist in location" in captured.err
| [
"neptune.new.exceptions.ProjectNotFound",
"neptune.new.sync.get_project",
"neptune.new.sync.sync_all_runs",
"neptune.new.internal.utils.sync_offset_file.SyncOffsetFile",
"neptune.new.sync.get_qualified_name",
"neptune.new.sync.sync_selected_runs",
"neptune.new.sync.synchronization_status"
] | [((5024, 5056), 'neptune.new.sync.synchronization_status', 'synchronization_status', (['tmp_path'], {}), '(tmp_path)\n', (5046, 5056), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((5802, 5834), 'neptune.new.sync.synchronization_status', 'synchronization_status', (['tmp_path'], {}), '(tmp_path)\n', (5824, 5834), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((7589, 7619), 'neptune.new.sync.sync_all_runs', 'sync_all_runs', (['tmp_path', '"""foo"""'], {}), "(tmp_path, 'foo')\n", (7602, 7619), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((11448, 11470), 'neptune.new.exceptions.ProjectNotFound', 'ProjectNotFound', (['"""foo"""'], {}), "('foo')\n", (11463, 11470), False, 'from neptune.new.exceptions import ProjectNotFound\n'), ((11773, 11817), 'neptune.new.sync.sync_selected_runs', 'sync_selected_runs', (['tmp_path', '"""foo"""', "['bar']"], {}), "(tmp_path, 'foo', ['bar'])\n", (11791, 11817), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((4186, 4198), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4196, 4198), False, 'import uuid\n'), ((4384, 4401), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (4399, 4401), False, 'import threading\n'), ((6364, 6389), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (6377, 6389), False, 'import pytest\n'), ((6399, 6431), 'neptune.new.sync.synchronization_status', 'synchronization_status', (['tmp_path'], {}), '(tmp_path)\n', (6421, 6431), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((11187, 11204), 'neptune.new.sync.get_project', 'get_project', (['None'], {}), '(None)\n', (11198, 11204), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((11496, 11514), 'neptune.new.sync.get_project', 'get_project', (['"""foo"""'], {}), "('foo')\n", (11507, 11514), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((1388, 1400), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1398, 1400), False, 'import uuid\n'), ((1419, 1437), 'random.randint', 'randint', (['(42)', '(12342)'], {}), '(42, 12342)\n', (1426, 1437), False, 'from random import randint\n'), ((1517, 1529), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1527, 1529), False, 'import uuid\n'), ((2455, 2472), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (2470, 2472), False, 'import threading\n'), ((3496, 3513), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (3511, 3513), False, 'import threading\n'), ((4475, 4560), 'neptune.new.internal.utils.sync_offset_file.SyncOffsetFile', 'SyncOffsetFile', (["(path / OFFLINE_DIRECTORY / offline_exp_uuid / 'last_put_version')"], {}), "(path / OFFLINE_DIRECTORY / offline_exp_uuid / 'last_put_version'\n )\n", (4489, 4560), False, 'from neptune.new.internal.utils.sync_offset_file import SyncOffsetFile\n'), ((5189, 5218), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_proj'], {}), '(sync_proj)\n', (5207, 5218), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((5299, 5330), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_proj'], {}), '(unsync_proj)\n', (5317, 5330), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((5967, 5995), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_exp'], {}), '(sync_exp)\n', (5985, 5995), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((6076, 6106), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_exp'], {}), '(unsync_exp)\n', (6094, 6106), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((7790, 7832), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['registered_offline_run'], {}), '(registered_offline_run)\n', (7808, 7832), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((7902, 7932), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_exp'], {}), '(unsync_exp)\n', (7920, 7932), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((7987, 8018), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_proj'], {}), '(unsync_proj)\n', (8005, 8018), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((8103, 8133), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_exp'], {}), '(unsync_exp)\n', (8121, 8133), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((8249, 8280), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_proj'], {}), '(unsync_proj)\n', (8267, 8280), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((8358, 8386), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_exp'], {}), '(sync_exp)\n', (8376, 8386), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((8445, 8474), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_proj'], {}), '(sync_proj)\n', (8463, 8474), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10107, 10135), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_exp'], {}), '(sync_exp)\n', (10125, 10135), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10289, 10317), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_exp'], {}), '(sync_exp)\n', (10307, 10317), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10402, 10430), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['sync_exp'], {}), '(sync_exp)\n', (10420, 10430), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10509, 10551), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['registered_offline_exp'], {}), '(registered_offline_exp)\n', (10527, 10551), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10663, 10705), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['registered_offline_exp'], {}), '(registered_offline_exp)\n', (10681, 10705), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((10783, 10813), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['unsync_exp'], {}), '(unsync_exp)\n', (10801, 10813), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((9217, 9259), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['registered_offline_exp'], {}), '(registered_offline_exp)\n', (9235, 9259), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((1929, 1952), 'neptune.new.sync.get_qualified_name', 'get_qualified_name', (['exp'], {}), '(exp)\n', (1947, 1952), False, 'from neptune.new.sync import ApiRun, get_project, get_qualified_name, sync_all_runs, sync_selected_runs, synchronization_status\n'), ((7234, 7246), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7244, 7246), False, 'import uuid\n'), ((9684, 9696), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9694, 9696), False, 'import uuid\n'), ((1549, 1584), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (1562, 1584), False, 'import random\n')] |
import neptune
# The init() function called this way assumes that
# NEPTUNE_API_TOKEN environment variable is defined.
neptune.init('zackpashkin/sandbox')
PARAMS = {'decay_factor' : 0.5,
'n_iterations' : 117}
neptune.create_experiment(name='minimal_example',params=PARAMS)
# log some metrics
for i in range(100):
neptune.log_metric('loss', 0.95**i)
neptune.log_metric('AUC', 0.96) | [
"neptune.create_experiment",
"neptune.init",
"neptune.log_metric"
] | [((121, 156), 'neptune.init', 'neptune.init', (['"""zackpashkin/sandbox"""'], {}), "('zackpashkin/sandbox')\n", (133, 156), False, 'import neptune\n'), ((223, 287), 'neptune.create_experiment', 'neptune.create_experiment', ([], {'name': '"""minimal_example"""', 'params': 'PARAMS'}), "(name='minimal_example', params=PARAMS)\n", (248, 287), False, 'import neptune\n'), ((370, 401), 'neptune.log_metric', 'neptune.log_metric', (['"""AUC"""', '(0.96)'], {}), "('AUC', 0.96)\n", (388, 401), False, 'import neptune\n'), ((333, 370), 'neptune.log_metric', 'neptune.log_metric', (['"""loss"""', '(0.95 ** i)'], {}), "('loss', 0.95 ** i)\n", (351, 370), False, 'import neptune\n')] |
import requests
import pytest
from neptune.neptune_api import NeptuneService
from neptune.tests.conftest import get_server_addr
@pytest.mark.fpgas(1)
def test_coco(request):
"""
Check the coco service from Neptune with a known image
Args:
request (fixture): get the cmdline options
"""
server_addr = get_server_addr(request.config)
# create Neptune service and start it
service = NeptuneService(server_addr, 'coco')
service.start()
# submit job to service
post_data = {
'url': 'http://farm1.staticflickr.com/26/50531313_4422f0787e_z.jpg',
'dtype': 'uint8'
}
r = requests.post('%s/serve/coco' % server_addr, post_data)
assert r.status_code == 200, r.text
response = r.json()
assert type(response) is dict
# for this known image, validate the expected response
for i, j in zip(response['resized_shape'], [149, 224, 3]):
assert i == j
assert 'img' in response
assert response['url'] == post_data['url']
assert len(response['boxes']) == 2
tolerance = 5
for i, j in zip(response['boxes'][0], [85, 18, 149, 118, "giraffe"]):
if isinstance(j, int):
assert j - tolerance <= i <= j + tolerance
else:
assert i == j
for i, j in zip(response['boxes'][1], [21, 90, 65, 148, "zebra"]):
if isinstance(j, int):
assert j - tolerance <= i <= j + tolerance
else:
assert i == j
service.stop()
| [
"neptune.neptune_api.NeptuneService",
"neptune.tests.conftest.get_server_addr"
] | [((131, 151), 'pytest.mark.fpgas', 'pytest.mark.fpgas', (['(1)'], {}), '(1)\n', (148, 151), False, 'import pytest\n'), ((331, 362), 'neptune.tests.conftest.get_server_addr', 'get_server_addr', (['request.config'], {}), '(request.config)\n', (346, 362), False, 'from neptune.tests.conftest import get_server_addr\n'), ((420, 455), 'neptune.neptune_api.NeptuneService', 'NeptuneService', (['server_addr', '"""coco"""'], {}), "(server_addr, 'coco')\n", (434, 455), False, 'from neptune.neptune_api import NeptuneService\n'), ((639, 694), 'requests.post', 'requests.post', (["('%s/serve/coco' % server_addr)", 'post_data'], {}), "('%s/serve/coco' % server_addr, post_data)\n", (652, 694), False, 'import requests\n')] |
"""
Script by <NAME>, November 2020
Used to finetune models trained on pathology images (pre-chunked) on External Images
"""
import numpy as np
import tables
import pickle
import neptune
from neptunecontrib.monitoring.keras import NeptuneMonitor
import collections
from sklearn.utils import class_weight
import cv2
from scipy import ndimage, misc
from tensorflow.keras.models import load_model
import os
import random
from keras.models import Model
from keras.layers import Dense, Conv2D, Flatten, LSTM, Activation, Masking, Dropout, GlobalAveragePooling2D
import tensorflow as tf
import numpy as np
from keras.optimizers import RMSprop
from keras import backend as k
from sklearn.preprocessing import normalize
from keras.utils import np_utils
from keras import regularizers
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras import backend as K
from keras.callbacks import CSVLogger
from keras.optimizers import *
from keras import losses
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
import keras
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
#Set up GPU to use for fine tuning
os.environ["CUDA_VISIBLE_DEVICES"]="2"
#Set up Neptune API to use for tracking training progress
neptune_api_key = "Neptune API Key"
neptune.init(api_token = neptune_api_key, project_qualified_name='yashaektefaie/benignmodel')
def flatten_dict(d, parent_key='', sep='.'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
#print(dict(items))
return dict(items)
#Identify and open HDF5 file containing data to finetune with
hdf5_path = "/home/ye12/benign_model/benigntumorHDF5.hdf5"
hdf5_file = tables.open_file(hdf5_path, mode='r')
csv_logger = CSVLogger('benign_model_log.tsv', append=True, separator='\t')
# Identify and open model to fine tune
path_to_model = "model to finetune"
model = load_model(path_to_model, compile=True)
#Unfreeze specific layers of model to finetune
for layer in model.layers[249:]:
layer.trainable=True
# Recompile model with low learning rate and SGD optimizer foor finetuning
lr = 0.00001
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss="binary_crossentropy",metrics=['accuracy'])
#Regular Batch Generating Function, used to load data unmodified
def imageLoader(img, labels, batch_size, validation=0):
datasetLength = labels.shape[0]
while True:
batch_start = 0
batch_end = batch_size
while batch_start < datasetLength:
limit = min(batch_end, datasetLength)
X = img[batch_start:limit]
if validation:
new_X = []
for i in X:
i = cv2.resize(cv2.resize(i, (50, 50)), (224,224))
new_X.append(preprocess_input(i))
Y = np.array([[np.float32(i)] for i in labels[batch_start:limit]])
yield (np.array(new_X),Y)
else:
yield (X, labels[batch_start:limit])
batch_start += batch_size
batch_end += batch_size
#Modified Batch Generating Function, used to load a proportion of External Data interspersed with training data to protect against catastrophic forgetting
def imageLoader_modified(batch_size):
datasetLength = hdf5_file.root.exttest_labels.shape[0]*0.40
swap = 0
while True:
batch_start = 0
batch_end = batch_size
while batch_start < datasetLength:
limit = min(batch_end, datasetLength)
if swap%5 == 0:
X = hdf5_file.root.train_img_modified[batch_start:limit]
Y = hdf5_file.root.train_labels_modified[batch_start:limit]
else:
if swap%2 == 0:
X = hdf5_file.root.exttest_img[batch_start:limit]
Y = hdf5_file.root.exttest_labels[batch_start:limit]
else:
X = hdf5_file.root.exttest_img[batch_start+100000:limit+100000]
Y = hdf5_file.root.exttest_labels[batch_start+100000:limit+100000]
new_X = []
for i in X:
i = cv2.resize(cv2.resize(i, (50, 50)), (224,224))
new_X.append(preprocess_input(i))
yield (np.array(new_X),Y)
swap += 1
batch_start += batch_size
batch_end += batch_size
#Batch Size and Neptune Experiment Setup
batch_size = 32
train_settings = {'epochs': 50, 'batch_size': batch_size}
neptune_kwargs={}
exp = neptune.create_experiment(params = flatten_dict({'compile_options': compile_kwargs,
'train_settings': {'learning_rate':lr,
**train_settings}}), **neptune_kwargs)
#Setup Checkpoint to save best model iteration
checkpoint = ModelCheckpoint("Model Checkpoint Name", monitor='val_loss', verbose=1,
save_best_only=True, mode='auto', period=1)
#Start FineTuning The Model
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
hist = model.fit(
x=imageLoader_modified(batch_size),
steps_per_epoch=hdf5_file.root.exttest_img.shape[0]*0.4 // batch_size,
epochs=50,
validation_data=imageLoader(hdf5_file.root.val_img,hdf5_file.root.val_labels,batch_size, 1),
validation_steps=hdf5_file.root.val_img.shape[0] // batch_size,
callbacks=[early_stopping, csv_logger, NeptuneMonitor(exp), checkpoint])
model.save('Final Model Name')
| [
"neptune.init"
] | [((1446, 1542), 'neptune.init', 'neptune.init', ([], {'api_token': 'neptune_api_key', 'project_qualified_name': '"""yashaektefaie/benignmodel"""'}), "(api_token=neptune_api_key, project_qualified_name=\n 'yashaektefaie/benignmodel')\n", (1458, 1542), False, 'import neptune\n'), ((2046, 2083), 'tables.open_file', 'tables.open_file', (['hdf5_path'], {'mode': '"""r"""'}), "(hdf5_path, mode='r')\n", (2062, 2083), False, 'import tables\n'), ((2099, 2161), 'keras.callbacks.CSVLogger', 'CSVLogger', (['"""benign_model_log.tsv"""'], {'append': '(True)', 'separator': '"""\t"""'}), "('benign_model_log.tsv', append=True, separator='\\t')\n", (2108, 2161), False, 'from keras.callbacks import CSVLogger\n'), ((2246, 2285), 'tensorflow.keras.models.load_model', 'load_model', (['path_to_model'], {'compile': '(True)'}), '(path_to_model, compile=True)\n', (2256, 2285), False, 'from tensorflow.keras.models import load_model\n'), ((5214, 5333), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""Model Checkpoint Name"""'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""', 'period': '(1)'}), "('Model Checkpoint Name', monitor='val_loss', verbose=1,\n save_best_only=True, mode='auto', period=1)\n", (5229, 5333), False, 'from keras.callbacks import ModelCheckpoint\n'), ((5380, 5486), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""auto"""'}), "(monitor='val_loss', min_delta=0, patience=\n 5, verbose=0, mode='auto')\n", (5412, 5486), True, 'import tensorflow as tf\n'), ((2505, 2533), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), '(lr=0.0001, momentum=0.9)\n', (2508, 2533), False, 'from tensorflow.keras.optimizers import SGD\n'), ((5841, 5860), 'neptunecontrib.monitoring.keras.NeptuneMonitor', 'NeptuneMonitor', (['exp'], {}), '(exp)\n', (5855, 5860), False, 'from neptunecontrib.monitoring.keras import NeptuneMonitor\n'), ((4503, 4526), 'cv2.resize', 'cv2.resize', (['i', '(50, 50)'], {}), '(i, (50, 50))\n', (4513, 4526), False, 'import cv2\n'), ((4568, 4587), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['i'], {}), '(i)\n', (4584, 4587), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((4608, 4623), 'numpy.array', 'np.array', (['new_X'], {}), '(new_X)\n', (4616, 4623), True, 'import numpy as np\n'), ((3063, 3086), 'cv2.resize', 'cv2.resize', (['i', '(50, 50)'], {}), '(i, (50, 50))\n', (3073, 3086), False, 'import cv2\n'), ((3132, 3151), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['i'], {}), '(i)\n', (3148, 3151), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((3260, 3275), 'numpy.array', 'np.array', (['new_X'], {}), '(new_X)\n', (3268, 3275), True, 'import numpy as np\n'), ((3184, 3197), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (3194, 3197), True, 'import numpy as np\n')] |
import warnings
from typing import Callable, Sequence, Union
import joblib
import neptune.new as neptune
import neptune.new.integrations.optuna as optuna_utils
import optuna
import pandas as pd
from optuna.samplers import TPESampler
from optuna.study import Study
from optuna.trial import FrozenTrial, Trial
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor
warnings.filterwarnings("ignore")
class BayesianOptimizer:
def __init__(
self, objective_function: Callable[[Trial], Union[float, Sequence[float]]]
):
self.objective_function = objective_function
def bulid_study(
self,
trials: FrozenTrial,
name: str,
liar: bool = False,
verbose: bool = True,
):
if liar:
run = neptune.init(project="ds-wook/predict-meals")
neptune_callback = optuna_utils.NeptuneCallback(
run, plots_update_freq=1, log_plot_slice=False, log_plot_contour=False
)
sampler = TPESampler(
seed=42,
constant_liar=True,
multivariate=True,
group=True,
n_startup_trials=20,
)
study = optuna.create_study(
study_name=name, direction="minimize", sampler=sampler
)
study.optimize(
self.objective_function, n_trials=trials, callbacks=[neptune_callback]
)
run.stop()
else:
run = neptune.init(project="ds-wook/predict-meals")
neptune_callback = optuna_utils.NeptuneCallback(
run, plots_update_freq=1, log_plot_slice=False, log_plot_contour=False
)
sampler = TPESampler(seed=42)
study = optuna.create_study(
study_name=name, direction="minimize", sampler=sampler
)
study.optimize(
self.objective_function, n_trials=trials, callbacks=[neptune_callback]
)
run.stop()
if verbose:
self.display_study_statistics(study)
return study
@staticmethod
def display_study_statistics(study: Study):
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(f" '{key}': {value},")
@staticmethod
def xgb_lunch_save_params(study: Study, params_name: str):
params = study.best_trial.params
params["random_state"] = 42
params["n_estimators"] = 10000
params["learning_rate"] = 0.02
params["eval_metric"] = "mae"
joblib.dump(params, "../../parameters/" + params_name)
@staticmethod
def xgb_dinner_save_params(study: Study, params_name: str):
params = study.best_trial.params
params["random_state"] = 42
params["n_estimators"] = 10000
params["eval_metric"] = "mae"
joblib.dump(params, "../../parameters/" + params_name)
def xgb_lunch_objective(
trial: FrozenTrial,
x_train: pd.DataFrame,
y_train: pd.DataFrame,
x_valid: pd.DataFrame,
y_valid: pd.DataFrame,
verbose: Union[int, bool],
) -> float:
param = {
"lambda": trial.suggest_loguniform("lambda", 1e-03, 1e-01),
"subsample": trial.suggest_float("subsample", 0.5, 1),
"max_depth": trial.suggest_int("max_depth", 3, 20),
"min_child_weight": trial.suggest_int("min_child_weight", 1, 300),
"random_state": 42,
"learning_rate": 0.02,
"n_estimators": 10000,
"eval_metric": "mae",
}
model = XGBRegressor(**param)
model.fit(
x_train,
y_train,
eval_set=[(x_train, y_train), (x_valid, y_valid)],
early_stopping_rounds=100,
verbose=verbose,
)
preds = model.predict(x_valid)
mae = mean_absolute_error(y_valid, preds)
return mae
def xgb_dinner_objective(
trial: FrozenTrial,
x_train: pd.DataFrame,
y_train: pd.DataFrame,
x_valid: pd.DataFrame,
y_valid: pd.DataFrame,
verbose: Union[int, bool],
) -> float:
param = {
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.5, 1),
"subsample": trial.suggest_float("subsample", 0.5, 1),
"learning_rate": trial.suggest_float("learning_rate", 1e-02, 1e-01),
"n_estimators": 10000,
"max_depth": trial.suggest_int("max_depth", 3, 20),
"random_state": 42,
"eval_metric": "mae",
"min_child_weight": trial.suggest_int("min_child_weight", 1, 300),
}
model = XGBRegressor(**param)
model.fit(
x_train,
y_train,
eval_set=[(x_train, y_train), (x_valid, y_valid)],
early_stopping_rounds=100,
verbose=verbose,
)
preds = model.predict(x_valid)
mae = mean_absolute_error(y_valid, preds)
return mae
| [
"neptune.new.init",
"neptune.new.integrations.optuna.NeptuneCallback"
] | [((391, 424), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (414, 424), False, 'import warnings\n'), ((3692, 3713), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '(**param)\n', (3704, 3713), False, 'from xgboost import XGBRegressor\n'), ((3936, 3971), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_valid', 'preds'], {}), '(y_valid, preds)\n', (3955, 3971), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4665, 4686), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '(**param)\n', (4677, 4686), False, 'from xgboost import XGBRegressor\n'), ((4907, 4942), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_valid', 'preds'], {}), '(y_valid, preds)\n', (4926, 4942), False, 'from sklearn.metrics import mean_absolute_error\n'), ((2717, 2771), 'joblib.dump', 'joblib.dump', (['params', "('../../parameters/' + params_name)"], {}), "(params, '../../parameters/' + params_name)\n", (2728, 2771), False, 'import joblib\n'), ((3017, 3071), 'joblib.dump', 'joblib.dump', (['params', "('../../parameters/' + params_name)"], {}), "(params, '../../parameters/' + params_name)\n", (3028, 3071), False, 'import joblib\n'), ((797, 842), 'neptune.new.init', 'neptune.init', ([], {'project': '"""ds-wook/predict-meals"""'}), "(project='ds-wook/predict-meals')\n", (809, 842), True, 'import neptune.new as neptune\n'), ((874, 978), 'neptune.new.integrations.optuna.NeptuneCallback', 'optuna_utils.NeptuneCallback', (['run'], {'plots_update_freq': '(1)', 'log_plot_slice': '(False)', 'log_plot_contour': '(False)'}), '(run, plots_update_freq=1, log_plot_slice=False,\n log_plot_contour=False)\n', (902, 978), True, 'import neptune.new.integrations.optuna as optuna_utils\n'), ((1027, 1122), 'optuna.samplers.TPESampler', 'TPESampler', ([], {'seed': '(42)', 'constant_liar': '(True)', 'multivariate': '(True)', 'group': '(True)', 'n_startup_trials': '(20)'}), '(seed=42, constant_liar=True, multivariate=True, group=True,\n n_startup_trials=20)\n', (1037, 1122), False, 'from optuna.samplers import TPESampler\n'), ((1234, 1309), 'optuna.create_study', 'optuna.create_study', ([], {'study_name': 'name', 'direction': '"""minimize"""', 'sampler': 'sampler'}), "(study_name=name, direction='minimize', sampler=sampler)\n", (1253, 1309), False, 'import optuna\n'), ((1525, 1570), 'neptune.new.init', 'neptune.init', ([], {'project': '"""ds-wook/predict-meals"""'}), "(project='ds-wook/predict-meals')\n", (1537, 1570), True, 'import neptune.new as neptune\n'), ((1602, 1706), 'neptune.new.integrations.optuna.NeptuneCallback', 'optuna_utils.NeptuneCallback', (['run'], {'plots_update_freq': '(1)', 'log_plot_slice': '(False)', 'log_plot_contour': '(False)'}), '(run, plots_update_freq=1, log_plot_slice=False,\n log_plot_contour=False)\n', (1630, 1706), True, 'import neptune.new.integrations.optuna as optuna_utils\n'), ((1755, 1774), 'optuna.samplers.TPESampler', 'TPESampler', ([], {'seed': '(42)'}), '(seed=42)\n', (1765, 1774), False, 'from optuna.samplers import TPESampler\n'), ((1795, 1870), 'optuna.create_study', 'optuna.create_study', ([], {'study_name': 'name', 'direction': '"""minimize"""', 'sampler': 'sampler'}), "(study_name=name, direction='minimize', sampler=sampler)\n", (1814, 1870), False, 'import optuna\n')] |
import neptune
def _update_keys(d, prefix):
keys = list(d.keys())
for k in keys:
d['{}_{}'.format(prefix, k)] = d.pop(k)
class NeptuneWriter:
def __init__(self, proj_name):
self.project = neptune.init(proj_name)
self.has_started = False
def start(self, args, **kwargs):
self.experiment = self.project.create_experiment(
name=args['experiment_name'], params=args, **kwargs)
self.has_started = True
def fin(self):
if self.has_started:
# will finish when all data has been sent
self.experiment.stop()
self.has_started = False
def write(self, data, step):
if self.has_started:
for k in data.keys():
self.experiment.log_metric(k, step, data[k])
else:
print('Warning: Writing to dead writer - call .start({}) first')
def id(self):
return self.experiment.id | [
"neptune.init"
] | [((220, 243), 'neptune.init', 'neptune.init', (['proj_name'], {}), '(proj_name)\n', (232, 243), False, 'import neptune\n')] |
import neptune.new as neptune
import os
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader, random_split
from torch.optim.lr_scheduler import CyclicLR
import torch.multiprocessing as mp
import numpy as np
import random
import math
import sys
sys.path.append("..") # adds higher directory to python modules path
from LoaderPACK.Unet_leaky import Unet_leaky, Unet_leaky_lstm
from LoaderPACK.Loader import shuffle_5min
from LoaderPACK.trainer import net_train
from LoaderPACK.Accuarcy_finder import Accuarcy_find
from LoaderPACK.Accuarcy_upload import Accuarcy_upload
from multiprocessing import Process
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
def net_SGD(device, fl, it, train_path, val_path):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
net_name = "network_SGD"
batch_size = 10
n_samples = 17 # the defualt amount of samples minus 1
train_load_file = shuffle_5min(path = train_path,
series_dict = 'train_series_length.pickle',
size = (195, 22, 2060000),
device = device,
length = n_samples)
train_loader = torch.utils.data.DataLoader(train_load_file,
batch_size=batch_size,
shuffle=True,
num_workers=0)
val_load_file = shuffle_5min(path = val_path,
series_dict = 'val_series_length.pickle',
size = (28, 22, 549200),
device = device,
seed = 42)
val_loader = torch.utils.data.DataLoader(val_load_file,
batch_size=batch_size,
shuffle=False,
num_workers=0,
drop_last=True)
nEpoch = 100
base_lr = 0.216 # where we start the learning rate (min point)
max_lr = 0.268 # where the learning rate is at the max point
weight_decay = 0
step_size_up = (n_samples/batch_size)*5
model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \
device=device).to(device)
optimizer = SGD(model.parameters(), lr=base_lr)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr,
step_size_up=step_size_up,
cycle_momentum=True, base_momentum=0.8,
max_momentum=0.9)
smooth = 0.05
params = {"optimizer":"SGD", "batch_size":batch_size,
"optimizer_learning_rate": base_lr,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet_leaky_lstm", "scheduler":"CyclicLR",
"scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr,
"scheduler_cycle_momentum":True,
"base_momentum":0.8, "max_momentum":0.9,
"scheduler_step_size_up":step_size_up,
"smooting_loss":smooth}
run[f"{net_name}/parameters"] = params
net_train(device = device,
fl = fl, it = it,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
smooth = smooth,
train_loader = train_loader,
val_loader = val_loader,
run = run,
path = "C:/Users/Marc/Desktop/network/",
scheduler = scheduler)
def net_ADAM(device, fl, it, train_path, val_path):
token = os.getenv('Neptune_api')
run = neptune.init(
project="NTLAB/artifact-rej-scalp",
api_token=token,
)
net_name = "network_ADAM"
batch_size = 10
n_samples = 11141 - 1 # the defualt amount of samples minus 1
train_load_file = shuffle_5min(path = train_path,
series_dict = 'train_series_length.pickle',
size = (195, 22, 2060000),
device = device,
length = n_samples)
train_loader = torch.utils.data.DataLoader(train_load_file,
batch_size=batch_size,
shuffle=True,
num_workers=0,
drop_last=True)
val_load_file = shuffle_5min(path = val_path,
series_dict = 'val_series_length.pickle',
size = (28, 22, 549200),
device = device,
seed = 42)
val_loader = torch.utils.data.DataLoader(val_load_file,
batch_size=batch_size,
shuffle=False,
num_workers=0,
drop_last=True)
nEpoch = 100
base_lr = 0.0089 # where we start the learning rate (min point)
max_lr = 0.013 # where the learning rate is at the max point
weight_decay = 0.0001
step_size_up = (n_samples/batch_size)*5
model = Unet_leaky_lstm(n_channels=1, batch_size=batch_size, \
device=device).to(device)
# model = Unet_leaky(n_channels=1, n_classes=2).to(device)
optimizer = Adam(model.parameters(), lr=0.004, weight_decay=weight_decay)
lossFunc = nn.CrossEntropyLoss(weight = torch.tensor([1., 5.]).to(device),
reduction = "mean")
scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr,
step_size_up=step_size_up,
cycle_momentum=False)
# step_size_up is set so the learning rate is updated linearly
smooth = 0.05
params = {"optimizer":"ADAM", "batch_size":batch_size,
"optimizer_learning_rate": base_lr,
"optimizer_weight_decay": weight_decay,
"loss_function":"CrossEntropyLoss",
"loss_function_weights":[1, 5],
"loss_function_reduction":"mean",
"model":"Unet_leaky_lstm", "scheduler":"CyclicLR",
"scheduler_base_lr":base_lr, "scheduler_max_lr":max_lr,
"scheduler_cycle_momentum":False,
"scheduler_step_size_up":step_size_up,
"smooting_loss":smooth}
run[f"{net_name}/parameters"] = params
net_train(device = device,
fl = fl, it = it,
net_name = net_name,
model = model,
optimizer = optimizer,
lossFunc = lossFunc,
nEpoch = nEpoch,
smooth = smooth,
train_loader = train_loader,
val_loader = val_loader,
run = run,
path = "/home/tyson/network/", #"C:/Users/Marc/Desktop/network/",
scheduler = scheduler)
def net_starter(nets, device, fl, it, train_path, val_path):
for net in nets:
pr1 = mp.Process(target=net, args = (device, fl, it,
train_path,
val_path,))
pr1.start()
pr1.join()
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
if device == "cpu":
fl = torch.FloatTensor
it = torch.LongTensor
else:
fl = torch.cuda.FloatTensor
it = torch.cuda.LongTensor
core = torch.cuda.device_count()
networks = [net_SGD] #
cuda_dict = dict()
# cuda_dict[core] = networks
for i in range(core):
cuda_dict[i] = []
for i in range(len(networks)):
cuda_dict[i % core].append(networks[i]) # i % core
#"/home/tyson/model_data/train_model_data"
# "C:/Users/Marc/Desktop/model_data/train_model_data"
# train_path = "/home/tyson/data_cutoff/train_model_data"
# val_path = "/home/tyson/data_cutoff/val_model_data"
train_path = r"C:\Users\Marc\Desktop\data\train_model_data"
val_path = r"C:\Users\Marc\Desktop\data\val_model_data"
pres = []
for i in range(core):
pres.append(mp.Process(target=net_starter, args = (cuda_dict.get(i),
f"cuda:{i}",
fl, it,
train_path,
val_path,)))
for process in pres:
process.start()
for process in pres:
process.join()
| [
"neptune.new.init"
] | [((333, 354), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (348, 354), False, 'import sys\n'), ((703, 731), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (722, 731), True, 'import torch.multiprocessing as mp\n'), ((828, 852), 'os.getenv', 'os.getenv', (['"""Neptune_api"""'], {}), "('Neptune_api')\n", (837, 852), False, 'import os\n'), ((863, 928), 'neptune.new.init', 'neptune.init', ([], {'project': '"""NTLAB/artifact-rej-scalp"""', 'api_token': 'token'}), "(project='NTLAB/artifact-rej-scalp', api_token=token)\n", (875, 928), True, 'import neptune.new as neptune\n'), ((1085, 1218), 'LoaderPACK.Loader.shuffle_5min', 'shuffle_5min', ([], {'path': 'train_path', 'series_dict': '"""train_series_length.pickle"""', 'size': '(195, 22, 2060000)', 'device': 'device', 'length': 'n_samples'}), "(path=train_path, series_dict='train_series_length.pickle',\n size=(195, 22, 2060000), device=device, length=n_samples)\n", (1097, 1218), False, 'from LoaderPACK.Loader import shuffle_5min\n'), ((1386, 1487), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_load_file'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)'}), '(train_load_file, batch_size=batch_size, shuffle\n =True, num_workers=0)\n', (1413, 1487), False, 'import torch\n'), ((1645, 1764), 'LoaderPACK.Loader.shuffle_5min', 'shuffle_5min', ([], {'path': 'val_path', 'series_dict': '"""val_series_length.pickle"""', 'size': '(28, 22, 549200)', 'device': 'device', 'seed': '(42)'}), "(path=val_path, series_dict='val_series_length.pickle', size=(\n 28, 22, 549200), device=device, seed=42)\n", (1657, 1764), False, 'from LoaderPACK.Loader import shuffle_5min\n'), ((1921, 2037), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_load_file'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(val_load_file, batch_size=batch_size, shuffle=\n False, num_workers=0, drop_last=True)\n', (1948, 2037), False, 'import torch\n'), ((2755, 2896), 'torch.optim.lr_scheduler.CyclicLR', 'CyclicLR', (['optimizer'], {'base_lr': 'base_lr', 'max_lr': 'max_lr', 'step_size_up': 'step_size_up', 'cycle_momentum': '(True)', 'base_momentum': '(0.8)', 'max_momentum': '(0.9)'}), '(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=\n step_size_up, cycle_momentum=True, base_momentum=0.8, max_momentum=0.9)\n', (2763, 2896), False, 'from torch.optim.lr_scheduler import CyclicLR\n'), ((3616, 3888), 'LoaderPACK.trainer.net_train', 'net_train', ([], {'device': 'device', 'fl': 'fl', 'it': 'it', 'net_name': 'net_name', 'model': 'model', 'optimizer': 'optimizer', 'lossFunc': 'lossFunc', 'nEpoch': 'nEpoch', 'smooth': 'smooth', 'train_loader': 'train_loader', 'val_loader': 'val_loader', 'run': 'run', 'path': '"""C:/Users/Marc/Desktop/network/"""', 'scheduler': 'scheduler'}), "(device=device, fl=fl, it=it, net_name=net_name, model=model,\n optimizer=optimizer, lossFunc=lossFunc, nEpoch=nEpoch, smooth=smooth,\n train_loader=train_loader, val_loader=val_loader, run=run, path=\n 'C:/Users/Marc/Desktop/network/', scheduler=scheduler)\n", (3625, 3888), False, 'from LoaderPACK.trainer import net_train\n'), ((4139, 4163), 'os.getenv', 'os.getenv', (['"""Neptune_api"""'], {}), "('Neptune_api')\n", (4148, 4163), False, 'import os\n'), ((4174, 4239), 'neptune.new.init', 'neptune.init', ([], {'project': '"""NTLAB/artifact-rej-scalp"""', 'api_token': 'token'}), "(project='NTLAB/artifact-rej-scalp', api_token=token)\n", (4186, 4239), True, 'import neptune.new as neptune\n'), ((4404, 4537), 'LoaderPACK.Loader.shuffle_5min', 'shuffle_5min', ([], {'path': 'train_path', 'series_dict': '"""train_series_length.pickle"""', 'size': '(195, 22, 2060000)', 'device': 'device', 'length': 'n_samples'}), "(path=train_path, series_dict='train_series_length.pickle',\n size=(195, 22, 2060000), device=device, length=n_samples)\n", (4416, 4537), False, 'from LoaderPACK.Loader import shuffle_5min\n'), ((4705, 4822), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_load_file'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(train_load_file, batch_size=batch_size, shuffle\n =True, num_workers=0, drop_last=True)\n', (4732, 4822), False, 'import torch\n'), ((5027, 5146), 'LoaderPACK.Loader.shuffle_5min', 'shuffle_5min', ([], {'path': 'val_path', 'series_dict': '"""val_series_length.pickle"""', 'size': '(28, 22, 549200)', 'device': 'device', 'seed': '(42)'}), "(path=val_path, series_dict='val_series_length.pickle', size=(\n 28, 22, 549200), device=device, seed=42)\n", (5039, 5146), False, 'from LoaderPACK.Loader import shuffle_5min\n'), ((5303, 5419), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_load_file'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(val_load_file, batch_size=batch_size, shuffle=\n False, num_workers=0, drop_last=True)\n', (5330, 5419), False, 'import torch\n'), ((6231, 6336), 'torch.optim.lr_scheduler.CyclicLR', 'CyclicLR', (['optimizer'], {'base_lr': 'base_lr', 'max_lr': 'max_lr', 'step_size_up': 'step_size_up', 'cycle_momentum': '(False)'}), '(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=\n step_size_up, cycle_momentum=False)\n', (6239, 6336), False, 'from torch.optim.lr_scheduler import CyclicLR\n'), ((7100, 7362), 'LoaderPACK.trainer.net_train', 'net_train', ([], {'device': 'device', 'fl': 'fl', 'it': 'it', 'net_name': 'net_name', 'model': 'model', 'optimizer': 'optimizer', 'lossFunc': 'lossFunc', 'nEpoch': 'nEpoch', 'smooth': 'smooth', 'train_loader': 'train_loader', 'val_loader': 'val_loader', 'run': 'run', 'path': '"""/home/tyson/network/"""', 'scheduler': 'scheduler'}), "(device=device, fl=fl, it=it, net_name=net_name, model=model,\n optimizer=optimizer, lossFunc=lossFunc, nEpoch=nEpoch, smooth=smooth,\n train_loader=train_loader, val_loader=val_loader, run=run, path=\n '/home/tyson/network/', scheduler=scheduler)\n", (7109, 7362), False, 'from LoaderPACK.trainer import net_train\n'), ((8186, 8211), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8209, 8211), False, 'import torch\n'), ((7680, 7747), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'net', 'args': '(device, fl, it, train_path, val_path)'}), '(target=net, args=(device, fl, it, train_path, val_path))\n', (7690, 7747), True, 'import torch.multiprocessing as mp\n'), ((2442, 2509), 'LoaderPACK.Unet_leaky.Unet_leaky_lstm', 'Unet_leaky_lstm', ([], {'n_channels': '(1)', 'batch_size': 'batch_size', 'device': 'device'}), '(n_channels=1, batch_size=batch_size, device=device)\n', (2457, 2509), False, 'from LoaderPACK.Unet_leaky import Unet_leaky, Unet_leaky_lstm\n'), ((5830, 5897), 'LoaderPACK.Unet_leaky.Unet_leaky_lstm', 'Unet_leaky_lstm', ([], {'n_channels': '(1)', 'batch_size': 'batch_size', 'device': 'device'}), '(n_channels=1, batch_size=batch_size, device=device)\n', (5845, 5897), False, 'from LoaderPACK.Unet_leaky import Unet_leaky, Unet_leaky_lstm\n'), ((7951, 7976), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7974, 7976), False, 'import torch\n'), ((2648, 2672), 'torch.tensor', 'torch.tensor', (['[1.0, 5.0]'], {}), '([1.0, 5.0])\n', (2660, 2672), False, 'import torch\n'), ((6124, 6148), 'torch.tensor', 'torch.tensor', (['[1.0, 5.0]'], {}), '([1.0, 5.0])\n', (6136, 6148), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import json
import os
import sys
from neptune.generated.swagger_client import (InputPath, QueuedRemoteExperimentParams, StringParam)
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.enqueue_utils import EnqueueUtils
from neptune.internal.cli.commands.executing.execution_paths import ExecutionPaths
from neptune.internal.cli.commands.neptune_command import NeptuneCommand
from neptune.internal.cli.commands.parsers.root_parser import NeptuneRootCommandParser
from neptune.internal.cli.commands.utils.pip_requirements_utils import create_string_param
from neptune.internal.cli.experiments.experiment_creator import ExperimentsCreator
from neptune.internal.cli.storage.populate_storage_utils import (CopyProgressBar, collect_files)
from neptune.internal.cli.storage.upload_storage_utils import upload_to_storage
from neptune.internal.common import NeptuneException
from neptune.internal.common.config.job_config import ConfigKeys
from neptune.internal.common.config.neptune_config import NeptuneConfig, load_global_config, load_local_config
from neptune.internal.common.models.rich_project import ProjectResolver
from neptune.internal.common.parsers.command_parsing_utils import (compose_string_command)
from neptune.internal.common.parsers.common_parameters_configurator import \
CommonParametersConfigurator
from neptune.internal.common.utils.git import send_git_info_if_present
class NeptuneRun(NeptuneCommand):
def __init__(self,
config,
local_storage,
api_service,
tracked_parameter_parser,
web_browser,
project,
neptune_exec_factory,
name,
environment=None,
inputs=None):
super(NeptuneRun, self).__init__(name, config, api_service)
self._tracked_parameter_parser = tracked_parameter_parser
self._rest_api_url = self.config.rest_url
self.logger.debug("Rest API url: %s", self._rest_api_url)
self.project = project
self.enqueue_utils = EnqueueUtils(config, api_service, web_browser)
self._neptune_exec_factory = neptune_exec_factory
self._local_storage = local_storage
self._command_parser = NeptuneRootCommandParser()
self.environment = environment
self.inputs = inputs or []
self.current_command = None
self.experiment_ids = []
self.tracked_params = None
self.experiment_config = None
self._experiments_creator = ExperimentsCreator(enqueue_utils=self.enqueue_utils, project=self.project)
def prepare(self, args):
self.experiment_config = self._create_run_parameters(args)
self.tracked_params = \
self.enqueue_utils.parse_experiment_arguments(self.experiment_config, self._tracked_parameter_parser)
self.config.parameters = self.experiment_config.parameters
# pylint:disable=arguments-differ
def run(self, args):
try:
self._ensure_executable_exists()
self.prepare(args)
result = self._create_experiments(args)
self.experiment_ids = result.experiment_ids
if args.known_args.snapshot:
custom_execution_paths = self._make_code_snapshot(result.short_id)
print(self._snapshot_info_message(snapshot_path=custom_execution_paths.sources_location))
else:
custom_execution_paths = None
self._close_experiment_creation_message()
self._configure_experiments(self.experiment_ids)
self.exit_code = self._exec_experiments(
experiment_ids=self.experiment_ids, debug=args.known_args.debug,
custom_execution_paths=custom_execution_paths)
except:
self.exit_code = self.UNKNOWN_EXCEPTION_EXIT_CODE
raise
def _create_experiments(self, args):
return self._experiments_creator.create(
experiment_config=self.experiment_config, enqueue_command=self._enqueue_command(args.raw_args),
notebook_absolute_path=self._get_notebook_absolute_path(), tracked_params=self.tracked_params,
parameters=self.config.parameters, remote_params=self._get_remote_params()
)
def _configure_experiments(self, experiment_ids):
self.api_service.add_experiments_backups(experiment_ids=experiment_ids, globs=list(self.config.backup))
self._upload_sources(experiment_ids)
send_git_info_if_present(self.api_service, experiment_ids)
for experiment_id in experiment_ids:
self.api_service.mark_experiment_waiting(experiment_id)
@classmethod
def _create_run_parameters(cls, args):
if getattr(args.known_args, 'executable', None):
print(u"Using --executable is deprecated. Pass the executable as a positional parameter.")
profile = getattr(args.known_args,
ConfigKeys.PROFILE,
CommonParametersConfigurator.DEFAULT_PROFILE)
return NeptuneConfig(
commandline_args=args,
local_config=load_local_config(args.known_args.config),
global_config=load_global_config(profile),
cli_parameters=(args.known_args.parameter or [])
)
def _ensure_executable_exists(self):
if self.config.executable is None:
raise NeptuneException(self.config.NO_EXECUTABLE_MESSAGE)
def _upload_sources(self, experiment_ids):
files_list, data_size, empty_dir_list = collect_files(exclude=self.config.exclude)
empty_dir_list = [(x, y.replace("./", "", 1)) for x, y in empty_dir_list]
copy_progress_bar = CopyProgressBar(data_size, u"Sending sources to server")
experiment_id = experiment_ids[0]
upload_to_storage(files_list=files_list,
dir_list=empty_dir_list,
upload_api_fun=self.api_service.upload_experiment_source,
upload_tarstream_api_fun=self.api_service.upload_experiment_source_as_tarstream,
callback=copy_progress_bar.update,
experiment_id=experiment_id)
copy_progress_bar.finalize()
self.api_service.finalize_experiment_upload(
experiment_id=experiment_id,
target_experiment_ids=experiment_ids
)
def abort(self):
try:
if self.current_command is not None:
self.current_command.abort()
finally:
if self.experiment_ids:
print(u'Marking experiments as aborted...')
# TODO group abort
self.api_service.mark_experiment_aborted(self.experiment_ids, with_retries=False)
@staticmethod
def _close_experiment_creation_message():
# Until this call, we can print formatted lines, like this:
# >
# > Experiment enqueued, id: ...
# >
sys.stdout.write(u'\n')
@staticmethod
def _confirmation_message(experiment_id):
return (u'>\n'
u'> Started experiment execution, id: {experiment_id}\n'
u'>\n').format(experiment_id=experiment_id)
@staticmethod
def _snapshot_info_message(snapshot_path):
return (u"> Source code and output are located in: {}\n"
u">").format(snapshot_path)
def _make_code_snapshot(self, short_id):
code_snapshot = self._local_storage.experiments_directory.copy_to_subdir(
src=os.getcwd(), dst_subdir_name=short_id)
return ExecutionPaths(sources_location=code_snapshot.absolute_path)
def _exec_experiments(self, experiment_ids, debug, custom_execution_paths):
exit_codes = [
self._exec_experiment(
experiment_id=experiment_id, print_confirmation=len(experiment_ids) > 1,
debug=debug, custom_execution_paths=custom_execution_paths)
for experiment_id in experiment_ids
]
return self._combined_exit_code(exit_codes)
def _exec_experiment(self, experiment_id, print_confirmation=False, debug=False, custom_execution_paths=None):
if print_confirmation:
print(self._confirmation_message(experiment_id))
exec_args_list = [CommandNames.EXEC, experiment_id]
CommonParametersConfigurator.append_debug_param(exec_args_list, debug)
args = self._command_parser.get_arguments(exec_args_list)
self.current_command = self._neptune_exec_factory.create(
experiment_id=experiment_id,
environment=self.environment,
custom_execution_paths=custom_execution_paths
)
self.current_command.run(args)
return self.current_command.exit_code
@staticmethod
def _enqueue_command(raw_args):
return compose_string_command(raw_args=[u'neptune'] + raw_args)
def _get_notebook_absolute_path(self):
return None
def _get_remote_params(self):
return None
@staticmethod
def _combined_exit_code(exit_codes):
non_zero_exit_codes = [c for c in exit_codes if c != 0]
return (non_zero_exit_codes + [0])[0]
class NeptuneRunWorker(NeptuneRun):
def __init__(self,
config,
local_storage,
api_service,
tracked_parameter_parser,
inputs,
environment,
worker,
web_browser,
project,
experiment_executor_factory):
super(NeptuneRunWorker, self).__init__(config=config, local_storage=local_storage, api_service=api_service,
tracked_parameter_parser=tracked_parameter_parser,
environment=environment, web_browser=web_browser,
project=project, inputs=inputs,
neptune_exec_factory=None, name=CommandNames.SEND)
self._rest_api_url = self.config.rest_url
self.logger.debug("Rest API url: %s", self._rest_api_url)
self.worker = worker
self.experiment_executor_factory = experiment_executor_factory
def run(self, args):
self._ensure_executable_exists()
self.prepare(args)
self.experiment_ids = self._create_experiments(args).experiment_ids
self._close_experiment_creation_message()
self._configure_experiments(self.experiment_ids)
def _get_remote_params(self):
inputs = self._get_inputs()
string_params = self._get_string_params()
token = self._get_token()
return QueuedRemoteExperimentParams(inputs=inputs,
environment=self.environment,
worker_type=self.worker,
string_params=string_params,
token=json.dumps(token, sort_keys=True))
def _get_string_params(self):
string_params = []
ml_framework = self.config.ml_framework
log_channels = self.config.log_channels
pip_requirements_file = self.config.pip_requirements_file
for log_channel in log_channels:
string_params.append(StringParam(name=CommonParametersConfigurator.LOG_CHANNEL, value=str(log_channel)))
if ml_framework:
string_params.append(StringParam(name=ConfigKeys.ML_FRAMEWORK, value=ml_framework))
if pip_requirements_file:
string_params.append(create_string_param(pip_requirements_file))
return string_params
def _get_token(self):
offline_token = self.experiment_executor_factory.offline_token_storage_service.load()
keycloak_api_service = self.experiment_executor_factory.keycloak_api_service
return keycloak_api_service.request_token_refresh(offline_token.refresh_token).raw
def _get_inputs(self):
inputs = []
for entry in self.inputs:
split_list = entry.split(':', 1)
destination = split_list[1] if len(split_list) == 2 else ''
inputs.append(InputPath(source=split_list[0], destination=destination))
return inputs
class NeptuneRunFactory(object):
def __init__(self, api_service, config, local_storage, tracked_parameter_parser, web_browser,
experiment_executor_factory):
self.api_service = api_service
self.config = config
self.local_storage = local_storage
self.tracked_parameter_parser = tracked_parameter_parser
self.web_browser = web_browser
self.experiment_executor_factory = experiment_executor_factory
def create(self, is_local, neptune_exec_factory, environment=None, worker=None, inputs=None):
web_browser = self.web_browser
project = ProjectResolver.resolve(
api_service=self.api_service,
organization_name=self.config.organization_name,
project_name=self.config.project_name)
if is_local:
return NeptuneRun(
config=self.config,
local_storage=self.local_storage,
api_service=self.api_service,
tracked_parameter_parser=self.tracked_parameter_parser,
environment=environment,
web_browser=web_browser,
neptune_exec_factory=neptune_exec_factory,
project=project,
name=CommandNames.RUN
)
else:
return NeptuneRunWorker(
config=self.config,
local_storage=self.local_storage,
api_service=self.api_service,
tracked_parameter_parser=self.tracked_parameter_parser,
inputs=inputs,
environment=environment,
worker=worker,
web_browser=web_browser,
project=project,
experiment_executor_factory=self.experiment_executor_factory
)
| [
"neptune.internal.cli.storage.upload_storage_utils.upload_to_storage",
"neptune.internal.cli.commands.enqueue_utils.EnqueueUtils",
"neptune.internal.common.config.neptune_config.load_global_config",
"neptune.internal.cli.storage.populate_storage_utils.CopyProgressBar",
"neptune.internal.common.NeptuneExcept... | [((2775, 2821), 'neptune.internal.cli.commands.enqueue_utils.EnqueueUtils', 'EnqueueUtils', (['config', 'api_service', 'web_browser'], {}), '(config, api_service, web_browser)\n', (2787, 2821), False, 'from neptune.internal.cli.commands.enqueue_utils import EnqueueUtils\n'), ((2957, 2983), 'neptune.internal.cli.commands.parsers.root_parser.NeptuneRootCommandParser', 'NeptuneRootCommandParser', ([], {}), '()\n', (2981, 2983), False, 'from neptune.internal.cli.commands.parsers.root_parser import NeptuneRootCommandParser\n'), ((3238, 3312), 'neptune.internal.cli.experiments.experiment_creator.ExperimentsCreator', 'ExperimentsCreator', ([], {'enqueue_utils': 'self.enqueue_utils', 'project': 'self.project'}), '(enqueue_utils=self.enqueue_utils, project=self.project)\n', (3256, 3312), False, 'from neptune.internal.cli.experiments.experiment_creator import ExperimentsCreator\n'), ((5214, 5272), 'neptune.internal.common.utils.git.send_git_info_if_present', 'send_git_info_if_present', (['self.api_service', 'experiment_ids'], {}), '(self.api_service, experiment_ids)\n', (5238, 5272), False, 'from neptune.internal.common.utils.git import send_git_info_if_present\n'), ((6280, 6322), 'neptune.internal.cli.storage.populate_storage_utils.collect_files', 'collect_files', ([], {'exclude': 'self.config.exclude'}), '(exclude=self.config.exclude)\n', (6293, 6322), False, 'from neptune.internal.cli.storage.populate_storage_utils import CopyProgressBar, collect_files\n'), ((6434, 6490), 'neptune.internal.cli.storage.populate_storage_utils.CopyProgressBar', 'CopyProgressBar', (['data_size', 'u"""Sending sources to server"""'], {}), "(data_size, u'Sending sources to server')\n", (6449, 6490), False, 'from neptune.internal.cli.storage.populate_storage_utils import CopyProgressBar, collect_files\n'), ((6542, 6828), 'neptune.internal.cli.storage.upload_storage_utils.upload_to_storage', 'upload_to_storage', ([], {'files_list': 'files_list', 'dir_list': 'empty_dir_list', 'upload_api_fun': 'self.api_service.upload_experiment_source', 'upload_tarstream_api_fun': 'self.api_service.upload_experiment_source_as_tarstream', 'callback': 'copy_progress_bar.update', 'experiment_id': 'experiment_id'}), '(files_list=files_list, dir_list=empty_dir_list,\n upload_api_fun=self.api_service.upload_experiment_source,\n upload_tarstream_api_fun=self.api_service.\n upload_experiment_source_as_tarstream, callback=copy_progress_bar.\n update, experiment_id=experiment_id)\n', (6559, 6828), False, 'from neptune.internal.cli.storage.upload_storage_utils import upload_to_storage\n'), ((7713, 7736), 'sys.stdout.write', 'sys.stdout.write', (['u"""\n"""'], {}), "(u'\\n')\n", (7729, 7736), False, 'import sys\n'), ((8331, 8391), 'neptune.internal.cli.commands.executing.execution_paths.ExecutionPaths', 'ExecutionPaths', ([], {'sources_location': 'code_snapshot.absolute_path'}), '(sources_location=code_snapshot.absolute_path)\n', (8345, 8391), False, 'from neptune.internal.cli.commands.executing.execution_paths import ExecutionPaths\n'), ((9084, 9154), 'neptune.internal.common.parsers.common_parameters_configurator.CommonParametersConfigurator.append_debug_param', 'CommonParametersConfigurator.append_debug_param', (['exec_args_list', 'debug'], {}), '(exec_args_list, debug)\n', (9131, 9154), False, 'from neptune.internal.common.parsers.common_parameters_configurator import CommonParametersConfigurator\n'), ((9596, 9652), 'neptune.internal.common.parsers.command_parsing_utils.compose_string_command', 'compose_string_command', ([], {'raw_args': "([u'neptune'] + raw_args)"}), "(raw_args=[u'neptune'] + raw_args)\n", (9618, 9652), False, 'from neptune.internal.common.parsers.command_parsing_utils import compose_string_command\n'), ((13691, 13837), 'neptune.internal.common.models.rich_project.ProjectResolver.resolve', 'ProjectResolver.resolve', ([], {'api_service': 'self.api_service', 'organization_name': 'self.config.organization_name', 'project_name': 'self.config.project_name'}), '(api_service=self.api_service, organization_name=\n self.config.organization_name, project_name=self.config.project_name)\n', (13714, 13837), False, 'from neptune.internal.common.models.rich_project import ProjectResolver\n'), ((6132, 6183), 'neptune.internal.common.NeptuneException', 'NeptuneException', (['self.config.NO_EXECUTABLE_MESSAGE'], {}), '(self.config.NO_EXECUTABLE_MESSAGE)\n', (6148, 6183), False, 'from neptune.internal.common import NeptuneException\n'), ((5860, 5901), 'neptune.internal.common.config.neptune_config.load_local_config', 'load_local_config', (['args.known_args.config'], {}), '(args.known_args.config)\n', (5877, 5901), False, 'from neptune.internal.common.config.neptune_config import NeptuneConfig, load_global_config, load_local_config\n'), ((5929, 5956), 'neptune.internal.common.config.neptune_config.load_global_config', 'load_global_config', (['profile'], {}), '(profile)\n', (5947, 5956), False, 'from neptune.internal.common.config.neptune_config import NeptuneConfig, load_global_config, load_local_config\n'), ((8277, 8288), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8286, 8288), False, 'import os\n'), ((11783, 11816), 'json.dumps', 'json.dumps', (['token'], {'sort_keys': '(True)'}), '(token, sort_keys=True)\n', (11793, 11816), False, 'import json\n'), ((12260, 12321), 'neptune.generated.swagger_client.StringParam', 'StringParam', ([], {'name': 'ConfigKeys.ML_FRAMEWORK', 'value': 'ml_framework'}), '(name=ConfigKeys.ML_FRAMEWORK, value=ml_framework)\n', (12271, 12321), False, 'from neptune.generated.swagger_client import InputPath, QueuedRemoteExperimentParams, StringParam\n'), ((12390, 12432), 'neptune.internal.cli.commands.utils.pip_requirements_utils.create_string_param', 'create_string_param', (['pip_requirements_file'], {}), '(pip_requirements_file)\n', (12409, 12432), False, 'from neptune.internal.cli.commands.utils.pip_requirements_utils import create_string_param\n'), ((12986, 13042), 'neptune.generated.swagger_client.InputPath', 'InputPath', ([], {'source': 'split_list[0]', 'destination': 'destination'}), '(source=split_list[0], destination=destination)\n', (12995, 13042), False, 'from neptune.generated.swagger_client import InputPath, QueuedRemoteExperimentParams, StringParam\n')] |
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from datetime import datetime
from typing import List, Dict, Optional, Union
from neptune.new.exceptions import MetadataInconsistency, InternalClientError
from neptune.new.internal.backends.api_model import LeaderboardEntry, AttributeWithProperties, AttributeType
from neptune.new.internal.backends.hosted_neptune_backend import HostedNeptuneBackend
from neptune.new.internal.utils.paths import join_paths, parse_path
class RunsTableEntry:
def __init__(self, backend: HostedNeptuneBackend, _id: uuid.UUID, attributes: List[AttributeWithProperties]):
self._backend = backend
self._id = _id
self._attributes = attributes
def __getitem__(self, path: str) -> 'LeaderboardHandler':
return LeaderboardHandler(self, path)
def get_attribute_type(self, path: str) -> AttributeType:
for attr in self._attributes:
if attr.path == path:
return attr.type
raise ValueError("Could not find {} attribute".format(path))
def _get_key_name(self, path, prefix):
key_name = path[len(prefix):]
return key_name[1:] if len(key_name) > 0 and key_name[0] is '/' else key_name
def get_attributes_from_path(self, path: str):
path_attributes = {}
prefix = path
for attr in self._attributes:
if attr.path.startswith(prefix):
key_name = self._get_key_name(attr.path, prefix)
if '/' in key_name:
split = key_name.split('/')
if split[0] not in path_attributes:
path_attributes[split[0]] = self.get_attributes_from_path(f'{prefix}/{split[0]}')
else:
try:
path_attributes[key_name] = self.get_attribute_value(attr.path)
except MetadataInconsistency as e:
path_attributes[key_name] = e
return path_attributes
def get_attribute_value(self, path: str):
for attr in self._attributes:
if attr.path == path:
_type = attr.type
if _type == AttributeType.RUN_STATE:
return attr.properties.value
if _type == AttributeType.BOOL or _type == AttributeType.INT or _type == AttributeType.FLOAT or _type == AttributeType.STRING or _type == AttributeType.DATETIME:
return attr.properties.value
if _type == AttributeType.FLOAT_SERIES or _type == AttributeType.STRING_SERIES:
return attr.properties.last
if _type == AttributeType.IMAGE_SERIES:
raise MetadataInconsistency("Cannot get value for image series.")
if _type == AttributeType.FILE:
raise MetadataInconsistency("Cannot get value for file attribute. Use download() instead.")
if _type == AttributeType.FILE_SET:
raise MetadataInconsistency("Cannot get value for file set attribute. Use download() instead.")
if _type == AttributeType.STRING_SET:
return set(attr.properties.values)
if _type == AttributeType.GIT_REF:
return attr.properties.commit.commitId
if _type == AttributeType.NOTEBOOK_REF:
return attr.properties.notebookName
raise InternalClientError("Unsupported attribute type {}".format(_type))
raise ValueError("Could not find {} attribute".format(path))
def download_file_attribute(self, path: str, destination: Optional[str]):
for attr in self._attributes:
if attr.path == path:
_type = attr.type
if _type == AttributeType.FILE:
self._backend.download_file(self._id, parse_path(path), destination)
return
raise MetadataInconsistency("Cannot download file from attribute of type {}".format(_type))
raise ValueError("Could not find {} attribute".format(path))
def download_file_set_attribute(self, path: str, destination: Optional[str]):
for attr in self._attributes:
if attr.path == path:
_type = attr.type
if _type == AttributeType.FILE_SET:
self._backend.download_file_set(self._id, parse_path(path), destination)
return
raise MetadataInconsistency("Cannot download ZIP archive from attribute of type {}".format(_type))
raise ValueError("Could not find {} attribute".format(path))
class LeaderboardHandler:
def __init__(self, run: RunsTableEntry, path: str):
self._run = run
self._path = path
def __getitem__(self, path: str) -> 'LeaderboardHandler':
return LeaderboardHandler(self._run, join_paths(self._path, path))
def get(self):
return self._run.get_attribute_value(self._path)
def get_path(self):
return self._run.get_attributes_from_path(self._path)
def download(self, destination: Optional[str]):
attr_type = self._run.get_attribute_type(self._path)
if attr_type == AttributeType.FILE:
return self._run.download_file_attribute(self._path, destination)
elif attr_type == AttributeType.FILE_SET:
return self._run.download_file_set_attribute(self._path, destination)
raise MetadataInconsistency("Cannot download file from attribute of type {}".format(attr_type))
class RunsTable:
def __init__(self, backend: HostedNeptuneBackend, entries: List[LeaderboardEntry]):
self._backend = backend
self._entries = entries
def to_runs(self) -> List[RunsTableEntry]:
return [RunsTableEntry(self._backend, e.id, e.attributes) for e in self._entries]
def to_pandas(self):
# pylint:disable=import-outside-toplevel
import pandas as pd
def make_attribute_value(attribute: AttributeWithProperties) -> Optional[Union[str, float, datetime]]:
_type = attribute.type
_properties = attribute.properties
if _type == AttributeType.RUN_STATE:
return _properties.value
if _type == AttributeType.FLOAT or _type == AttributeType.STRING or _type == AttributeType.DATETIME:
return _properties.value
if _type == AttributeType.FLOAT_SERIES or _type == AttributeType.STRING_SERIES:
return _properties.last
if _type == AttributeType.IMAGE_SERIES:
return None
if _type == AttributeType.FILE or _type == AttributeType.FILE_SET:
return None
if _type == AttributeType.STRING_SET:
return ",".join(_properties.values)
if _type == AttributeType.GIT_REF:
return _properties.commit.commitId
if _type == AttributeType.NOTEBOOK_REF:
return _properties.notebookName
raise InternalClientError("Unsupported attribute type {}".format(_type))
def make_row(entry: LeaderboardEntry) -> Dict[str, Optional[Union[str, float, datetime]]]:
row: Dict[str, Union[str, float, datetime]] = dict()
for attr in entry.attributes:
value = make_attribute_value(attr)
if value is not None:
row[attr.path] = value
return row
def sort_key(attr):
domain = attr.split('/')[0]
if domain == 'sys':
return 0, attr
if domain == 'monitoring':
return 2, attr
return 1, attr
rows = dict((n, make_row(entry)) for (n, entry) in enumerate(self._entries))
df = pd.DataFrame.from_dict(data=rows, orient='index')
df = df.reindex(sorted(df.columns, key=sort_key), axis='columns')
return df
| [
"neptune.new.exceptions.MetadataInconsistency",
"neptune.new.internal.utils.paths.join_paths",
"neptune.new.internal.utils.paths.parse_path"
] | [((8356, 8405), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'rows', 'orient': '"""index"""'}), "(data=rows, orient='index')\n", (8378, 8405), True, 'import pandas as pd\n'), ((5444, 5472), 'neptune.new.internal.utils.paths.join_paths', 'join_paths', (['self._path', 'path'], {}), '(self._path, path)\n', (5454, 5472), False, 'from neptune.new.internal.utils.paths import join_paths, parse_path\n'), ((3253, 3312), 'neptune.new.exceptions.MetadataInconsistency', 'MetadataInconsistency', (['"""Cannot get value for image series."""'], {}), "('Cannot get value for image series.')\n", (3274, 3312), False, 'from neptune.new.exceptions import MetadataInconsistency, InternalClientError\n'), ((3387, 3477), 'neptune.new.exceptions.MetadataInconsistency', 'MetadataInconsistency', (['"""Cannot get value for file attribute. Use download() instead."""'], {}), "(\n 'Cannot get value for file attribute. Use download() instead.')\n", (3408, 3477), False, 'from neptune.new.exceptions import MetadataInconsistency, InternalClientError\n'), ((3551, 3645), 'neptune.new.exceptions.MetadataInconsistency', 'MetadataInconsistency', (['"""Cannot get value for file set attribute. Use download() instead."""'], {}), "(\n 'Cannot get value for file set attribute. Use download() instead.')\n", (3572, 3645), False, 'from neptune.new.exceptions import MetadataInconsistency, InternalClientError\n'), ((4421, 4437), 'neptune.new.internal.utils.paths.parse_path', 'parse_path', (['path'], {}), '(path)\n', (4431, 4437), False, 'from neptune.new.internal.utils.paths import join_paths, parse_path\n'), ((4959, 4975), 'neptune.new.internal.utils.paths.parse_path', 'parse_path', (['path'], {}), '(path)\n', (4969, 4975), False, 'from neptune.new.internal.utils.paths import join_paths, parse_path\n')] |
"""Implements Neptune Logger."""
from abc import ABC
from typing import TYPE_CHECKING, List
from torchflare.callbacks.callback import Callbacks
from torchflare.callbacks.states import CallbackOrder
from torchflare.utils.imports_check import module_available
_AVAILABLE = module_available("neptune")
if _AVAILABLE:
import neptune.new as neptune
else:
neptune = None
if TYPE_CHECKING:
from torchflare.experiments.experiment import Experiment
class NeptuneLogger(Callbacks, ABC):
"""Callback to log your metrics and loss values to Neptune to track your experiments.
For more information about Neptune take a look at [Neptune](https://neptune.ai/)
Args:
project_dir: The qualified name of a project in a form of namespace/project_name
params: The hyperparameters for your model and experiment as a dictionary
experiment_name: The name of the experiment
api_token: User’s API token
tags: List of strings.
Examples:
.. code-block::
from torchflare.callbacks import NeptuneLogger
params = {"bs": 16, "lr": 0.3}
logger = NeptuneLogger(
project_dir="username/Experiments",
params=params,
experiment_name="Experiment_10",
tags=["Experiment", "fold_0"],
api_token="your_secret_api_token",
)
"""
def __init__(
self,
project_dir: str,
api_token: str,
params: dict = None,
experiment_name: str = None,
tags: List[str] = None,
):
"""Constructor for NeptuneLogger Class."""
super(NeptuneLogger, self).__init__(order=CallbackOrder.LOGGING)
self.project_dir = project_dir
self.api_token = api_token
self.params = params
self.tags = tags
self.experiment_name = experiment_name
self.experiment = None
def on_experiment_start(self, experiment: "Experiment"):
"""Start of experiment."""
self.experiment = neptune.init(
project=self.project_dir,
api_token=self.api_token,
tags=self.tags,
name=self.experiment_name,
)
self.experiment["params"] = self.params
def _log_metrics(self, name, value, epoch):
self.experiment[name].log(value=value, step=epoch)
def on_epoch_end(self, experiment: "Experiment"):
"""Method to log metrics and values at the end of very epoch."""
for key, value in experiment.exp_logs.items():
if key != experiment.epoch_key:
epoch = experiment.exp_logs[experiment.epoch_key]
self._log_metrics(name=key, value=value, epoch=epoch)
def on_experiment_end(self, experiment: "Experiment"):
"""Method to end experiment after training is done."""
self.experiment.stop()
self.experiment = None
| [
"neptune.new.init"
] | [((273, 300), 'torchflare.utils.imports_check.module_available', 'module_available', (['"""neptune"""'], {}), "('neptune')\n", (289, 300), False, 'from torchflare.utils.imports_check import module_available\n'), ((2068, 2180), 'neptune.new.init', 'neptune.init', ([], {'project': 'self.project_dir', 'api_token': 'self.api_token', 'tags': 'self.tags', 'name': 'self.experiment_name'}), '(project=self.project_dir, api_token=self.api_token, tags=self.\n tags, name=self.experiment_name)\n', (2080, 2180), True, 'import neptune.new as neptune\n')] |
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
# Paper: "Self-Supervised Relational Reasoning for Representation Learning", <NAME> & <NAME>, NeurIPS 2020
# GitHub: https://github.com/mpatacchiola/self-supervised-relational-reasoning
#
# Implementation of a standard neural network (no self-supervised components).
# This is used as baseline (upper bound) and during linear-evaluation and fine-tuning.
import math
import time
from torch.optim import SGD, Adam
import torch.nn.functional as F
from torch import nn
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
import tqdm
import numpy as np
from utils import AverageMeter
import neptune
class StandardModel(torch.nn.Module):
def __init__(self, feature_extractor, num_classes, tot_epochs=200):
super(StandardModel, self).__init__()
self.num_classes = num_classes
self.tot_epochs = tot_epochs
self.feature_extractor = feature_extractor
feature_size = feature_extractor.feature_size
self.classifier = nn.Linear(feature_size, num_classes)
self.ce = torch.nn.CrossEntropyLoss()
self.optimizer = SGD([{"params": self.feature_extractor.parameters(), "lr": 0.1, "momentum": 0.9},
{"params": self.classifier.parameters(), "lr": 0.1, "momentum": 0.9}])
self.optimizer_lineval = Adam([{"params": self.classifier.parameters(), "lr": 0.001}])
self.optimizer_finetune = Adam([{"params": self.feature_extractor.parameters(), "lr": 0.001, "weight_decay": 1e-5},
{"params": self.classifier.parameters(), "lr": 0.0001, "weight_decay": 1e-5}])
neptune.init(f'valeriobiscione/TestProject')
neptune.create_experiment(tags=['viewpoint-inv'])
def forward(self, x, detach=False):
if(detach): out = self.feature_extractor(x).detach()
else: out = self.feature_extractor(x)
out = self.classifier(out)
return out
def train(self, epoch, train_loader):
start_time = time.time()
self.feature_extractor.train()
self.classifier.train()
start = time.time()
if(epoch==int(self.tot_epochs*0.5) or epoch==int(self.tot_epochs*0.75)):
for i_g, g in enumerate(self.optimizer.param_groups):
g["lr"] *= 0.1 #divide by 10
print("Group[" + str(i_g) + "] learning rate: " + str(g["lr"]))
loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
print(torch.rand(1))
for i, (data, target) in enumerate(train_loader):
if torch.cuda.is_available(): data, target = data.cuda(), target.cuda()
self.optimizer.zero_grad()
output = self.forward(data)
loss = self.ce(output, target)
loss_meter.update(loss.item(), len(target))
loss.backward()
self.optimizer.step()
pred = output.argmax(-1)
correct = pred.eq(target.view_as(pred)).cpu().sum()
accuracy = (100.0 * correct / float(len(target)))
accuracy_meter.update(accuracy.item(), len(target))
if i % 5 == 0:
neptune.send_metric("accuracy", accuracy)
if i % 100 == 0:
print(f"time elapsed 100 iter: {time.time()-start}")
start = time.time()
elapsed_time = time.time() - start_time
print("Epoch [" + str(epoch) + "]"
+ "[" + str(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))) + "]"
+ " loss: " + str(loss_meter.avg)
+ "; acc: " + str(accuracy_meter.avg) + "%")
return loss_meter.avg, accuracy_meter.avg
def linear_evaluation(self, epoch, train_loader):
self.feature_extractor.eval()
self.classifier.train()
minibatch_iter = tqdm.tqdm(train_loader, desc=f"(Epoch {epoch}) Minibatch")
loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
for data, target in minibatch_iter:
if torch.cuda.is_available(): data, target = data.cuda(), target.cuda()
self.optimizer_lineval.zero_grad()
output = self.forward(data, detach=True)
loss = self.ce(output, target)
loss_meter.update(loss.item(), len(target))
loss.backward()
self.optimizer_lineval.step()
pred = output.argmax(-1)
correct = pred.eq(target.view_as(pred)).cpu().sum()
accuracy = (100.0 * correct / float(len(target)))
accuracy_meter.update(accuracy.item(), len(target))
minibatch_iter.set_postfix({"loss": loss_meter.avg, "acc": accuracy_meter.avg})
return loss_meter.avg, accuracy_meter.avg
def finetune(self, epoch, train_loader):
self.feature_extractor.train()
self.classifier.train()
if(epoch==int(self.tot_epochs*0.5) or epoch==int(self.tot_epochs*0.75)):
for i_g, g in enumerate(self.optimizer_finetune.param_groups):
g["lr"] *= 0.1 #divide by 10
print("Group[" + str(i_g) + "] learning rate: " + str(g["lr"]))
minibatch_iter = tqdm.tqdm(train_loader, desc=f"(Epoch {epoch}) Minibatch")
loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
for data, target in minibatch_iter:
if torch.cuda.is_available(): data, target = data.cuda(), target.cuda()
self.optimizer_finetune.zero_grad()
output = self.forward(data)
loss = self.ce(output, target)
loss_meter.update(loss.item(), len(target))
loss.backward()
self.optimizer_finetune.step()
pred = output.argmax(-1)
correct = pred.eq(target.view_as(pred)).cpu().sum()
accuracy = (100.0 * correct / float(len(target)))
accuracy_meter.update(accuracy.item(), len(target))
minibatch_iter.set_postfix({"loss": loss_meter.avg, "acc": accuracy_meter.avg})
return loss_meter.avg, accuracy_meter.avg
def test(self, test_loader):
self.feature_extractor.eval()
self.classifier.eval()
loss_meter = AverageMeter()
accuracy_meter = AverageMeter()
with torch.no_grad():
for data, target in test_loader:
if torch.cuda.is_available(): data, target = data.cuda(), target.cuda()
output = self.forward(data)
loss = self.ce(output, target)
loss_meter.update(loss.item(), len(target))
pred = output.argmax(-1)
correct = pred.eq(target.view_as(pred)).cpu().sum()
accuracy = (100.0 * correct / float(len(target)))
accuracy_meter.update(accuracy.item(), len(target))
return loss_meter.avg, accuracy_meter.avg
def return_embeddings(self, data_loader, portion=0.5):
self.feature_extractor.eval()
embeddings_list = []
target_list = []
with torch.no_grad():
for i, (data, target) in enumerate(data_loader):
if torch.cuda.is_available(): data, target = data.cuda(), target.cuda()
features = self.feature_extractor(data)
embeddings_list.append(features)
target_list.append(target)
if(i>=int(len(data_loader)*portion)): break
return torch.cat(embeddings_list, dim=0).cpu().detach().numpy(), torch.cat(target_list, dim=0).cpu().detach().numpy()
def save(self, file_path="./checkpoint.dat"):
state_dict = self.classifier.state_dict()
feature_extractor_state_dict = self.feature_extractor.state_dict()
optimizer_state_dict = self.optimizer.state_dict()
optimizer_lineval_state_dict = self.optimizer_lineval.state_dict()
optimizer_finetune_state_dict = self.optimizer_finetune.state_dict()
torch.save({"classifier": state_dict,
"backbone": feature_extractor_state_dict,
"optimizer": optimizer_state_dict,
"optimizer_lineval": optimizer_lineval_state_dict,
"optimizer_finetune": optimizer_finetune_state_dict},
file_path)
def load(self, file_path):
checkpoint = torch.load(file_path)
self.classifier.load_state_dict(checkpoint["classifier"])
self.feature_extractor.load_state_dict(checkpoint["backbone"])
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.optimizer_lineval.load_state_dict(checkpoint["optimizer_lineval"])
self.optimizer_finetune.load_state_dict(checkpoint["optimizer_finetune"])
| [
"neptune.send_metric",
"neptune.create_experiment",
"neptune.init"
] | [((1075, 1111), 'torch.nn.Linear', 'nn.Linear', (['feature_size', 'num_classes'], {}), '(feature_size, num_classes)\n', (1084, 1111), False, 'from torch import nn\n'), ((1130, 1157), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1155, 1157), False, 'import torch\n'), ((1713, 1757), 'neptune.init', 'neptune.init', (['f"""valeriobiscione/TestProject"""'], {}), "(f'valeriobiscione/TestProject')\n", (1725, 1757), False, 'import neptune\n'), ((1766, 1815), 'neptune.create_experiment', 'neptune.create_experiment', ([], {'tags': "['viewpoint-inv']"}), "(tags=['viewpoint-inv'])\n", (1791, 1815), False, 'import neptune\n'), ((2086, 2097), 'time.time', 'time.time', ([], {}), '()\n', (2095, 2097), False, 'import time\n'), ((2185, 2196), 'time.time', 'time.time', ([], {}), '()\n', (2194, 2196), False, 'import time\n'), ((2490, 2504), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2502, 2504), False, 'from utils import AverageMeter\n'), ((2530, 2544), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2542, 2544), False, 'from utils import AverageMeter\n'), ((3895, 3953), 'tqdm.tqdm', 'tqdm.tqdm', (['train_loader'], {'desc': 'f"""(Epoch {epoch}) Minibatch"""'}), "(train_loader, desc=f'(Epoch {epoch}) Minibatch')\n", (3904, 3953), False, 'import tqdm\n'), ((3975, 3989), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3987, 3989), False, 'from utils import AverageMeter\n'), ((4015, 4029), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4027, 4029), False, 'from utils import AverageMeter\n'), ((5221, 5279), 'tqdm.tqdm', 'tqdm.tqdm', (['train_loader'], {'desc': 'f"""(Epoch {epoch}) Minibatch"""'}), "(train_loader, desc=f'(Epoch {epoch}) Minibatch')\n", (5230, 5279), False, 'import tqdm\n'), ((5301, 5315), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5313, 5315), False, 'from utils import AverageMeter\n'), ((5341, 5355), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5353, 5355), False, 'from utils import AverageMeter\n'), ((6244, 6258), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6256, 6258), False, 'from utils import AverageMeter\n'), ((6284, 6298), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6296, 6298), False, 'from utils import AverageMeter\n'), ((7980, 8222), 'torch.save', 'torch.save', (["{'classifier': state_dict, 'backbone': feature_extractor_state_dict,\n 'optimizer': optimizer_state_dict, 'optimizer_lineval':\n optimizer_lineval_state_dict, 'optimizer_finetune':\n optimizer_finetune_state_dict}", 'file_path'], {}), "({'classifier': state_dict, 'backbone':\n feature_extractor_state_dict, 'optimizer': optimizer_state_dict,\n 'optimizer_lineval': optimizer_lineval_state_dict, 'optimizer_finetune':\n optimizer_finetune_state_dict}, file_path)\n", (7990, 8222), False, 'import torch\n'), ((8374, 8395), 'torch.load', 'torch.load', (['file_path'], {}), '(file_path)\n', (8384, 8395), False, 'import torch\n'), ((2559, 2572), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (2569, 2572), False, 'import torch\n'), ((2648, 2673), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2671, 2673), False, 'import torch\n'), ((3428, 3439), 'time.time', 'time.time', ([], {}), '()\n', (3437, 3439), False, 'import time\n'), ((4089, 4114), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4112, 4114), False, 'import torch\n'), ((5415, 5440), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5438, 5440), False, 'import torch\n'), ((6312, 6327), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6325, 6327), False, 'import torch\n'), ((7077, 7092), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7090, 7092), False, 'import torch\n'), ((3228, 3269), 'neptune.send_metric', 'neptune.send_metric', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (3247, 3269), False, 'import neptune\n'), ((3392, 3403), 'time.time', 'time.time', ([], {}), '()\n', (3401, 3403), False, 'import time\n'), ((6393, 6418), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6416, 6418), False, 'import torch\n'), ((7174, 7199), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7197, 7199), False, 'import torch\n'), ((3347, 3358), 'time.time', 'time.time', ([], {}), '()\n', (3356, 3358), False, 'import time\n'), ((7466, 7499), 'torch.cat', 'torch.cat', (['embeddings_list'], {'dim': '(0)'}), '(embeddings_list, dim=0)\n', (7475, 7499), False, 'import torch\n'), ((7524, 7553), 'torch.cat', 'torch.cat', (['target_list'], {'dim': '(0)'}), '(target_list, dim=0)\n', (7533, 7553), False, 'import torch\n'), ((3550, 3575), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (3561, 3575), False, 'import time\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from future.builtins import input
from future.utils import raise_from
import base64
import json
import socketserver
import sys
from threading import Thread
from flask import Flask, request
from neptune.internal.cli.commands.command_names import CommandNames
from neptune.internal.cli.commands.framework import Command
from neptune.internal.cli.commands.neptune_command import NeptuneCommand
from neptune.internal.common import NeptuneException, NeptuneInternalException
from neptune.internal.common.api.api_service_factory import create_services
from neptune.internal.common.exceptions.keycloak_exceptions import KeycloakException
from neptune.internal.common.threads.neptune_future import NeptuneFuture
class NeptuneLogout(Command):
name = u'logout'
LOGGED_OUT_MESSAGE = u'You have been successfully logged out.'
def __init__(self, token_storage):
self.token_storage = token_storage
def run(self, *_):
if self.token_storage.contains_token():
self.token_storage.clear()
print(self.LOGGED_OUT_MESSAGE)
class NeptuneManualLogin(Command):
name = u'manual login'
def __init__(self, config, auth_code_url, keycloak_service, token_storage,
api_service, webbrowser):
self.config = config
self.auth_code_url = auth_code_url
self.keycloak_service = keycloak_service
self.token_storage = token_storage
self.api_service = api_service
self.webbrowser = webbrowser
def run(self, *_):
print(u'Please follow {} to obtain authentication token.\n'.format(self.auth_code_url))
self.webbrowser.open(self.auth_code_url)
user_input = input(u'Authentication token: ')
authorization_code, redirect_uri = extract_fields(decode_token(user_input))
offline_token = self.keycloak_service.request_offline_token(
authorization_code=authorization_code,
redirect_uri=redirect_uri)
self.token_storage.save(offline_token)
services = create_services(self.token_storage)
services.api_service.user_logged_to_cli()
print(u'Login successful.')
class NeptuneApiToken(NeptuneCommand):
name = u'api key'
def __init__(self, config, api_service):
super(NeptuneApiToken, self).__init__(CommandNames.API_TOKEN, config, api_service=api_service)
def run(self, *_):
print(self.api_service.get_api_token())
class NeptuneLocalLogin(NeptuneCommand):
name = u'local login'
def __init__(self, config, keycloak_api_service, offline_token_storage_service,
api_service, webbrowser):
super(NeptuneLocalLogin, self).__init__(CommandNames.LOGIN, config, api_service=None)
self._keycloak_api_service = keycloak_api_service
self._offline_token_storage_service = offline_token_storage_service
self._aborted = False
self._stock_server_bind = socketserver.TCPServer.server_bind
self.api_service = api_service
self.webbrowser = webbrowser
def run(self, args):
webserver_port_future, authorization_code_future = self._start_webserver(
self._keycloak_api_service.get_local_login_redirect_url()
)
webserver_port = webserver_port_future.wait()
url = self._keycloak_api_service.get_request_authorization_code_url(
redirect_uri=self._webserver_url(webserver_port))
# Open webbrowser in the seperate thread to avoid freeze in Firefox.
t = Thread(target=self.webbrowser.open, args=(url,))
t.daemon = True
t.start()
print("Waiting for authentication, press Ctrl+C to abort...")
authorization_code = self._wait_for_authorization_code(authorization_code_future)
try:
offline_token = self._request_offline_token(
authorization_code=authorization_code,
redirect_uri=self._webserver_url(webserver_port)
)
except KeycloakException as e:
print(e.message)
sys.exit(1)
self._offline_token_storage_service.save(offline_token)
services = create_services(self._offline_token_storage_service)
# Performs operations needed to be run for a new user on his first login.
# TODO Consider moving this API call to Keycloak.
services.api_service.login()
services.api_service.user_logged_to_cli()
print('Login successful.')
def abort(self):
self._aborted = True
def _start_webserver(self, login_redirect_address):
app = Flask(__name__)
webserver_port_future = self._intercept_server_port()
authorization_code_future = NeptuneFuture()
app.add_url_rule(
rule='/',
endpoint='_authorization_code_request_handler',
view_func=self._authorization_code_request_handler(authorization_code_future, login_redirect_address)
)
webserver_port = Thread(target=app.run, kwargs={"port": 0})
webserver_port.setDaemon(True)
webserver_port.start()
return webserver_port_future, authorization_code_future
def _wait_for_authorization_code(self, authorization_code_future):
while not self._aborted:
authorization_code = authorization_code_future.wait(timeout=1)
if authorization_code:
return authorization_code
def _request_offline_token(self, authorization_code, redirect_uri):
offline_token = self._keycloak_api_service.request_offline_token(
authorization_code=authorization_code,
redirect_uri=redirect_uri
)
return offline_token
def _authorization_code_request_handler(self, authorization_code_future, login_redirect_address):
def handler():
authorization_code_future.set(request.args['code'])
request.environ.get('werkzeug.server.shutdown')()
return '<script type="text/javascript">' \
'window.location.href = "{frontend_address}";' \
'</script>'.format(frontend_address=login_redirect_address)
return handler
def _intercept_server_port(self):
websocket_port_future = NeptuneFuture()
def _server_bind_wrapper(tcp_server):
return_value = self._stock_server_bind(tcp_server)
websocket_port_future.set(tcp_server.socket.getsockname()[1])
socketserver.TCPServer.server_bind = self._stock_server_bind
return return_value
socketserver.TCPServer.server_bind = _server_bind_wrapper
return websocket_port_future
def _webserver_url(self, webserver_port):
return 'http://localhost:{}'.format(webserver_port)
def decode_token(string):
try:
raw_message = base64.b64decode(string)
return json.loads(raw_message.decode('UTF-8'))
except:
raise NeptuneException('Invalid authentication token.')
def extract_fields(message):
try:
redirect_uri = message['redirect_uri']
authorization_code = message['code']
except KeyError as error:
raise_from(NeptuneInternalException('Invalid JSON received from frontend.'), error)
return authorization_code, redirect_uri
| [
"neptune.internal.common.api.api_service_factory.create_services",
"neptune.internal.common.threads.neptune_future.NeptuneFuture",
"neptune.internal.common.NeptuneInternalException",
"neptune.internal.common.NeptuneException"
] | [((2326, 2358), 'future.builtins.input', 'input', (['u"""Authentication token: """'], {}), "(u'Authentication token: ')\n", (2331, 2358), False, 'from future.builtins import input\n'), ((2672, 2707), 'neptune.internal.common.api.api_service_factory.create_services', 'create_services', (['self.token_storage'], {}), '(self.token_storage)\n', (2687, 2707), False, 'from neptune.internal.common.api.api_service_factory import create_services\n'), ((4153, 4201), 'threading.Thread', 'Thread', ([], {'target': 'self.webbrowser.open', 'args': '(url,)'}), '(target=self.webbrowser.open, args=(url,))\n', (4159, 4201), False, 'from threading import Thread\n'), ((4788, 4840), 'neptune.internal.common.api.api_service_factory.create_services', 'create_services', (['self._offline_token_storage_service'], {}), '(self._offline_token_storage_service)\n', (4803, 4840), False, 'from neptune.internal.common.api.api_service_factory import create_services\n'), ((5228, 5243), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (5233, 5243), False, 'from flask import Flask, request\n'), ((5343, 5358), 'neptune.internal.common.threads.neptune_future.NeptuneFuture', 'NeptuneFuture', ([], {}), '()\n', (5356, 5358), False, 'from neptune.internal.common.threads.neptune_future import NeptuneFuture\n'), ((5618, 5660), 'threading.Thread', 'Thread', ([], {'target': 'app.run', 'kwargs': "{'port': 0}"}), "(target=app.run, kwargs={'port': 0})\n", (5624, 5660), False, 'from threading import Thread\n'), ((6878, 6893), 'neptune.internal.common.threads.neptune_future.NeptuneFuture', 'NeptuneFuture', ([], {}), '()\n', (6891, 6893), False, 'from neptune.internal.common.threads.neptune_future import NeptuneFuture\n'), ((7454, 7478), 'base64.b64decode', 'base64.b64decode', (['string'], {}), '(string)\n', (7470, 7478), False, 'import base64\n'), ((7560, 7609), 'neptune.internal.common.NeptuneException', 'NeptuneException', (['"""Invalid authentication token."""'], {}), "('Invalid authentication token.')\n", (7576, 7609), False, 'from neptune.internal.common import NeptuneException, NeptuneInternalException\n'), ((4691, 4702), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4699, 4702), False, 'import sys\n'), ((6530, 6577), 'flask.request.environ.get', 'request.environ.get', (['"""werkzeug.server.shutdown"""'], {}), "('werkzeug.server.shutdown')\n", (6549, 6577), False, 'from flask import Flask, request\n'), ((7791, 7855), 'neptune.internal.common.NeptuneInternalException', 'NeptuneInternalException', (['"""Invalid JSON received from frontend."""'], {}), "('Invalid JSON received from frontend.')\n", (7815, 7855), False, 'from neptune.internal.common import NeptuneException, NeptuneInternalException\n')] |
import neptune
# The init() function called this way assumes that
# NEPTUNE_API_TOKEN environment variable is defined.
neptune.init('vanducng/sandbox')
neptune.create_experiment(name='minimal_example')
# log some metrics
for i in range(100):
neptune.log_metric('loss', 0.95**i)
neptune.log_metric('AUC', 0.96) | [
"neptune.create_experiment",
"neptune.init",
"neptune.log_metric"
] | [((121, 153), 'neptune.init', 'neptune.init', (['"""vanducng/sandbox"""'], {}), "('vanducng/sandbox')\n", (133, 153), False, 'import neptune\n'), ((154, 203), 'neptune.create_experiment', 'neptune.create_experiment', ([], {'name': '"""minimal_example"""'}), "(name='minimal_example')\n", (179, 203), False, 'import neptune\n'), ((287, 318), 'neptune.log_metric', 'neptune.log_metric', (['"""AUC"""', '(0.96)'], {}), "('AUC', 0.96)\n", (305, 318), False, 'import neptune\n'), ((250, 287), 'neptune.log_metric', 'neptune.log_metric', (['"""loss"""', '(0.95 ** i)'], {}), "('loss', 0.95 ** i)\n", (268, 287), False, 'import neptune\n')] |
import sys
from dataclasses import asdict
from pathlib import Path
from pprint import pprint
from typing import Optional
import click
import matplotlib.pyplot as plt
import neptune
import torch
import torchaudio
from click import Context
from torch.nn.functional import mse_loss
from tqdm import trange, tqdm
from reformer_tts.config import Config
from reformer_tts.dataset.convert import PhonemeSequenceCreator
from reformer_tts.dataset.download import download_speech_videos_and_transcripts
from reformer_tts.dataset.preprocess import preprocess_data
from reformer_tts.dataset.visualize import plot_spectrogram, plot_attention_matrix
from reformer_tts.squeeze_wave.modules import SqueezeWave
from reformer_tts.training.train import train_tts as train_tts_function
from reformer_tts.training.train import train_vocoder as train_vocoder_function
from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS
@click.group()
@click.option("-c", "--config", envvar="REFORMER_TTS_CONFIG", default=None)
@click.pass_context
def cli(ctx: Context, config):
ctx.ensure_object(dict)
if config is None:
ctx.obj["CONFIG"] = Config() # use default values
else:
ctx.obj["CONFIG"] = Config.from_yaml_file(config)
@cli.command()
@click.option("-r", "--resume", type=str, default=None, help="Path to checkpoint to resume")
@click.pass_context
def train_tts(ctx: Context, resume: Optional[str]):
config = ctx.obj["CONFIG"]
if resume is not None:
resume = Path(resume)
train_tts_function(config, resume)
@cli.command()
@click.option("-r", "--resume", type=str, default=None, help="Path to checkpoint to resume")
@click.pass_context
def train_vocoder(ctx: Context, resume: str):
config = ctx.obj["CONFIG"]
if resume is not None:
resume = Path(resume)
train_vocoder_function(config, resume)
@cli.command()
@click.pass_context
def download(ctx: Context):
config = ctx.obj["CONFIG"]
download_speech_videos_and_transcripts(
url=config.dataset.source_url,
transcript_directory=config.dataset.structure.transcript_directory,
video_directory=config.dataset.structure.video_directory
)
@cli.command()
@click.pass_context
def preprocess(ctx: Context):
config = ctx.obj["CONFIG"]
preprocess_data(
trump_speaker_names=config.dataset.trump_speaker_names,
transcript_directory=config.transcript_directory,
merged_transcript_csv_path=config.merged_transcript_csv_path,
audio_directory=config.audio_directory,
video_directory=config.video_directory,
spectrogram_dir=config.mel_directory,
nltk_data_directory=config.nltk_data_directory,
audio_format=config.dataset.audio_format,
mel_format=config.dataset.mel_format,
use_tacotron2_spectrograms=config.dataset.use_tacotron2_spectrograms
)
@cli.command()
@click.option("-r", "--reformer-checkpoint", type=str, required=True, help="Path to reformer checkpoint")
@click.option("-s", "--squeeze-wave-checkpoint", type=str, required=True, help="Path to squeezewave checkpoint")
@click.option("-o", "--output-dir", type=str, required=True, help="Path where outputs will be saved")
@click.option("-m", "--max-samples", type=int, default=None, help="Maximum number of total generated samples")
@click.pass_context
def predict_samples(
ctx: Context,
reformer_checkpoint: str,
squeeze_wave_checkpoint: str,
output_dir: str,
max_samples: Optional[int]
):
"""
Generates predictions on the test_set portion of text-to-spectrogram dataset.
Provided config must be compatible with both reformer and squeezewave (keys and
values in config structure must be the same as the ones used during their training)
"""
config = ctx.obj["CONFIG"]
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
device = torch.device('cuda')
on_gpu = True
else:
device = torch.device('cpu')
on_gpu = False
reformer = LitReformerTTS.load_from_checkpoint(reformer_checkpoint, config=config)
reformer = reformer.eval()
squeeze_wave = LitSqueezeWave.load_from_checkpoint(
squeeze_wave_checkpoint,
config=config,
on_gpu=on_gpu
)
squeeze_wave = SqueezeWave.remove_norms(squeeze_wave.model)
squeeze_wave = squeeze_wave.eval()
results = list()
reformer.prepare_data()
if len(reformer.test_set) == 0:
dataset = reformer.val_set
else:
dataset = reformer.test_set
if max_samples is None:
max_samples = len(dataset)
with torch.no_grad():
# todo: use ReformerTTS.infer
for test_sample_idx in trange(max_samples, desc="predicting"):
sample = dataset[test_sample_idx]
phonemes_in = sample['phonemes'].unsqueeze(0).to(device=device)
spectrogram_in = sample['spectrogram'].unsqueeze(0).to(device=device)
# todo: we shouldn't pass target spectrogram into reformer:
spectrogram_out, stop_out = reformer(phonemes_in, spectrogram_in[:, :-1, :])
mse = mse_loss(spectrogram_out, spectrogram_in[:, 1:, :])
cutoff: int = stop_out.argmax()
spectrogram_out: torch.Tensor = spectrogram_out.transpose(1, 2)
spectrogram_out = spectrogram_out[:, :, :cutoff]
audio_out = squeeze_wave.infer(spectrogram_out)
results.append({
"spectrogram": spectrogram_out.cpu(),
"spectrogram_mse": float(mse.cpu().numpy()),
"audio": audio_out.cpu(),
"idx": sample["idx"],
})
best_mse = min(results, key=lambda r: r["spectrogram_mse"])["spectrogram_mse"]
worst_mse = max(results, key=lambda r: r["spectrogram_mse"])["spectrogram_mse"]
mean_mse = sum(r["spectrogram_mse"] for r in results) / float(len(results))
print(f"{best_mse=:.4f}, {worst_mse=:.4f}, {mean_mse=:.4f}")
for result in tqdm(results, desc="saving"):
filename = f"pred-{result['idx']}-idx_{result['spectrogram_mse']:.4f}-mse"
spectrogram_path = output_dir / f"{filename}.png"
plot_spectrogram(result["spectrogram"], scale=False)
plt.savefig(str(spectrogram_path))
plt.close()
audio_path = output_dir / f"{filename}.wav"
torchaudio.save(
str(audio_path),
result["audio"],
config.dataset.audio_format.sampling_rate
)
print(f"Results saved to {output_dir.resolve()}")
@cli.command()
@click.option("-r", "--reformer-checkpoint", type=str, required=True, help="Path to reformer checkpoint")
@click.option("-s", "--squeeze-wave-checkpoint", type=str, required=True, help="Path to squeezewave checkpoint")
@click.option("-o", "--output-dir", type=str, required=True, help="Path where outputs will be saved")
@click.option("-S", "--strategy", type=str, default="concat", help="Strategy for TTS inference ('concat' or 'replace')")
@click.pass_context
def predict_from_text(
ctx: Context,
reformer_checkpoint: str,
squeeze_wave_checkpoint: str,
output_dir: str,
strategy: str,
):
config: Config = ctx.obj["CONFIG"]
# todo: refactor - most of this is the same as in predict_samples
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
on_gpu = True
device = torch.device('cuda')
else:
on_gpu = False
device = torch.device('cpu')
reformer = LitReformerTTS.load_from_checkpoint(reformer_checkpoint, config=config)
reformer = reformer.eval()
squeeze_wave = LitSqueezeWave.load_from_checkpoint(
squeeze_wave_checkpoint,
config=config,
on_gpu=on_gpu
)
squeeze_wave = SqueezeWave.remove_norms(squeeze_wave.model)
squeeze_wave = squeeze_wave.eval()
phonemizer = PhonemeSequenceCreator(config.nltk_data_directory)
phoneme_encoder = reformer.get_phoneme_encoder()
print("Type a sentence and press enter to convert it to speech:")
with torch.no_grad():
for idx, line in enumerate(sys.stdin):
phonemes = phonemizer.phonemize(line)
print(f"Predicting from {phonemes=}...")
phonemes = " ".join(phonemes)
phonemes = phoneme_encoder(phonemes).unsqueeze(0).to(device=device)
stop_at_stop_token = config.experiment.tts_training.stop_loss_weight != 0.
spectrogram, stop = reformer.model.infer(
phonemes,
combine_strategy=strategy,
verbose=True,
stop_at_stop_token=stop_at_stop_token,
)
spectrogram = spectrogram[:, :, :stop.item()]
audio_out = squeeze_wave.infer(spectrogram)
spectrogram_path = output_dir / f"pred-stdin-{idx}.png"
plot_spectrogram(spectrogram.cpu(), scale=False)
plt.savefig(str(spectrogram_path))
plt.close()
audio_path = output_dir / f"pred-{strategy}-stdin-{idx}.wav"
torchaudio.save(
str(audio_path),
audio_out.cpu(),
config.dataset.audio_format.sampling_rate
)
print(f"Output saved to {audio_path.resolve()}")
@cli.command()
@click.option("-s", "--squeeze-wave-checkpoint", type=str, required=True, help="Path to squeezewave checkpoint")
@click.option("-o", "--output-dir", type=str, required=True, help="Path where outputs will be saved")
@click.pass_context
def predict_from_mel(ctx: Context, squeeze_wave_checkpoint: str, output_dir: str):
config: Config = ctx.obj["CONFIG"]
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
on_gpu = torch.cuda.is_available()
squeeze_wave = LitSqueezeWave.load_from_checkpoint(
squeeze_wave_checkpoint, config=config, on_gpu=False
)
squeeze_wave = SqueezeWave.remove_norms(squeeze_wave.model)
squeeze_wave = squeeze_wave.eval()
trump_spec = torch.load('data/preprocessed-tacotron2/mel/speech00_0000.pt')
lj_spec = torch.load('data/lj-speech-tacotron2/mel/LJ001-0001.pt')
prefix = str(Path(squeeze_wave_checkpoint).name)
for spec, suffix in zip([trump_spec, lj_spec], ["trump", "lj"]):
audio = squeeze_wave.infer(spec)
audio_path = output_dir / f"{prefix}-{suffix}.wav"
torchaudio.save(
str(audio_path), audio.cpu(), sample_rate=config.dataset.audio_format.sampling_rate
)
print(f"Results saved to {output_dir}")
@cli.command()
@click.option("-r", "--reformer-checkpoint", type=str, required=True, help="Path to reformer checkpoint")
@click.option("-o", "--output-dir", type=str, required=True, help="Path where outputs will be saved")
@click.pass_context
def visualize_attention(ctx, reformer_checkpoint, output_dir):
config: Config = ctx.obj["CONFIG"]
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
on_gpu = True
device = torch.device('cuda')
else:
on_gpu = False
device = torch.device('cpu')
reformer = LitReformerTTS.load_from_checkpoint(
reformer_checkpoint,
config=config,
on_gpu=on_gpu,
)
reformer = reformer.eval()
reformer.prepare_data()
batch = next(iter(reformer.val_dataloader()))
batch = {key: batch[key].to(device=device) for key in batch}
with torch.no_grad():
_, _, _, _, _, attention_matrices = reformer(
batch['phonemes'],
batch['spectrogram'],
batch['stop_tokens'],
batch["loss_mask"],
use_transform=False,
)
attention_matrices = reformer.trim_attention_matrices(
attention_matrices,
batch["stop_tokens"],
)
attention_matrices = [
[matrix.cpu() for matrix in matrices] for matrices in attention_matrices
]
for i, matrices in enumerate(attention_matrices):
for j, matrix in enumerate(matrices):
plot_attention_matrix(matrix)
plt.savefig(output_dir / f"{i}_{j}.png")
@cli.command()
@click.pass_context
def show_config(ctx: Context):
config = ctx.obj["CONFIG"]
pprint(asdict(config))
@cli.command()
@click.option("-o", "--output", type=str, required=True, help="Path where config will be saved")
@click.pass_context
def save_config(ctx: Context, output):
""" Save all config variables (defaults + overrides from config file) """
config = ctx.obj["CONFIG"]
config.to_yaml_file(output)
print(f"Config saved to {output}")
@cli.command()
@click.argument('idx', type=str)
def remove_image_logs(idx):
""" Remove all image logs from experiment"""
proj = neptune.init("reformer-tts/reformer-tts")
exp = proj.get_experiments(idx)[0]
logs = exp.get_channels()
for name, channel in logs.items():
if channel.channelType == 'image':
exp.reset_log(name)
exp.set_property('cleaned_image_logs', True)
if __name__ == "__main__":
cli(obj={})
| [
"neptune.init"
] | [((925, 938), 'click.group', 'click.group', ([], {}), '()\n', (936, 938), False, 'import click\n'), ((940, 1014), 'click.option', 'click.option', (['"""-c"""', '"""--config"""'], {'envvar': '"""REFORMER_TTS_CONFIG"""', 'default': 'None'}), "('-c', '--config', envvar='REFORMER_TTS_CONFIG', default=None)\n", (952, 1014), False, 'import click\n'), ((1262, 1358), 'click.option', 'click.option', (['"""-r"""', '"""--resume"""'], {'type': 'str', 'default': 'None', 'help': '"""Path to checkpoint to resume"""'}), "('-r', '--resume', type=str, default=None, help=\n 'Path to checkpoint to resume')\n", (1274, 1358), False, 'import click\n'), ((1571, 1667), 'click.option', 'click.option', (['"""-r"""', '"""--resume"""'], {'type': 'str', 'default': 'None', 'help': '"""Path to checkpoint to resume"""'}), "('-r', '--resume', type=str, default=None, help=\n 'Path to checkpoint to resume')\n", (1583, 1667), False, 'import click\n'), ((2892, 3001), 'click.option', 'click.option', (['"""-r"""', '"""--reformer-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to reformer checkpoint"""'}), "('-r', '--reformer-checkpoint', type=str, required=True, help=\n 'Path to reformer checkpoint')\n", (2904, 3001), False, 'import click\n'), ((2998, 3113), 'click.option', 'click.option', (['"""-s"""', '"""--squeeze-wave-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to squeezewave checkpoint"""'}), "('-s', '--squeeze-wave-checkpoint', type=str, required=True,\n help='Path to squeezewave checkpoint')\n", (3010, 3113), False, 'import click\n'), ((3111, 3216), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path where outputs will be saved"""'}), "('-o', '--output-dir', type=str, required=True, help=\n 'Path where outputs will be saved')\n", (3123, 3216), False, 'import click\n'), ((3213, 3327), 'click.option', 'click.option', (['"""-m"""', '"""--max-samples"""'], {'type': 'int', 'default': 'None', 'help': '"""Maximum number of total generated samples"""'}), "('-m', '--max-samples', type=int, default=None, help=\n 'Maximum number of total generated samples')\n", (3225, 3327), False, 'import click\n'), ((6751, 6860), 'click.option', 'click.option', (['"""-r"""', '"""--reformer-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to reformer checkpoint"""'}), "('-r', '--reformer-checkpoint', type=str, required=True, help=\n 'Path to reformer checkpoint')\n", (6763, 6860), False, 'import click\n'), ((6857, 6972), 'click.option', 'click.option', (['"""-s"""', '"""--squeeze-wave-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to squeezewave checkpoint"""'}), "('-s', '--squeeze-wave-checkpoint', type=str, required=True,\n help='Path to squeezewave checkpoint')\n", (6869, 6972), False, 'import click\n'), ((6970, 7075), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path where outputs will be saved"""'}), "('-o', '--output-dir', type=str, required=True, help=\n 'Path where outputs will be saved')\n", (6982, 7075), False, 'import click\n'), ((7072, 7196), 'click.option', 'click.option', (['"""-S"""', '"""--strategy"""'], {'type': 'str', 'default': '"""concat"""', 'help': '"""Strategy for TTS inference (\'concat\' or \'replace\')"""'}), '(\'-S\', \'--strategy\', type=str, default=\'concat\', help=\n "Strategy for TTS inference (\'concat\' or \'replace\')")\n', (7084, 7196), False, 'import click\n'), ((9600, 9715), 'click.option', 'click.option', (['"""-s"""', '"""--squeeze-wave-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to squeezewave checkpoint"""'}), "('-s', '--squeeze-wave-checkpoint', type=str, required=True,\n help='Path to squeezewave checkpoint')\n", (9612, 9715), False, 'import click\n'), ((9713, 9818), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path where outputs will be saved"""'}), "('-o', '--output-dir', type=str, required=True, help=\n 'Path where outputs will be saved')\n", (9725, 9818), False, 'import click\n'), ((10875, 10984), 'click.option', 'click.option', (['"""-r"""', '"""--reformer-checkpoint"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path to reformer checkpoint"""'}), "('-r', '--reformer-checkpoint', type=str, required=True, help=\n 'Path to reformer checkpoint')\n", (10887, 10984), False, 'import click\n'), ((10981, 11086), 'click.option', 'click.option', (['"""-o"""', '"""--output-dir"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path where outputs will be saved"""'}), "('-o', '--output-dir', type=str, required=True, help=\n 'Path where outputs will be saved')\n", (10993, 11086), False, 'import click\n'), ((12680, 12780), 'click.option', 'click.option', (['"""-o"""', '"""--output"""'], {'type': 'str', 'required': '(True)', 'help': '"""Path where config will be saved"""'}), "('-o', '--output', type=str, required=True, help=\n 'Path where config will be saved')\n", (12692, 12780), False, 'import click\n'), ((13033, 13064), 'click.argument', 'click.argument', (['"""idx"""'], {'type': 'str'}), "('idx', type=str)\n", (13047, 13064), False, 'import click\n'), ((1518, 1552), 'reformer_tts.training.train.train_tts', 'train_tts_function', (['config', 'resume'], {}), '(config, resume)\n', (1536, 1552), True, 'from reformer_tts.training.train import train_tts as train_tts_function\n'), ((1821, 1859), 'reformer_tts.training.train.train_vocoder', 'train_vocoder_function', (['config', 'resume'], {}), '(config, resume)\n', (1843, 1859), True, 'from reformer_tts.training.train import train_vocoder as train_vocoder_function\n'), ((1960, 2163), 'reformer_tts.dataset.download.download_speech_videos_and_transcripts', 'download_speech_videos_and_transcripts', ([], {'url': 'config.dataset.source_url', 'transcript_directory': 'config.dataset.structure.transcript_directory', 'video_directory': 'config.dataset.structure.video_directory'}), '(url=config.dataset.source_url,\n transcript_directory=config.dataset.structure.transcript_directory,\n video_directory=config.dataset.structure.video_directory)\n', (1998, 2163), False, 'from reformer_tts.dataset.download import download_speech_videos_and_transcripts\n'), ((2288, 2817), 'reformer_tts.dataset.preprocess.preprocess_data', 'preprocess_data', ([], {'trump_speaker_names': 'config.dataset.trump_speaker_names', 'transcript_directory': 'config.transcript_directory', 'merged_transcript_csv_path': 'config.merged_transcript_csv_path', 'audio_directory': 'config.audio_directory', 'video_directory': 'config.video_directory', 'spectrogram_dir': 'config.mel_directory', 'nltk_data_directory': 'config.nltk_data_directory', 'audio_format': 'config.dataset.audio_format', 'mel_format': 'config.dataset.mel_format', 'use_tacotron2_spectrograms': 'config.dataset.use_tacotron2_spectrograms'}), '(trump_speaker_names=config.dataset.trump_speaker_names,\n transcript_directory=config.transcript_directory,\n merged_transcript_csv_path=config.merged_transcript_csv_path,\n audio_directory=config.audio_directory, video_directory=config.\n video_directory, spectrogram_dir=config.mel_directory,\n nltk_data_directory=config.nltk_data_directory, audio_format=config.\n dataset.audio_format, mel_format=config.dataset.mel_format,\n use_tacotron2_spectrograms=config.dataset.use_tacotron2_spectrograms)\n', (2303, 2817), False, 'from reformer_tts.dataset.preprocess import preprocess_data\n'), ((3841, 3857), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (3845, 3857), False, 'from pathlib import Path\n'), ((3916, 3941), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3939, 3941), False, 'import torch\n'), ((4151, 4222), 'reformer_tts.training.wrappers.LitReformerTTS.load_from_checkpoint', 'LitReformerTTS.load_from_checkpoint', (['reformer_checkpoint'], {'config': 'config'}), '(reformer_checkpoint, config=config)\n', (4186, 4222), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((4273, 4367), 'reformer_tts.training.wrappers.LitSqueezeWave.load_from_checkpoint', 'LitSqueezeWave.load_from_checkpoint', (['squeeze_wave_checkpoint'], {'config': 'config', 'on_gpu': 'on_gpu'}), '(squeeze_wave_checkpoint, config=config,\n on_gpu=on_gpu)\n', (4308, 4367), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((4413, 4457), 'reformer_tts.squeeze_wave.modules.SqueezeWave.remove_norms', 'SqueezeWave.remove_norms', (['squeeze_wave.model'], {}), '(squeeze_wave.model)\n', (4437, 4457), False, 'from reformer_tts.squeeze_wave.modules import SqueezeWave\n'), ((7507, 7523), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (7511, 7523), False, 'from pathlib import Path\n'), ((7582, 7607), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7605, 7607), False, 'import torch\n'), ((7817, 7888), 'reformer_tts.training.wrappers.LitReformerTTS.load_from_checkpoint', 'LitReformerTTS.load_from_checkpoint', (['reformer_checkpoint'], {'config': 'config'}), '(reformer_checkpoint, config=config)\n', (7852, 7888), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((7940, 8034), 'reformer_tts.training.wrappers.LitSqueezeWave.load_from_checkpoint', 'LitSqueezeWave.load_from_checkpoint', (['squeeze_wave_checkpoint'], {'config': 'config', 'on_gpu': 'on_gpu'}), '(squeeze_wave_checkpoint, config=config,\n on_gpu=on_gpu)\n', (7975, 8034), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((8080, 8124), 'reformer_tts.squeeze_wave.modules.SqueezeWave.remove_norms', 'SqueezeWave.remove_norms', (['squeeze_wave.model'], {}), '(squeeze_wave.model)\n', (8104, 8124), False, 'from reformer_tts.squeeze_wave.modules import SqueezeWave\n'), ((8182, 8232), 'reformer_tts.dataset.convert.PhonemeSequenceCreator', 'PhonemeSequenceCreator', (['config.nltk_data_directory'], {}), '(config.nltk_data_directory)\n', (8204, 8232), False, 'from reformer_tts.dataset.convert import PhonemeSequenceCreator\n'), ((9974, 9990), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (9978, 9990), False, 'from pathlib import Path\n'), ((10055, 10080), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10078, 10080), False, 'import torch\n'), ((10100, 10193), 'reformer_tts.training.wrappers.LitSqueezeWave.load_from_checkpoint', 'LitSqueezeWave.load_from_checkpoint', (['squeeze_wave_checkpoint'], {'config': 'config', 'on_gpu': '(False)'}), '(squeeze_wave_checkpoint, config=config,\n on_gpu=False)\n', (10135, 10193), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((10223, 10267), 'reformer_tts.squeeze_wave.modules.SqueezeWave.remove_norms', 'SqueezeWave.remove_norms', (['squeeze_wave.model'], {}), '(squeeze_wave.model)\n', (10247, 10267), False, 'from reformer_tts.squeeze_wave.modules import SqueezeWave\n'), ((10325, 10387), 'torch.load', 'torch.load', (['"""data/preprocessed-tacotron2/mel/speech00_0000.pt"""'], {}), "('data/preprocessed-tacotron2/mel/speech00_0000.pt')\n", (10335, 10387), False, 'import torch\n'), ((10402, 10458), 'torch.load', 'torch.load', (['"""data/lj-speech-tacotron2/mel/LJ001-0001.pt"""'], {}), "('data/lj-speech-tacotron2/mel/LJ001-0001.pt')\n", (10412, 10458), False, 'import torch\n'), ((11221, 11237), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (11225, 11237), False, 'from pathlib import Path\n'), ((11295, 11320), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11318, 11320), False, 'import torch\n'), ((11529, 11619), 'reformer_tts.training.wrappers.LitReformerTTS.load_from_checkpoint', 'LitReformerTTS.load_from_checkpoint', (['reformer_checkpoint'], {'config': 'config', 'on_gpu': 'on_gpu'}), '(reformer_checkpoint, config=config,\n on_gpu=on_gpu)\n', (11564, 11619), False, 'from reformer_tts.training.wrappers import LitSqueezeWave, LitReformerTTS\n'), ((13153, 13194), 'neptune.init', 'neptune.init', (['"""reformer-tts/reformer-tts"""'], {}), "('reformer-tts/reformer-tts')\n", (13165, 13194), False, 'import neptune\n'), ((1145, 1153), 'reformer_tts.config.Config', 'Config', ([], {}), '()\n', (1151, 1153), False, 'from reformer_tts.config import Config\n'), ((1214, 1243), 'reformer_tts.config.Config.from_yaml_file', 'Config.from_yaml_file', (['config'], {}), '(config)\n', (1235, 1243), False, 'from reformer_tts.config import Config\n'), ((1501, 1513), 'pathlib.Path', 'Path', (['resume'], {}), '(resume)\n', (1505, 1513), False, 'from pathlib import Path\n'), ((1804, 1816), 'pathlib.Path', 'Path', (['resume'], {}), '(resume)\n', (1808, 1816), False, 'from pathlib import Path\n'), ((3951, 4004), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (3980, 4004), False, 'import torch\n'), ((4022, 4042), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4034, 4042), False, 'import torch\n'), ((4092, 4111), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4104, 4111), False, 'import torch\n'), ((4739, 4754), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4752, 4754), False, 'import torch\n'), ((4825, 4863), 'tqdm.trange', 'trange', (['max_samples'], {'desc': '"""predicting"""'}), "(max_samples, desc='predicting')\n", (4831, 4863), False, 'from tqdm import trange, tqdm\n'), ((6135, 6163), 'tqdm.tqdm', 'tqdm', (['results'], {'desc': '"""saving"""'}), "(results, desc='saving')\n", (6139, 6163), False, 'from tqdm import trange, tqdm\n'), ((7617, 7670), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (7646, 7670), False, 'import torch\n'), ((7710, 7730), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7722, 7730), False, 'import torch\n'), ((7781, 7800), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7793, 7800), False, 'import torch\n'), ((8366, 8381), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8379, 8381), False, 'import torch\n'), ((11330, 11383), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (11359, 11383), False, 'import torch\n'), ((11423, 11443), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (11435, 11443), False, 'import torch\n'), ((11494, 11513), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (11506, 11513), False, 'import torch\n'), ((11830, 11845), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11843, 11845), False, 'import torch\n'), ((12646, 12660), 'dataclasses.asdict', 'asdict', (['config'], {}), '(config)\n', (12652, 12660), False, 'from dataclasses import asdict\n'), ((5250, 5301), 'torch.nn.functional.mse_loss', 'mse_loss', (['spectrogram_out', 'spectrogram_in[:, 1:, :]'], {}), '(spectrogram_out, spectrogram_in[:, 1:, :])\n', (5258, 5301), False, 'from torch.nn.functional import mse_loss\n'), ((6327, 6379), 'reformer_tts.dataset.visualize.plot_spectrogram', 'plot_spectrogram', (["result['spectrogram']"], {'scale': '(False)'}), "(result['spectrogram'], scale=False)\n", (6343, 6379), False, 'from reformer_tts.dataset.visualize import plot_spectrogram, plot_attention_matrix\n'), ((6439, 6450), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6448, 6450), True, 'import matplotlib.pyplot as plt\n'), ((9268, 9279), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9277, 9279), True, 'import matplotlib.pyplot as plt\n'), ((10477, 10506), 'pathlib.Path', 'Path', (['squeeze_wave_checkpoint'], {}), '(squeeze_wave_checkpoint)\n', (10481, 10506), False, 'from pathlib import Path\n'), ((12453, 12482), 'reformer_tts.dataset.visualize.plot_attention_matrix', 'plot_attention_matrix', (['matrix'], {}), '(matrix)\n', (12474, 12482), False, 'from reformer_tts.dataset.visualize import plot_spectrogram, plot_attention_matrix\n'), ((12495, 12535), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output_dir / f'{i}_{j}.png')"], {}), "(output_dir / f'{i}_{j}.png')\n", (12506, 12535), True, 'import matplotlib.pyplot as plt\n')] |
import itertools
import json
import logging
import os
import tempfile
from typing import List, Any, Dict, Tuple
import gin
import numpy as np
import pandas as pd
from experiments.src.gin import get_default_experiment_name, parse_gin_str
from experiments.src.training.training_utils import get_metric_cls
GridResultDict = Dict[frozenset, Dict[int, Dict[str, float]]]
def get_grid_results_dict() -> GridResultDict:
grid_results_dict = create_grid_results_dict()
study_name = get_default_experiment_name()
if gin.query_parameter('train.use_neptune'):
fetch_grid_results_dict_from_neptune(grid_results_dict, study_name)
else:
fetch_grid_results_dict_from_local(grid_results_dict, study_name)
return grid_results_dict
def create_grid_results_dict() -> GridResultDict:
seeds = get_params_dict()['data.split_seed']
hps_dict = get_hps_dict()
hps_product_list = _get_params_product_list(hps_dict)
return {frozenset(hps.items()): {seed: None for seed in seeds} for hps in hps_product_list}
# neptune
def fetch_grid_results_dict_from_neptune(results_dict: GridResultDict, study_name: str) -> None:
import neptune
user_name = gin.query_parameter('neptune.user_name')
project_name = gin.query_parameter('neptune.project_name')
project = neptune.init(f'{user_name}/{project_name}')
df = download_dataframe_from_neptune(project, study_name)
hps_names = list(get_hps_dict().keys())
for idx, row in df.iterrows():
hps = dict(row[hps_names])
seed = int(row['data.split_seed'])
results = download_results_from_neptune(project, row['id'], 'results.json')
results_dict[frozenset(hps.items())][seed] = results
def download_dataframe_from_neptune(neptune_project, name: str) -> pd.DataFrame:
df = neptune_project.get_leaderboard(state='succeeded', tag=name)
params_dict = get_params_dict()
df.rename(columns={f'parameter_{p}': p for p in params_dict.keys()}, inplace=True)
df = df[['id', 'name'] + list(params_dict.keys())]
for param_name, param_value in params_dict.items():
dtype = type(param_value[0])
df[param_name] = df[param_name].astype(dtype) if dtype != int else df[param_name].astype(float).astype(int)
return df
def download_results_from_neptune(neptune_project, id: str, artifact_name: str) -> Dict[str, float]:
try:
with tempfile.TemporaryDirectory() as tmp:
experiment = neptune_project.get_experiments(id=id)[0]
experiment.download_artifact(artifact_name, tmp)
return load_results_from_local(os.path.join(tmp, artifact_name))
except Exception as e:
raise RuntimeError(f'Downloading artifacts failed on {id}. Exception: {e}')
# local
def fetch_grid_results_dict_from_local(grid_results_dict: GridResultDict, study_name: str) -> None:
root_path = gin.query_parameter('optuna.root_path')
save_path = os.path.join(root_path, study_name)
hps_names = set(get_hps_dict().keys())
for path in os.listdir(save_path):
trial_path = os.path.join(save_path, path)
if os.path.isdir(trial_path):
experiment_path = os.path.join(trial_path, study_name)
gin_path = os.path.join(experiment_path, "gin-config-essential.txt")
with open(gin_path, 'r') as fp:
gin_str = fp.read()
gin_dict = parse_gin_str(gin_str)
hps = {k: v for k, v in gin_dict.items() if k in hps_names}
seed = gin_dict['data.split_seed']
results = load_results_from_local(os.path.join(experiment_path, 'results.json'))
grid_results_dict[frozenset(hps.items())][seed] = results
def load_results_from_local(output_path: str) -> Dict[str, float]:
return json.load(open(output_path, 'r'))[0]
# results check
def check_grid_results_dict(grid_results_dict: GridResultDict) -> None:
any_missing_result = False
for params, results in grid_results_dict.items():
for seed, result in results.items():
if result is None:
any_missing_result = True
logging.error(f'Results for seed: {seed} and hps: {dict(params)} are missing')
assert not any_missing_result
# helper methods
def get_params_dict() -> Dict[str, Any]:
params_dict: dict = gin.query_parameter('optuna.params')
return {k: v.__deepcopy__(None) if isinstance(v, gin.config.ConfigurableReference) else v
for k, v in params_dict.items()}
def get_hps_dict() -> Dict[str, Any]:
params_dict = get_params_dict()
return {k: v for k, v in params_dict.items() if k != 'data.split_seed'}
def _get_params_product_list(params_dict: Dict[str, Any]) -> List[dict]:
params_dict = {k: list(map(lambda x: (k, x), v)) for k, v in params_dict.items()}
return [dict(params) for params in itertools.product(*params_dict.values())]
# compute results
def compute_result(grid_results_dict: GridResultDict) -> Tuple[frozenset, Dict[str, float], str]:
metric_cls = get_metric_cls()
metric_name = metric_cls.__name__.lower()
agg_fn = min if metric_cls.direction == 'minimize' else max
valid_metric = f'valid_{metric_name}'
test_metric = f'test_{metric_name}'
results = [(params, average_dictionary(results)) for params, results in grid_results_dict.items()]
results = [x for x in results if not np.isnan(x[1][valid_metric])]
assert len(results) > 0, "'results' contains only nan!"
params, result = agg_fn(results, key=lambda x: x[1][valid_metric])
return params, result, test_metric
def average_dictionary(dictionary: Dict[int, Dict[str, float]]) -> Dict[str, float]:
values = list(dictionary.values())[0].keys()
agg = {v: [dictionary[seed][v] for seed in dictionary.keys()] for v in values}
mean = {k: np.mean(lst) for k, lst in agg.items()}
std = {f'{k}_std': np.std(lst) for k, lst in agg.items()}
return {**mean, **std}
def print_result(params: frozenset, result: Dict[str, float], test_metric: str) -> None:
print(f'Best params: {dict(params)}')
for key in sorted(key for key in result.keys() if not key.endswith('_std')):
print(f'\t{key}: {rounded_mean_std(result, key)}')
print(f'Result: {rounded_mean_std(result, test_metric)}')
def rounded_mean_std(result: Dict[str, float], key: str) -> str:
return f'{round(result[key], 3)} \u00B1 {round(result[f"{key}_std"], 3)}'
| [
"neptune.init"
] | [((486, 515), 'experiments.src.gin.get_default_experiment_name', 'get_default_experiment_name', ([], {}), '()\n', (513, 515), False, 'from experiments.src.gin import get_default_experiment_name, parse_gin_str\n'), ((523, 563), 'gin.query_parameter', 'gin.query_parameter', (['"""train.use_neptune"""'], {}), "('train.use_neptune')\n", (542, 563), False, 'import gin\n'), ((1184, 1224), 'gin.query_parameter', 'gin.query_parameter', (['"""neptune.user_name"""'], {}), "('neptune.user_name')\n", (1203, 1224), False, 'import gin\n'), ((1244, 1287), 'gin.query_parameter', 'gin.query_parameter', (['"""neptune.project_name"""'], {}), "('neptune.project_name')\n", (1263, 1287), False, 'import gin\n'), ((1302, 1345), 'neptune.init', 'neptune.init', (['f"""{user_name}/{project_name}"""'], {}), "(f'{user_name}/{project_name}')\n", (1314, 1345), False, 'import neptune\n'), ((2875, 2914), 'gin.query_parameter', 'gin.query_parameter', (['"""optuna.root_path"""'], {}), "('optuna.root_path')\n", (2894, 2914), False, 'import gin\n'), ((2931, 2966), 'os.path.join', 'os.path.join', (['root_path', 'study_name'], {}), '(root_path, study_name)\n', (2943, 2966), False, 'import os\n'), ((3027, 3048), 'os.listdir', 'os.listdir', (['save_path'], {}), '(save_path)\n', (3037, 3048), False, 'import os\n'), ((4321, 4357), 'gin.query_parameter', 'gin.query_parameter', (['"""optuna.params"""'], {}), "('optuna.params')\n", (4340, 4357), False, 'import gin\n'), ((5027, 5043), 'experiments.src.training.training_utils.get_metric_cls', 'get_metric_cls', ([], {}), '()\n', (5041, 5043), False, 'from experiments.src.training.training_utils import get_metric_cls\n'), ((3071, 3100), 'os.path.join', 'os.path.join', (['save_path', 'path'], {}), '(save_path, path)\n', (3083, 3100), False, 'import os\n'), ((3112, 3137), 'os.path.isdir', 'os.path.isdir', (['trial_path'], {}), '(trial_path)\n', (3125, 3137), False, 'import os\n'), ((5814, 5826), 'numpy.mean', 'np.mean', (['lst'], {}), '(lst)\n', (5821, 5826), True, 'import numpy as np\n'), ((5877, 5888), 'numpy.std', 'np.std', (['lst'], {}), '(lst)\n', (5883, 5888), True, 'import numpy as np\n'), ((2394, 2423), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2421, 2423), False, 'import tempfile\n'), ((3169, 3205), 'os.path.join', 'os.path.join', (['trial_path', 'study_name'], {}), '(trial_path, study_name)\n', (3181, 3205), False, 'import os\n'), ((3229, 3286), 'os.path.join', 'os.path.join', (['experiment_path', '"""gin-config-essential.txt"""'], {}), "(experiment_path, 'gin-config-essential.txt')\n", (3241, 3286), False, 'import os\n'), ((3390, 3412), 'experiments.src.gin.parse_gin_str', 'parse_gin_str', (['gin_str'], {}), '(gin_str)\n', (3403, 3412), False, 'from experiments.src.gin import get_default_experiment_name, parse_gin_str\n'), ((2603, 2635), 'os.path.join', 'os.path.join', (['tmp', 'artifact_name'], {}), '(tmp, artifact_name)\n', (2615, 2635), False, 'import os\n'), ((3579, 3624), 'os.path.join', 'os.path.join', (['experiment_path', '"""results.json"""'], {}), "(experiment_path, 'results.json')\n", (3591, 3624), False, 'import os\n'), ((5380, 5408), 'numpy.isnan', 'np.isnan', (['x[1][valid_metric]'], {}), '(x[1][valid_metric])\n', (5388, 5408), True, 'import numpy as np\n')] |
#
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest.mock import MagicMock
import pytest
from neptune.new.exceptions import ProjectNotFound
from neptune.new.sync.utils import get_project
@pytest.fixture(name="backend")
def backend_fixture():
return MagicMock()
def test_get_project_no_name_set(mocker, backend):
# given
mocker.patch.object(os, "getenv")
os.getenv.return_value = None
# expect
assert get_project(None, backend=backend) is None
def test_get_project_project_not_found(backend):
# given
backend.get_project.side_effect = ProjectNotFound("foo")
# expect
assert get_project("foo", backend=backend) is None
| [
"neptune.new.exceptions.ProjectNotFound",
"neptune.new.sync.utils.get_project"
] | [((760, 790), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""backend"""'}), "(name='backend')\n", (774, 790), False, 'import pytest\n'), ((825, 836), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (834, 836), False, 'from unittest.mock import MagicMock\n'), ((1143, 1165), 'neptune.new.exceptions.ProjectNotFound', 'ProjectNotFound', (['"""foo"""'], {}), "('foo')\n", (1158, 1165), False, 'from neptune.new.exceptions import ProjectNotFound\n'), ((999, 1033), 'neptune.new.sync.utils.get_project', 'get_project', (['None'], {'backend': 'backend'}), '(None, backend=backend)\n', (1010, 1033), False, 'from neptune.new.sync.utils import get_project\n'), ((1191, 1226), 'neptune.new.sync.utils.get_project', 'get_project', (['"""foo"""'], {'backend': 'backend'}), "('foo', backend=backend)\n", (1202, 1226), False, 'from neptune.new.sync.utils import get_project\n')] |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 5