repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Tim-TSENet | Tim-TSENet-main/TSDNET/test_tasnet_one_hot_reg.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot,TSDNet_plus_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
def time_to_frame(tm, st=True):
    """Convert a time in seconds to a frame index on a 624-frame / 10 s grid.

    tm: time in seconds (nominally 0..10).
    st: True  -> floor the quotient (start-of-event boundary);
        False -> ceil the quotient (end-of-event boundary).
    Returns an int clamped to the valid frame range [0, 623].
    """
    ratio = 10.0 / 624  # seconds per frame
    if st:
        n_frame = tm // ratio
    else:
        # Ceiling without math.ceil: the original called math.ceil but this
        # module never imported math, so the st=False path raised NameError.
        n_frame = -(-tm // ratio)
    # Clamp to the legal frame range.
    if n_frame >= 624:
        n_frame = 623
    if n_frame < 0:
        n_frame = 0
    return int(n_frame)
# def time_to_frame(tm):
# return int(tm/(10.0/312))
def read_wav(fname, return_rate=False):
    '''
    Read a wav file using torchaudio.
    input:
          fname: wav file path
          return_rate: whether to also return the sampling rate
    output:
          src: squeezed waveform tensor (loaded as C x L, channels first,
               L being the number of audio frames and C the channel count)
          sr: sample rate (only when return_rate is True)
    '''
    waveform, sample_rate = torchaudio.load(fname, channels_first=True)
    waveform = waveform.squeeze()
    if return_rate:
        return waveform, sample_rate
    return waveform
class Separation():
    """Evaluate a trained TSDNet_one_hot checkpoint on a test set.

    For each mixture it regresses the target event's (start, end) time,
    converts that span to a binary frame mask on a 624-frame grid, and
    scores the result with event-based and segment-based SED metrics.
    """
    def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
        # mix_scp / ref_scp: scp files mapping utterance key -> wav path.
        # inf_scp: scp with per-utterance class / onset / offset annotations.
        # model: path to a checkpoint dict with 'model_state_dict' and 'epoch'.
        super(Separation, self).__init__()
        self.mix_audio = handle_scp(mix_scp)
        self.ref_audio = handle_scp(ref_scp)
        self.clss, self.onset, self.offset = handle_scp_inf(inf_scp)
        self.key = list(self.mix_audio.keys())
        opt = parse(yaml_path)
        tsdnet = TSDNet_one_hot()
        dicts = torch.load(model, map_location='cpu')
        tsdnet.load_state_dict(dicts["model_state_dict"])
        setup_logger(opt['logger']['name'], opt['logger']['path'],
                     screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
        self.logger = logging.getLogger(opt['logger']['name'])
        self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
        self.tsdnet=tsdnet.cuda()
        self.device=torch.device('cuda:{}'.format(
            gpuid[0]) if len(gpuid) > 0 else 'cpu')
        self.pred_file = pred_file
        self.label_path = opt['label_path']
        self.save_tsv_path = opt['save_tsv_path']

    def test(self):
        """Run the whole test set (one utterance at a time) and write
        prediction TSVs plus class-wise / event / segment / quick reports."""
        self.tsdnet.eval()
        time_predictions = []
        # Output file-name templates; formatted with the label-file basename.
        class_result_file = 'class_result_{}.txt'
        event_file = 'event_{}.txt'
        segment_file = 'segment_{}.txt'
        with torch.no_grad():
            for i in range(len(self.key)):
                index = self.key[i]
                # Reference wav shares the key with an '_re' suffix.
                ref_index = index.replace('.wav', '_re.wav')
                cls = str(self.clss[index])
                onset = self.onset[index]
                offset = self.offset[index]
                onset_frame = time_to_frame(onset)
                offset_frame = time_to_frame(offset)
                # One-hot class vector over 41 classes -> class index input.
                cls_vec = torch.zeros(41)
                cls_vec[self.clss[index]] = 1.
                cls_vec = cls_vec.unsqueeze(0)
                cls_index = cls_vec.argmax(1)
                # cls_index = torch.from_numpy(cls_index)
                # print('cls_index ',cls_index)
                # assert 1==2
                cls_index = cls_index.to(self.device)
                cls = 'class_' + cls
                mix = read_wav(self.mix_audio[index])
                ref = read_wav(self.ref_audio[ref_index])
                mix = mix.to(self.device)
                ref = ref.to(self.device)
                # Add a batch dimension (batch size 1).
                mix = mix[None,:]
                ref = ref[None,:]
                # cls_index = cls_index[None,:]
                # print('cls_index ',cls_index.shape)
                x_cls, out_tsd_time, out_tsd_up,_ = self.tsdnet(mix, ref,cls_index)
                # print('onset_frame, offset_frame',onset_frame, offset_frame)
                # assert 1==2
                # x_cls: <bs,50>
                # out_tsd_time: <bs,t/2>
                # out_tsd_up: <bs,t>
                out_tsd_time = out_tsd_time.detach().cpu().numpy()
                out_tsd_up = out_tsd_up.detach().cpu().numpy()
                # print('out_tsd_time ',out_tsd_time)
                # print('onset ',onset)
                # print('offset ',offset)
                # print('out_tsd_time ',out_tsd_time.shape)
                # print('out_tsd_up ',out_tsd_up.shape)
                print('out_tsd_time ',out_tsd_time)
                print('out_tsd_up ',out_tsd_up)
                # NOTE(review): argmax over the two heads is treated as the
                # predicted start/end time in seconds -- confirm this matches
                # the regression head's training target.
                st = out_tsd_time.argmax(1)
                ed = out_tsd_up.argmax(1)
                # print('st ',st)
                # print('ed ',ed)
                # assert 1==2
                # assert 1==2
                # Clip the predicted span to the 0..10 s clip duration.
                st_time = st
                if st_time < 0:
                    st_time = 0
                ed_time = ed
                if ed_time > 10:
                    ed_time= 10
                # print('st_time ',st_time)
                # print('ed_time ',ed_time)
                start_frame = time_to_frame(st_time)
                end_frame = time_to_frame(ed_time)
                # print('start_frame ',start_frame)
                # print('end_frame ',end_frame)
                # assert 1==2
                # Binary frame-activity mask over the 624-frame grid.
                pred = np.zeros(624)
                pred[start_frame:end_frame] = 1.0
                pred = pred[None,:]
                # pred = out_tsd_up.detach().cpu().numpy() # transpose to numpy
                # pred = pred[:,:,0]
                # print(pred.shape)
                thres = 0.5
                window_size = 1
                filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
                decoded_pred = [] #
                decoded_pred_ = utils.decode_with_timestamps(str(cls),filtered_pred[0,:])
                if len(decoded_pred_) == 0: # neg deal
                    decoded_pred_.append((str(cls),0,0))
                decoded_pred.append(decoded_pred_)
                for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
                    #print('len(decoded_pred) ',len(decoded_pred))
                    filename = index.split('/')[-1]
                    # Save each frame output, for later visualization
                    label_prediction = decoded_pred[num_batch] # frame predict
                    for event_label, onset, offset in label_prediction:
                        time_predictions.append({
                            'filename': filename,
                            'onset': onset,
                            'offset': offset,
                            'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
        assert len(time_predictions) > 0, "No outputs, lower threshold?"
        pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # it store the happen event and its time information
        time_ratio = 10.0/pred.shape[1] # calculate time
        pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # transform the number of frame to real time
        label_path = self.label_path
        test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
        print('test_data_filename ',test_data_filename)
        pred_file = 'hard_predictions_{}.txt'
        if pred_file: # it name is hard_predictions...
            pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
                           index=False, sep="\t")
        strong_labels_df = pd.read_csv(self.label_path, sep='\t') # get
        # Normalise label filenames to basenames so they match predictions.
        if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
            strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
        sed_eval = True
        if sed_eval:
            event_result, segment_result = metrics.compute_metrics(
                strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
            print("Event Based Results:\n{}".format(event_result))
            event_results_dict = event_result.results_class_wise_metrics()
            class_wise_results_df = pd.DataFrame().from_dict({
                f: event_results_dict[f]['f_measure']
                for f in event_results_dict.keys()}).T
            class_wise_results_df.to_csv(os.path.join(
                self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
            print("Class wise F1-Macro:\n{}".format(
                tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
            if event_file:
                with open(os.path.join(self.save_tsv_path,
                                       event_file.format(test_data_filename)), 'w') as wp:
                    wp.write(event_result.__str__())
            print("=" * 100)
            print(segment_result)
            if segment_file:
                with open(os.path.join(self.save_tsv_path, segment_file.format(test_data_filename)), 'w') as wp:
                    wp.write(segment_result.__str__())
            event_based_results = pd.DataFrame(
                event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
            segment_based_results = pd.DataFrame(
                segment_result.results_class_wise_average_metrics()
                ['f_measure'], index=['segment_based'])
            result_quick_report = pd.concat((event_based_results, segment_based_results))
            # Add two columns
            with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
                print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
            print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
    """CLI entry point: build a Separation evaluator from the fixed
    FSD-2018 scp lists plus the parsed options, then run the test pass."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
    parser.add_argument('-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_new/TSDNet_one_hot_reg_new/best.pt', help="Path to model file.")
    parser.add_argument('-max_num', type=str, default=10000, help="Max number for testing samples.")
    parser.add_argument('-gpuid', type=str, default='0', help='Enter GPU id number')
    parser.add_argument('-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
    args = parser.parse_args()
    gpu_ids = list(map(int, args.gpuid.split(',')))
    evaluator = Separation(
        '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_mix.scp',
        '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_re.scp',
        '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_inf.scp',
        args.yaml, args.model, gpu_ids)
    evaluator.test()
if __name__ == "__main__":
main()
| 11,496 | 45.358871 | 170 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/dualrnn_test.py | import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model_rnn import Dual_RNN_model
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
    """Inference wrapper for a trained Dual-Path RNN separator: loads a
    checkpoint, runs it over an scp of mixtures and writes each estimated
    source to disk."""
    def __init__(self, mix_path, yaml_path, model, gpuid):
        # mix_path: scp of mixture wavs; model: checkpoint path with
        # 'model_state_dict' and 'epoch'; gpuid: list of GPU ids ([] = CPU).
        super(Separation, self).__init__()
        self.mix = AudioReader(mix_path, sample_rate=8000)
        opt = parse(yaml_path)
        net = Dual_RNN_model(**opt['Dual_Path_RNN'])
        dicts = torch.load(model, map_location='cpu')
        net.load_state_dict(dicts["model_state_dict"])
        setup_logger(opt['logger']['name'], opt['logger']['path'],
                     screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
        self.logger = logging.getLogger(opt['logger']['name'])
        self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
        self.net=net.cuda()
        self.device=torch.device('cuda:{}'.format(
            gpuid[0]) if len(gpuid) > 0 else 'cpu')
        self.gpuid=tuple(gpuid)

    def inference(self, file_path):
        """Separate each utterance and write source i (1-based) to
        <file_path>/spk<i>/<key> as an 8 kHz wav."""
        with torch.no_grad():
            for key, egs in tqdm.tqdm(self.mix):
                #self.logger.info("Compute on utterance {}...".format(key))
                egs=egs.to(self.device)
                norm = torch.norm(egs,float('inf'))
                if len(self.gpuid) != 0:
                    if egs.dim() == 1:
                        # Add a batch dimension for the network.
                        egs = torch.unsqueeze(egs, 0)
                    ests=self.net(egs)
                    spks=[torch.squeeze(s.detach().cpu()) for s in ests]
                else:
                    if egs.dim() == 1:
                        egs = torch.unsqueeze(egs, 0)
                    ests=self.net(egs)
                    spks=[torch.squeeze(s.detach()) for s in ests]
                index=0
                for s in spks:
                    s = s[:egs.shape[1]]  # trim to the input length
                    s = s - torch.mean(s)  # remove DC offset
                    s = s/torch.max(torch.abs(s))  # peak-normalise
                    #norm
                    #s = s*norm/torch.max(torch.abs(s))
                    s = s.unsqueeze(0)
                    index += 1
                    os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
                    filename=file_path+'/spk'+str(index)+'/'+key
                    write_wav(filename, s, 8000)
                # NOTE(review): this break stops after the FIRST utterance,
                # yet the log below reports the full scp size -- remove the
                # break to process the whole list (confirm intent).
                break
        self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
    """Parse CLI options, build the Dual-RNN Separation helper and run
    inference, writing separated sources under the save path."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-mix_scp', type=str, default='../create_scp/tt_mix.scp', help='Path to mix scp file.')
    parser.add_argument('-yaml', type=str, default='./config/train_rnn_opt.yml', help='Path to yaml file.')
    parser.add_argument('-model', type=str, default='./checkpoint/Dual_Path_RNN_opt/best.pt', help="Path to model file.")
    parser.add_argument('-gpuid', type=str, default='0', help='Enter GPU id number')
    parser.add_argument('-save_path', type=str, default='./result/dual-rnn/', help='save result path')
    args = parser.parse_args()
    gpu_ids = list(map(int, args.gpuid.split(',')))
    separator = Separation(args.mix_scp, args.yaml, args.model, gpu_ids)
    separator.inference(args.save_path)
if __name__ == "__main__":
main() | 3,384 | 40.790123 | 105 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_tse.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
    """Training / validation / test driver for the TSE Conv-TasNet model.

    Handles device placement, optional checkpoint resume, gradient
    clipping, LR scheduling, early stopping, best/last checkpointing and
    a final loss-curve plot (loss.png).
    """

    def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
        super(Trainer).__init__()
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.test_dataloader = test_dataloader
        self.scheduler = scheduler
        self.num_spks = opt['num_spks']
        self.cur_epoch = 0
        self.total_epoch = opt['train']['epoch']
        self.early_stop = opt['train']['early_stop']
        self.print_freq = opt['logger']['print_freq']
        self.logger = logging.getLogger(opt['logger']['name'])
        self.checkpoint = opt['train']['path']
        self.name = opt['name']
        self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
        self.audio_length = opt['datasets']['audio_setting']['audio_length']
        self.sr = opt['datasets']['audio_setting']['sample_rate']
        self.ratio = 0.3
        if opt['train']['gpuid']:
            self.logger.info('Load Nvida GPU .....')
            self.device = torch.device(
                'cuda:{}'.format(opt['train']['gpuid'][0]))
            self.gpuid = opt['train']['gpuid']
        else:
            self.logger.info('Load CPU ...........')
            self.device = torch.device('cpu')
            # Bugfix: the CPU branch previously never set self.gpuid, which
            # made train()/run() crash with AttributeError on CPU-only runs.
            self.gpuid = ()
        self.convtasnet = Conv_Tasnet.to(self.device)
        self.logger.info(
            'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
        if opt['resume']['state']:
            ckp = torch.load(opt['resume']['path'], map_location='cpu')
            self.cur_epoch = ckp['epoch']
            self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
                opt['resume']['path'], self.cur_epoch))
            # Bugfix: load_state_dict() mutates in place and does NOT return
            # the module/optimizer; the old code assigned its return value
            # (crashing on .to() and leaving self.optimizer = None).
            self.convtasnet.load_state_dict(ckp['model_state_dict'])
            optimizer.load_state_dict(ckp['optim_state_dict'])
        self.optimizer = optimizer
        if opt['optim']['clip_norm']:
            self.clip_norm = opt['optim']['clip_norm']
            self.logger.info(
                "Gradient clipping by {}, default L2".format(self.clip_norm))
        else:
            self.clip_norm = 0

    def train(self, epoch):
        """Run one training epoch; returns mean (total, cls, tsd) losses."""
        self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
        self.convtasnet.train()
        num_batchs = len(self.train_dataloader)
        total_loss = 0.0
        total_loss_cls = 0.0
        total_loss_tsd = 0.0
        num_index = 0
        start_time = time.time()
        for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.train_dataloader:
            # Bugfix: count the batch up-front so the final averages divide
            # by the true batch count (the old code divided by N + 1).
            num_index += 1
            mix = mix.to(self.device)
            ref = ref.to(self.device)
            tse = tse.to(self.device)
            # NOTE: replicates the same tensor num_spks times; s1 is not used
            # by the loss below -- kept for parity with the data pipeline.
            s1 = [s1.to(self.device) for i in range(self.num_spks)]
            cls = cls.to(self.device)
            onset = onset.to(self.device)
            offset = offset.to(self.device)
            tsd_lab = tsd_lab.to(self.device)
            self.optimizer.zero_grad()
            # GPU and CPU paths were identical; a single call suffices.
            est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
            epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
            total_loss += epoch_loss.item()
            total_loss_cls += loss_cls.item()
            total_loss_tsd += loss_tsd.item()
            epoch_loss.backward()
            if self.clip_norm:
                torch.nn.utils.clip_grad_norm_(
                    self.convtasnet.parameters(), self.clip_norm)
            self.optimizer.step()
            if num_index % self.print_freq == 0:
                message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
                    epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
                    total_loss_cls / num_index,
                    total_loss_tsd / num_index)
                self.logger.info(message)
        end_time = time.time()
        num_index = max(num_index, 1)  # guard against an empty dataloader
        total_loss = total_loss / num_index
        total_loss_cls = total_loss_cls / num_index
        total_loss_tsd = total_loss_tsd / num_index
        message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
            epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
        self.logger.info(message)
        return total_loss, total_loss_cls, total_loss_tsd

    def validation(self, epoch):
        """Run one validation epoch (no gradients); returns mean losses."""
        self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
        self.convtasnet.eval()
        num_batchs = len(self.val_dataloader)
        num_index = 0
        total_loss = 0.0
        total_loss_cls = 0.0
        total_loss_tsd = 0.0
        start_time = time.time()
        with torch.no_grad():
            for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
                num_index += 1  # bugfix: same off-by-one as in train()
                mix = mix.to(self.device)
                ref = ref.to(self.device)
                tse = tse.to(self.device)
                s1 = [s1.to(self.device) for i in range(self.num_spks)]
                cls = cls.to(self.device)
                onset = onset.to(self.device)
                offset = offset.to(self.device)
                tsd_lab = tsd_lab.to(self.device)
                # (removed a pointless optimizer.zero_grad() -- no gradients
                # are produced under torch.no_grad())
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
                epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
                total_loss += epoch_loss.item()
                total_loss_cls += loss_cls.item()
                total_loss_tsd += loss_tsd.item()
        end_time = time.time()
        num_index = max(num_index, 1)
        total_loss = total_loss / num_index
        total_loss_cls = total_loss_cls / num_index
        total_loss_tsd = total_loss_tsd / num_index
        message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
            epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
        self.logger.info(message)
        return total_loss, total_loss_cls, total_loss_tsd

    def test(self, epoch):
        """Run one pass over the test dataloader; returns mean losses."""
        self.logger.info(
            'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
        self.convtasnet.eval()
        num_batchs = len(self.test_dataloader)
        num_index = 0
        total_loss = 0.0
        total_loss_cls = 0.0
        total_loss_tsd = 0.0
        start_time = time.time()
        with torch.no_grad():
            for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
                num_index += 1  # bugfix: same off-by-one as in train()
                mix = mix.to(self.device)
                ref = ref.to(self.device)
                tse = tse.to(self.device)
                s1 = [s1.to(self.device) for i in range(self.num_spks)]
                cls = cls.to(self.device)
                onset = onset.to(self.device)
                offset = offset.to(self.device)
                tsd_lab = tsd_lab.to(self.device)
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
                epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
                total_loss += epoch_loss.item()
                total_loss_cls += loss_cls.item()
                total_loss_tsd += loss_tsd.item()
        end_time = time.time()
        num_index = max(num_index, 1)
        total_loss = total_loss / num_index
        total_loss_cls = total_loss_cls / num_index
        total_loss_tsd = total_loss_tsd / num_index
        message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
            epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
        self.logger.info(message)
        return total_loss, total_loss_cls, total_loss_tsd

    def run(self):
        """Full training schedule with early stopping; checkpoints the best
        model on validation loss and plots all loss curves at the end."""
        train_loss = []
        val_loss = []
        test_loss = []
        with torch.cuda.device(self.gpuid[0]):
            self.save_checkpoint(self.cur_epoch, best=False)
            v_loss, _, _ = self.validation(self.cur_epoch)
            best_loss = v_loss
            self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
            no_improve = 0
            # starting training part
            while self.cur_epoch < self.total_epoch:
                self.cur_epoch += 1
                t_loss, _, _ = self.train(self.cur_epoch)
                v_loss, _, _ = self.validation(self.cur_epoch)
                tt_loss, _, _ = self.test(self.cur_epoch)
                train_loss.append(t_loss)
                val_loss.append(v_loss)
                test_loss.append(tt_loss)
                # schedule here
                self.scheduler.step()
                if v_loss >= best_loss:
                    no_improve += 1
                    self.logger.info(
                        'No improvement, Best Loss: {:.4f}'.format(best_loss))
                else:
                    best_loss = v_loss
                    no_improve = 0
                    self.save_checkpoint(self.cur_epoch, best=True)
                    self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
                        self.cur_epoch, best_loss))
                    self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
                        self.cur_epoch, tt_loss))
                if no_improve == self.early_stop:
                    self.logger.info(
                        "Stop training cause no impr for {:d} epochs".format(
                            no_improve))
                    break
            self.save_checkpoint(self.cur_epoch, best=False)
            self.logger.info("Training for {:d}/{:d} epoches done!".format(
                self.cur_epoch, self.total_epoch))
        # Draw loss image. Bugfix: use the number of recorded epochs so the
        # x-axis matches the curves even when training resumed (cur_epoch > 0
        # at start) or stopped early.
        plt.title("Loss of train, val and test")
        x = [i for i in range(len(train_loss))]
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')

    def save_checkpoint(self, epoch, best=True):
        '''
        Persist model + optimizer state.
        best: True -> <checkpoint>/<name>/best.pt, False -> .../last.pt
        '''
        os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.convtasnet.state_dict(),
            'optim_state_dict': self.optimizer.state_dict()
        },
            os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 13,121 | 44.404844 | 146 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_one_hot_regresion.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss, get_loss_one_hot, get_loss_one_hot_focal, get_loss_one_hot_focal_sim,get_loss_one_hot_reg,get_loss_one_hot_reg_two
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.opt = opt
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.ratio = 0.3
if opt['train']['gpuid']:
self.logger.info('Load Nvida GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
if opt['resume']['state']:
ckp = torch.load(opt['resume']['path'], map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.convtasnet = Conv_Tasnet.load_state_dict(
ckp['model_state_dict']).to(self.device)
self.optimizer = optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.convtasnet = Conv_Tasnet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, real_time in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
# print('onset ', onset)
# print('offset ', offset)
# print('real_time ',real_time)
# assert 1==2
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix, ref)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref,cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref,cls_index.long())
# l = Loss(out, s1)
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.convtasnet.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_cls / num_index,
total_loss_tsd / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, real_time in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.convtasnet)
#out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
# l = Loss(out, s1)
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.test_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab,sim_lab,real_time in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
# l = Loss(out, s1)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def run(self):
train_loss = []
val_loss = []
test_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,_,_ = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss,_,_ = self.train(self.cur_epoch)
v_loss,_,_ = self.validation(self.cur_epoch)
tt_loss,_,_ = self.test(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
# schedule here
self.scheduler.step()
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
self.cur_epoch, tt_loss))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(
no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
    """Serialize model/optimizer state to <checkpoint>/<name>/{best|last}.pt.

    best: when True write 'best.pt', otherwise 'last.pt'.
    """
    target_dir = os.path.join(self.checkpoint, self.name)
    os.makedirs(target_dir, exist_ok=True)
    state = {
        'epoch': epoch,
        'model_state_dict': self.convtasnet.state_dict(),
        'optim_state_dict': self.optimizer.state_dict()
    }
    filename = '{0}.pt'.format('best' if best else 'last')
    torch.save(state, os.path.join(target_dir, filename))
| 14,648 | 47.506623 | 147 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/trainer/trainer_Dual_RNN.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import Loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, Dual_RNN, optimizer, scheduler, opt):
    """Wire up dataloaders, model, optimizer, scheduler and options taken
    from the parsed YAML ``opt`` dict; optionally resume from best.pt."""
    super(Trainer).__init__()
    self.train_dataloader = train_dataloader
    self.val_dataloader = val_dataloader
    self.scheduler = scheduler
    self.num_spks = opt['num_spks']
    self.cur_epoch = 0
    self.total_epoch = opt['train']['epoch']
    self.early_stop = opt['train']['early_stop']
    self.print_freq = opt['logger']['print_freq']
    # setup_logger(opt['logger']['name'], opt['logger']['path'],
    #              screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
    self.logger = logging.getLogger(opt['logger']['name'])
    self.checkpoint = opt['train']['path']
    self.name = opt['name']
    if opt['train']['gpuid']:
        self.logger.info('Load Nvida GPU .....')
        self.device = torch.device(
            'cuda:{}'.format(opt['train']['gpuid'][0]))
        self.gpuid = opt['train']['gpuid']
        self.dualrnn = Dual_RNN.to(self.device)
        self.logger.info(
            'Loading Dual-Path-RNN parameters: {:.3f} Mb'.format(check_parameters(self.dualrnn)))
    else:
        # NOTE(review): this CPU branch never sets self.gpuid, but train()
        # reads it -- confirm whether CPU training is actually supported.
        self.logger.info('Load CPU ...........')
        self.device = torch.device('cpu')
        self.dualrnn = Dual_RNN.to(self.device)
        self.logger.info(
            'Loading Dual-Path-RNN parameters: {:.3f} Mb'.format(check_parameters(self.dualrnn)))
    if opt['resume']['state']:
        # load_state_dict mutates the module in place; the model is re-moved
        # to the device afterwards and the LR is halved on resume.
        ckp = torch.load(os.path.join(
            opt['resume']['path'], 'best.pt'), map_location='cpu')
        self.cur_epoch = ckp['epoch']
        self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
            opt['resume']['path'], self.cur_epoch))
        Dual_RNN.load_state_dict(
            ckp['model_state_dict'])
        self.dualrnn = Dual_RNN.to(self.device)
        optimizer.load_state_dict(ckp['optim_state_dict'])
        self.optimizer = optimizer
        lr = self.optimizer.param_groups[0]['lr']
        # NOTE(review): adjust_learning_rate is not defined in the visible
        # part of this class -- confirm it exists on the full Trainer.
        self.adjust_learning_rate(self.optimizer, lr*0.5)
    else:
        self.dualrnn = Dual_RNN.to(self.device)
        self.optimizer = optimizer
    if opt['optim']['clip_norm']:
        self.clip_norm = opt['optim']['clip_norm']
        self.logger.info(
            "Gradient clipping by {}, default L2".format(self.clip_norm))
    else:
        self.clip_norm = 0
def train(self, epoch):
    """Run one training epoch over train_dataloader and return the
    averaged loss (see model.loss.Loss)."""
    self.logger.info(
        'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.dualrnn.train()
    num_batchs = len(self.train_dataloader)
    total_loss = 0.0
    num_index = 1
    start_time = time.time()
    for mix, ref in self.train_dataloader:
        mix = mix.to(self.device)
        # one reference signal per speaker
        ref = [ref[i].to(self.device) for i in range(self.num_spks)]
        self.optimizer.zero_grad()
        if self.gpuid:
            out = torch.nn.parallel.data_parallel(self.dualrnn,mix,device_ids=self.gpuid)
            #out = self.dualrnn(mix)
        else:
            out = self.dualrnn(mix)
        l = Loss(out, ref)
        epoch_loss = l
        total_loss += epoch_loss.item()
        epoch_loss.backward()
        if self.clip_norm:
            torch.nn.utils.clip_grad_norm_(
                self.dualrnn.parameters(), self.clip_norm)
        self.optimizer.step()
        if num_index % self.print_freq == 0:
            message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}>'.format(
                epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss/num_index)
            self.logger.info(message)
        num_index += 1
    end_time = time.time()
    # NOTE(review): num_index ends at num_batchs + 1, so this divides by one
    # more than the batch count -- confirm the small bias is intended.
    total_loss = total_loss/num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, (end_time-start_time)/60)
    self.logger.info(message)
    return total_loss
def validation(self, epoch):
    """Run one validation epoch (no gradient updates) and return the
    averaged loss."""
    self.logger.info(
        'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.dualrnn.eval()
    num_batchs = len(self.val_dataloader)
    num_index = 1
    total_loss = 0.0
    start_time = time.time()
    with torch.no_grad():
        for mix, ref in self.val_dataloader:
            mix = mix.to(self.device)
            ref = [ref[i].to(self.device) for i in range(self.num_spks)]
            # zero_grad has no effect under no_grad(); kept as-is
            self.optimizer.zero_grad()
            if self.gpuid:
                #model = torch.nn.DataParallel(self.dualrnn)
                #out = model(mix)
                out = torch.nn.parallel.data_parallel(self.dualrnn,mix,device_ids=self.gpuid)
            else:
                out = self.dualrnn(mix)
            l = Loss(out, ref)
            epoch_loss = l
            total_loss += epoch_loss.item()
            if num_index % self.print_freq == 0:
                message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}>'.format(
                    epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss/num_index)
                self.logger.info(message)
            num_index += 1
    end_time = time.time()
    # NOTE(review): divides by num_batchs + 1 (same off-by-one as train()).
    total_loss = total_loss/num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, (end_time-start_time)/60)
    self.logger.info(message)
    return total_loss
def run(self):
    """Training driver: baseline validation, then train/validate each epoch,
    stepping the scheduler on the validation loss, with early stopping,
    checkpointing, and a final loss-curve plot."""
    train_loss = []
    val_loss = []
    with torch.cuda.device(self.gpuid[0]):
        self.save_checkpoint(self.cur_epoch, best=False)
        v_loss = self.validation(self.cur_epoch)
        best_loss = v_loss
        self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(
            self.cur_epoch, best_loss))
        no_improve = 0
        # starting training part
        while self.cur_epoch < self.total_epoch:
            self.cur_epoch += 1
            t_loss = self.train(self.cur_epoch)
            v_loss = self.validation(self.cur_epoch)
            train_loss.append(t_loss)
            val_loss.append(v_loss)
            # schedule here (scheduler consumes the validation metric)
            self.scheduler.step(v_loss)
            if v_loss >= best_loss:
                no_improve += 1
                self.logger.info(
                    'No improvement, Best Loss: {:.4f}'.format(best_loss))
            else:
                best_loss = v_loss
                no_improve = 0
                self.save_checkpoint(self.cur_epoch, best=True)
                self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
                    self.cur_epoch, best_loss))
            if no_improve == self.early_stop:
                self.logger.info(
                    "Stop training cause no impr for {:d} epochs".format(
                        no_improve))
                break
        self.save_checkpoint(self.cur_epoch, best=False)
        self.logger.info("Training for {:d}/{:d} epoches done!".format(
            self.cur_epoch, self.total_epoch))
        # draw loss image
        # NOTE(review): x assumes cur_epoch started at 0; resumed runs would
        # hit a length mismatch here -- confirm.
        plt.title("Loss of train and test")
        x = [i for i in range(self.cur_epoch)]
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.legend()
        #plt.xticks(l, lx)
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.dualrnn.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 8,747 | 39.5 | 112 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
    """Wire up dataloaders, model, optimizer, scheduler and options.

    ``opt`` is the parsed YAML option dict. Fixes two defects of the
    original: (1) ``nn.Module.load_state_dict``/``Optimizer.load_state_dict``
    mutate in place and do NOT return the module/optimizer, so assigning
    their return value (and calling ``.to`` on it) crashed on resume;
    (2) the CPU branch never set ``self.gpuid`` although train() reads it.
    """
    super().__init__()
    self.train_dataloader = train_dataloader
    self.val_dataloader = val_dataloader
    self.test_dataloader = test_dataloader
    self.scheduler = scheduler
    self.num_spks = opt['num_spks']
    self.cur_epoch = 0
    self.total_epoch = opt['train']['epoch']
    self.early_stop = opt['train']['early_stop']
    self.print_freq = opt['logger']['print_freq']
    # logger is assumed to be configured by the caller via setup_logger
    self.logger = logging.getLogger(opt['logger']['name'])
    self.checkpoint = opt['train']['path']
    self.name = opt['name']
    self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
    self.audio_length = opt['datasets']['audio_setting']['audio_length']
    self.sr = opt['datasets']['audio_setting']['sample_rate']
    self.ratio = 0.3  # reserved constant; not read elsewhere in this class
    # self.gpuid is always defined so later `if self.gpuid:` checks work
    self.gpuid = opt['train']['gpuid']
    if self.gpuid:
        self.logger.info('Load Nvida GPU .....')
        self.device = torch.device('cuda:{}'.format(self.gpuid[0]))
    else:
        self.logger.info('Load CPU ...........')
        self.device = torch.device('cpu')
    self.convtasnet = Conv_Tasnet.to(self.device)
    self.logger.info(
        'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
    if opt['resume']['state']:
        ckp = torch.load(opt['resume']['path'], map_location='cpu')
        self.cur_epoch = ckp['epoch']
        self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
            opt['resume']['path'], self.cur_epoch))
        # load in place, then re-move the module to the target device
        self.convtasnet.load_state_dict(ckp['model_state_dict'])
        self.convtasnet = self.convtasnet.to(self.device)
        optimizer.load_state_dict(ckp['optim_state_dict'])
    self.optimizer = optimizer
    if opt['optim']['clip_norm']:
        self.clip_norm = opt['optim']['clip_norm']
        self.logger.info(
            "Gradient clipping by {}, default L2".format(self.clip_norm))
    else:
        self.clip_norm = 0
def train(self, epoch):
    """One training epoch: forward, joint (total/cls/tsd) loss, backward,
    optional grad clipping; returns the three averaged losses."""
    self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.train()
    num_batchs = len(self.train_dataloader)
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    num_index = 1
    start_time = time.time()
    for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.train_dataloader:
        mix = mix.to(self.device)
        ref = ref.to(self.device)
        s1 = [s1.to(self.device) for i in range(self.num_spks)]
        cls = cls.to(self.device)
        onset = onset.to(self.device)
        offset = offset.to(self.device)
        tsd_lab = tsd_lab.to(self.device)
        self.optimizer.zero_grad()
        if self.gpuid:
            # model = torch.nn.DataParallel(self.convtasnet)
            # out = model(mix, ref)
            est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
        else:
            est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
        # l = Loss(out, s1)
        epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
        total_loss += epoch_loss.item()
        total_loss_cls += loss_cls.item()
        total_loss_tsd += loss_tsd.item()
        epoch_loss.backward()
        if self.clip_norm:
            torch.nn.utils.clip_grad_norm_(
                self.convtasnet.parameters(), self.clip_norm)
        self.optimizer.step()
        if num_index % self.print_freq == 0:
            message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
                epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
                total_loss_cls / num_index,
                total_loss_tsd / num_index)
            self.logger.info(message)
        num_index += 1
    end_time = time.time()
    # NOTE(review): num_index ends at num_batchs + 1, so the averages divide
    # by one more than the batch count -- confirm the small bias is intended.
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
    """One validation epoch under torch.no_grad(); returns the averaged
    (total, cls, tsd) losses."""
    self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.eval()
    num_batchs = len(self.val_dataloader)
    num_index = 1
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    start_time = time.time()
    with torch.no_grad():
        for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
            mix = mix.to(self.device)
            ref = ref.to(self.device)
            s1 = [s1.to(self.device) for i in range(self.num_spks)]
            cls = cls.to(self.device)
            onset = onset.to(self.device)
            offset = offset.to(self.device)
            tsd_lab = tsd_lab.to(self.device)
            # zero_grad has no effect under no_grad(); kept as-is
            self.optimizer.zero_grad()
            if self.gpuid:
                #model = torch.nn.DataParallel(self.convtasnet)
                #out = model(mix)
                # out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
            else:
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
            # l = Loss(out, s1)
            # print('est_tsd ',est_tsd.shape)
            # print('tsd_lab ',tsd_lab.shape)
            # assert 1==2
            epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
            total_loss += epoch_loss.item()
            total_loss_cls += loss_cls.item()
            total_loss_tsd += loss_tsd.item()
            num_index += 1
    end_time = time.time()
    # NOTE(review): divides by num_batchs + 1 (same off-by-one as train()).
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
    """One evaluation epoch over the held-out test set (no grad); returns
    the averaged (total, cls, tsd) losses."""
    self.logger.info(
        'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.eval()
    num_batchs = len(self.test_dataloader)
    num_index = 1
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    start_time = time.time()
    with torch.no_grad():
        for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
            mix = mix.to(self.device)
            ref = ref.to(self.device)
            s1 = [s1.to(self.device) for i in range(self.num_spks)]
            cls = cls.to(self.device)
            onset = onset.to(self.device)
            offset = offset.to(self.device)
            tsd_lab = tsd_lab.to(self.device)
            # zero_grad has no effect under no_grad(); kept as-is
            self.optimizer.zero_grad()
            if self.gpuid:
                # model = torch.nn.DataParallel(self.convtasnet)
                # out = model(mix)
                # out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
            else:
                est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
            # l = Loss(out, s1)
            epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
            total_loss += epoch_loss.item()
            total_loss_cls += loss_cls.item()
            total_loss_tsd += loss_tsd.item()
            num_index += 1
    end_time = time.time()
    # NOTE(review): divides by num_batchs + 1 (same off-by-one as train()).
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def run(self):
    """Training driver: baseline validation, then train/validate/test per
    epoch with fixed-schedule scheduler stepping, early stopping on the
    validation loss, checkpointing, and a final loss-curve plot."""
    train_loss = []
    val_loss = []
    test_loss = []
    with torch.cuda.device(self.gpuid[0]):
        self.save_checkpoint(self.cur_epoch, best=False)
        v_loss,_,_ = self.validation(self.cur_epoch)
        best_loss = v_loss
        self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
        no_improve = 0
        # starting training part
        while self.cur_epoch < self.total_epoch:
            self.cur_epoch += 1
            t_loss,_,_ = self.train(self.cur_epoch)
            v_loss,_,_ = self.validation(self.cur_epoch)
            tt_loss,_,_ = self.test(self.cur_epoch)
            train_loss.append(t_loss)
            val_loss.append(v_loss)
            test_loss.append(tt_loss)
            # schedule here
            self.scheduler.step()
            if v_loss >= best_loss:
                no_improve += 1
                self.logger.info(
                    'No improvement, Best Loss: {:.4f}'.format(best_loss))
            else:
                # new best on validation: checkpoint and log matching test loss
                best_loss = v_loss
                no_improve = 0
                self.save_checkpoint(self.cur_epoch, best=True)
                self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
                    self.cur_epoch, best_loss))
                self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
                    self.cur_epoch, tt_loss))
            if no_improve == self.early_stop:
                self.logger.info(
                    "Stop training cause no impr for {:d} epochs".format(
                        no_improve))
                break
        self.save_checkpoint(self.cur_epoch, best=False)
        self.logger.info("Training for {:d}/{:d} epoches done!".format(
            self.cur_epoch, self.total_epoch))
        # draw loss image
        # NOTE(review): x assumes cur_epoch started at 0; resumed runs would
        # hit a length mismatch here -- confirm.
        plt.title("Loss of train, val and test")
        x = [i for i in range(self.cur_epoch)]
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.convtasnet.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 12,954 | 44.297203 | 146 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_one_hot.py | import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss, get_loss_one_hot, get_loss_one_hot_focal, get_loss_one_hot_focal_sim
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
    """Wire up dataloaders, model, optimizer, scheduler and options.

    ``opt`` is the parsed YAML option dict (kept on ``self.opt`` because
    train/validation/test read ``opt['sim']``/``opt['focal_loss']``).
    Fixes two defects of the original: (1) ``load_state_dict`` mutates in
    place and does NOT return the module/optimizer, so assigning its return
    value (and calling ``.to`` on it) crashed on resume; (2) the CPU branch
    never set ``self.gpuid`` although train() reads it.
    """
    super().__init__()
    self.train_dataloader = train_dataloader
    self.val_dataloader = val_dataloader
    self.test_dataloader = test_dataloader
    self.scheduler = scheduler
    self.num_spks = opt['num_spks']
    self.cur_epoch = 0
    self.total_epoch = opt['train']['epoch']
    self.early_stop = opt['train']['early_stop']
    self.opt = opt
    self.print_freq = opt['logger']['print_freq']
    # logger is assumed to be configured by the caller via setup_logger
    self.logger = logging.getLogger(opt['logger']['name'])
    self.checkpoint = opt['train']['path']
    self.name = opt['name']
    self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
    self.audio_length = opt['datasets']['audio_setting']['audio_length']
    self.sr = opt['datasets']['audio_setting']['sample_rate']
    self.ratio = 0.3  # reserved constant; not read elsewhere in this class
    # self.gpuid is always defined so later `if self.gpuid:` checks work
    self.gpuid = opt['train']['gpuid']
    if self.gpuid:
        self.logger.info('Load Nvida GPU .....')
        self.device = torch.device('cuda:{}'.format(self.gpuid[0]))
    else:
        self.logger.info('Load CPU ...........')
        self.device = torch.device('cpu')
    self.convtasnet = Conv_Tasnet.to(self.device)
    self.logger.info(
        'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
    if opt['resume']['state']:
        ckp = torch.load(opt['resume']['path'], map_location='cpu')
        self.cur_epoch = ckp['epoch']
        self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
            opt['resume']['path'], self.cur_epoch))
        # load in place, then re-move the module to the target device
        self.convtasnet.load_state_dict(ckp['model_state_dict'])
        self.convtasnet = self.convtasnet.to(self.device)
        optimizer.load_state_dict(ckp['optim_state_dict'])
    self.optimizer = optimizer
    if opt['optim']['clip_norm']:
        self.clip_norm = opt['optim']['clip_norm']
        self.logger.info(
            "Gradient clipping by {}, default L2".format(self.clip_norm))
    else:
        self.clip_norm = 0
def train(self, epoch):
    """One training epoch; returns the averaged (total, cls, tsd) losses.

    Fixes a NameError: the 'sim' and 'focal_loss' branches referenced
    ``est_tsd``, which is never defined — the model returns
    ``est_tsd_time`` (the upsampled output is ``est_tsd_time_up``).
    """
    self.logger.info(
        'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.train()
    num_batchs = len(self.train_dataloader)
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    num_index = 1
    start_time = time.time()
    for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.train_dataloader:
        mix = mix.to(self.device)
        ref = ref.to(self.device)
        s1 = [s1.to(self.device) for i in range(self.num_spks)]
        cls = cls.to(self.device)
        cls_index = cls.argmax(1)  # one-hot -> class index for the model
        onset = onset.to(self.device)
        offset = offset.to(self.device)
        tsd_lab = tsd_lab.to(self.device)
        sim_lab = sim_lab.to(self.device)
        L_lab = L_lab.to(self.device)
        self.optimizer.zero_grad()
        # single-device forward; both branches of the original were identical
        est_cls, est_tsd_time, est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
        if self.opt['sim']:
            epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(
                est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
        elif self.opt['focal_loss']:
            epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(
                est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
        else:
            epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(
                est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
        total_loss += epoch_loss.item()
        total_loss_cls += loss_cls.item()
        total_loss_tsd += loss_tsd.item()
        epoch_loss.backward()
        if self.clip_norm:
            torch.nn.utils.clip_grad_norm_(
                self.convtasnet.parameters(), self.clip_norm)
        self.optimizer.step()
        if num_index % self.print_freq == 0:
            message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
                epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
                total_loss_cls / num_index,
                total_loss_tsd / num_index)
            self.logger.info(message)
        num_index += 1
    end_time = time.time()
    # averages divide by num_index (== batches + 1), matching the logs above
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
    """One validation epoch under torch.no_grad(); returns the averaged
    (total, cls, tsd) losses.

    Fixes a NameError: the 'sim' branch referenced ``est_tsd``, which is
    never defined — the model returns ``est_tsd_time``.
    """
    self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.eval()
    num_batchs = len(self.val_dataloader)
    num_index = 1
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    start_time = time.time()
    with torch.no_grad():
        for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
            mix = mix.to(self.device)
            ref = ref.to(self.device)
            s1 = [s1.to(self.device) for i in range(self.num_spks)]
            cls = cls.to(self.device)
            cls_index = cls.argmax(1)  # one-hot -> class index
            onset = onset.to(self.device)
            offset = offset.to(self.device)
            tsd_lab = tsd_lab.to(self.device)
            sim_lab = sim_lab.to(self.device)
            L_lab = L_lab.to(self.device)
            # forward only; the original's GPU/CPU branches were identical
            # (and its optimizer.zero_grad() is a no-op under no_grad)
            est_cls, est_tsd_time, est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
            if self.opt['sim']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(
                    est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
            elif self.opt['focal_loss']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(
                    est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
            else:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(
                    est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
            total_loss += epoch_loss.item()
            total_loss_cls += loss_cls.item()
            total_loss_tsd += loss_tsd.item()
            num_index += 1
    end_time = time.time()
    # averages divide by num_index (== batches + 1), matching train()
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
    """One evaluation epoch over the test set (no grad); returns the
    averaged (total, cls, tsd) losses.

    Fixes a NameError: both the 'sim' and 'focal_loss' branches referenced
    ``est_tsd``, which is never defined — the model returns
    ``est_tsd_time``.
    """
    self.logger.info(
        'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
    self.convtasnet.eval()
    num_batchs = len(self.test_dataloader)
    num_index = 1
    total_loss = 0.0
    total_loss_cls = 0.0
    total_loss_tsd = 0.0
    start_time = time.time()
    with torch.no_grad():
        for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
            mix = mix.to(self.device)
            ref = ref.to(self.device)
            s1 = [s1.to(self.device) for i in range(self.num_spks)]
            cls = cls.to(self.device)
            cls_index = cls.argmax(1)  # one-hot -> class index
            onset = onset.to(self.device)
            offset = offset.to(self.device)
            tsd_lab = tsd_lab.to(self.device)
            sim_lab = sim_lab.to(self.device)
            L_lab = L_lab.to(self.device)
            # forward only; the original's GPU/CPU branches were identical
            # (and its optimizer.zero_grad() is a no-op under no_grad)
            est_cls, est_tsd_time, est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
            if self.opt['sim']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(
                    est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
            elif self.opt['focal_loss']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(
                    est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
            else:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(
                    est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
            total_loss += epoch_loss.item()
            total_loss_cls += loss_cls.item()
            total_loss_tsd += loss_tsd.item()
            num_index += 1
    end_time = time.time()
    # averages divide by num_index (== batches + 1), matching train()
    total_loss = total_loss / num_index
    total_loss_cls = total_loss_cls / num_index
    total_loss_tsd = total_loss_tsd / num_index
    message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
        epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
    self.logger.info(message)
    return total_loss, total_loss_cls, total_loss_tsd
def run(self):
    """Training driver: baseline validation, then train/validate/test per
    epoch with fixed-schedule scheduler stepping, early stopping on the
    validation loss, checkpointing, and a final loss-curve plot."""
    train_loss = []
    val_loss = []
    test_loss = []
    with torch.cuda.device(self.gpuid[0]):
        self.save_checkpoint(self.cur_epoch, best=False)
        v_loss,_,_ = self.validation(self.cur_epoch)
        best_loss = v_loss
        self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
        no_improve = 0
        # starting training part
        while self.cur_epoch < self.total_epoch:
            self.cur_epoch += 1
            t_loss,_,_ = self.train(self.cur_epoch)
            v_loss,_,_ = self.validation(self.cur_epoch)
            tt_loss,_,_ = self.test(self.cur_epoch)
            train_loss.append(t_loss)
            val_loss.append(v_loss)
            test_loss.append(tt_loss)
            # schedule here
            self.scheduler.step()
            if v_loss >= best_loss:
                no_improve += 1
                self.logger.info(
                    'No improvement, Best Loss: {:.4f}'.format(best_loss))
            else:
                # new best on validation: checkpoint and log matching test loss
                best_loss = v_loss
                no_improve = 0
                self.save_checkpoint(self.cur_epoch, best=True)
                self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
                    self.cur_epoch, best_loss))
                self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
                    self.cur_epoch, tt_loss))
            if no_improve == self.early_stop:
                self.logger.info(
                    "Stop training cause no impr for {:d} epochs".format(
                        no_improve))
                break
        self.save_checkpoint(self.cur_epoch, best=False)
        self.logger.info("Training for {:d}/{:d} epoches done!".format(
            self.cur_epoch, self.total_epoch))
        # draw loss image
        # NOTE(review): x assumes cur_epoch started at 0; resumed runs would
        # hit a length mismatch here -- confirm.
        plt.title("Loss of train, val and test")
        x = [i for i in range(self.cur_epoch)]
        plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
        plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
        plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
        plt.legend()
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
    '''
    save model
    best: the best model
    '''
    # Checkpoints live under <train.path>/<name>/; the best model is kept
    # as 'best.pt' separately from the most recent one ('last.pt').
    os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
    torch.save({
        'epoch': epoch,
        'model_state_dict': self.convtasnet.state_dict(),
        'optim_state_dict': self.optimizer.state_dict()
    },
        os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 15,168 | 48.734426 | 146 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/data_loader/AudioData.py | import torch.nn.functional as F
from utils import util
import torch
import torchaudio
import sys
sys.path.append('../')
def read_wav(fname, return_rate=False):
    '''
    Read a wav file with torchaudio.
    input:
        fname: wav file path
        return_rate: when True also return the sampling rate
    output:
        the squeezed waveform tensor of size C x L (L audio frames,
        C channels), optionally followed by the sample rate.
    '''
    waveform, rate = torchaudio.load(fname, channels_first=True)
    audio = waveform.squeeze()
    return (audio, rate) if return_rate else audio
def write_wav(fname, src, sample_rate):
    '''
    Persist audio frames to disk.
    input:
        fname: destination wav file path
        src: frames of audio
        sample_rate: integer sample rate of the audio
    output:
        None
    '''
    torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
    '''
    Class that reads Wav format files
    Input:
        scp_path (str): a different scp file address
        sample_rate (int, optional): sample rate (default: 8000)
        chunk_size (int, optional): split audio size (default: 32000(4 s))
        least_size (int, optional): Minimum split size (default: 16000(2 s))
    Output:
        split audio (list)
    '''

    def __init__(self, scp_path, sample_rate=8000, chunk_size=32000, least_size=16000):
        # Eagerly loads and splits every utterance listed in the scp file
        # into fixed-size chunks stored in self.audio.
        super(AudioReader, self).__init__()
        self.sample_rate = sample_rate
        self.index_dict = util.handle_scp(scp_path)
        self.keys = list(self.index_dict.keys())
        self.audio = []
        self.chunk_size = chunk_size
        self.least_size = least_size
        self.split()

    def split(self):
        '''
        split audio with chunk_size and least_size
        '''
        for key in self.keys:
            utt = read_wav(self.index_dict[key])
            # too short to use at all
            if utt.shape[0] < self.least_size:
                continue
            # NOTE(review): an utterance whose length equals least_size falls
            # through both branches below and is silently dropped -- confirm
            # whether it should be padded like the > least_size case.
            if utt.shape[0] > self.least_size and utt.shape[0] < self.chunk_size:
                # pad up to one full chunk
                gap = self.chunk_size-utt.shape[0]
                self.audio.append(F.pad(utt, (0, gap), mode='constant'))
            if utt.shape[0] >= self.chunk_size:
                # sliding window of chunk_size with hop least_size;
                # a trailing remainder shorter than chunk_size is discarded
                start = 0
                while True:
                    if start + self.chunk_size > utt.shape[0]:
                        break
                    self.audio.append(utt[start:start+self.chunk_size])
                    start += self.least_size
if __name__ == "__main__":
a = AudioReader("/home/likai/data1/create_scp/cv_mix.scp")
audio = a.audio
print(len(audio))
| 2,751 | 30.272727 | 87 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/data_loader/Dataset.py | import sys
sys.path.append('../')
from data_loader.AudioData import AudioReader
import torch
from torch.utils.data import Dataset
import numpy as np
class Datasets(Dataset):
    '''
    Paired audio dataset of mixture / target (s1) / reference chunks.
    mix_scp: file path of mix audio (type: str)
    s1_scp: file path of target-speaker audio (type: str)
    ref_scp: file path of ground truth audio (type: str)
    chunk_size (int, optional): split audio size (default: 64000)
    least_size (int, optional): Minimum split size (default: 64000)
    '''

    def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, sample_rate=32000, chunk_size=64000, least_size=64000):
        super(Datasets, self).__init__()

        def _read(scp):
            # every scp is chunked with identical settings
            return AudioReader(scp, sample_rate=sample_rate,
                               chunk_size=chunk_size,
                               least_size=least_size).audio

        self.mix_audio = _read(mix_scp)
        self.ref_audio = _read(ref_scp)
        self.s1_audio = _read(s1_scp)

    def __len__(self):
        return len(self.mix_audio)

    def __getitem__(self, index):
        # (mixture, target, reference) triple for one chunk
        return self.mix_audio[index], self.s1_audio[index], self.ref_audio[index]
if __name__ == "__main__":
    # Smoke test: build the dataset from local scp files and verify every
    # mixture chunk has the expected 64000-sample length.
    dataset = Datasets("/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_mix.scp",
                       "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s1.scp", "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_re.scp")
    for i in dataset.mix_audio:
        print(i.shape)
        if i.shape[0] != 64000:
            print('fail')
| 1,666 | 36.886364 | 183 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/data_loader/AudioReader.py | import sys
sys.path.append('../')
import torchaudio
import torch
from utils.util import handle_scp
def read_wav(fname, return_rate=False):
    '''
    Load a wav file with torchaudio.

    input:
        fname: wav file path
        return_rate: also return the sampling rate when True
    output:
        the squeezed waveform tensor of size C x L (L = audio frames,
        C = channels), optionally followed by the sample rate
    '''
    waveform, rate = torchaudio.load(fname, channels_first=True)
    waveform = waveform.squeeze()
    if return_rate:
        return waveform, rate
    return waveform
def write_wav(fname, src, sample_rate):
    '''
    Write a waveform tensor to a wav file via torchaudio.

    input:
        fname: destination wav file path
        src: tensor of audio frames
        sample_rate: integer sample rate of the audio
    output:
        None
    '''
    torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
    '''
    Reader for the wav files listed in an scp script.

    Maps each scp key to its waveform; supports lookup by integer
    position or by key, and iteration over (key, waveform) pairs.
    '''

    def __init__(self, scp_path, sample_rate=8000):
        super(AudioReader, self).__init__()
        self.sample_rate = sample_rate
        self.index_dict = handle_scp(scp_path)
        self.keys = list(self.index_dict.keys())

    def _load(self, key):
        # Load one utterance and verify it matches the expected rate.
        src, sr = read_wav(self.index_dict[key], return_rate=True)
        if self.sample_rate is not None and sr != self.sample_rate:
            raise RuntimeError('SampleRate mismatch: {:d} vs {:d}'.format(
                sr, self.sample_rate))
        return src

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        for key in self.keys:
            yield key, self._load(key)

    def __getitem__(self, index):
        if type(index) not in [int, str]:
            raise IndexError('Unsupported index type: {}'.format(type(index)))
        if type(index) == int:
            num_uttrs = len(self.keys)
            # BUG FIX: the original guard was 'num_uttrs < index and
            # index < 0', which can never be True, so out-of-range integer
            # indices fell through to self.keys[index] and raised a bare
            # IndexError instead of the intended KeyError.  Negative
            # indices within range are still allowed (Python semantics).
            if index >= num_uttrs or index < -num_uttrs:
                raise KeyError('Interger index out of range, {:d} vs {:d}'.format(
                    index, num_uttrs))
            index = self.keys[index]
        if index not in self.index_dict:
            raise KeyError("Missing utterance {}!".format(index))
        return self._load(index)
if __name__ == "__main__":
    # Smoke test: read one utterance from a local scp file.
    r = AudioReader('/home/likai/data1/create_scp/cv_s2.scp')
    index = 0
    print(r[1])
| 2,556 | 28.732558 | 82 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/data_loader/Dataset_light.py | import sys
sys.path.append('../')
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import random
import numpy as np
import soundfile as sf
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from model.model import STFT
import os
import pickle
import math
nFrameLen = 512    # STFT analysis window length in samples
nFrameShift = 256  # STFT hop size in samples
nFFT = 512         # FFT size
# Module-level STFT instance used by Datasets.get_mean_std for feature stats.
stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
def time_to_frame(tm, M):
    """Map a time *tm* (seconds, on a 10 s clip) to a frame index,
    clamped into [0, M]."""
    frame = int(tm / (10.0 / M))
    return min(max(frame, 0), M)
def read_wav(fname, return_rate=False):
    '''
    Load a wav file with torchaudio.

    input:
        fname: wav file path
        return_rate: also return the sampling rate when True
    output:
        the squeezed waveform tensor of size C x L (L = audio frames,
        C = channels), optionally followed by the sample rate
    '''
    waveform, rate = torchaudio.load(fname, channels_first=True)
    waveform = waveform.squeeze()
    if return_rate:
        return waveform, rate
    return waveform
class Datasets(Dataset):
    '''
    Target-sound-detection dataset.

    Reads mixture / target / reference wav paths from scp files plus the
    per-utterance (class, onset, offset) annotations from inf_scp, and
    builds frame-level detection labels on the fly.

    mix_scp: scp file of mixture audio (type: str)
    s1_scp:  scp file of the target-source ("label") audio
    ref_scp: scp file of the reference audio
    inf_scp: scp file with (key, class, onset, offset) per line
    sr, cls_num, audio_length, hop_size: audio and label geometry
    '''

    def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, sr=16000, cls_num=50, audio_length=10, hop_size=256):
        super(Datasets, self).__init__()
        self.mix_audio = handle_scp(mix_scp)
        self.s1_audio = handle_scp(s1_scp)
        self.ref_audio = handle_scp(ref_scp)
        self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
        self.sr = sr
        self.cls_num = cls_num
        self.audio_length = audio_length
        self.samples = sr * audio_length
        # Number of detection-label frames; presumably STFT frames after
        # one 2x time downsampling in the model -- TODO confirm.
        self.max_frame = (self.samples // hop_size - 1) // 2
        self.key = list(self.mix_audio.keys())

    def __len__(self):
        # One item per mixture utterance.
        return len(self.key)

    def __getitem__(self, index):
        '''
        Returns (mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab):
        three waveforms, a one-hot class vector, the raw onset/offset
        times, and frame-level labels at two time resolutions.
        '''
        index = self.key[index]
        # Companion files share the mixture key with different suffixes.
        s1_index = index.replace('.wav', '_lab.wav')
        ref_index = index.replace('.wav', '_re.wav')
        mix = read_wav(self.mix_audio[index])
        s1 = read_wav(self.s1_audio[s1_index])
        ref = read_wav(self.ref_audio[ref_index])
        # One-hot target event class.
        cls = torch.zeros(self.cls_num)
        cls[self.clss[index]] = 1.
        tsd_lab = torch.zeros(self.max_frame)
        sim_lab = torch.zeros(self.max_frame)
        # NOTE(review): real_time is computed below but never returned.
        real_time = torch.zeros(2)
        tmp_st = math.floor(self.onsets[index])
        if tmp_st < 0:
            tmp_st = 0
        tmp_ed = math.ceil(self.offsets[index])
        if tmp_ed > 10:
            tmp_ed = 10
        real_time[0] = tmp_st
        real_time[1] = tmp_ed
        # Coarser second label resolution of M frames.
        M = 156
        start_frame = round(self.max_frame * self.onsets[index] / self.audio_length) if round(self.max_frame * self.onsets[index] / self.audio_length) >= 0 else 0
        end_frame = round(self.max_frame * self.offsets[index] / self.audio_length) if round(self.max_frame * self.offsets[index] / self.audio_length) < self.max_frame else self.max_frame - 1
        L_st = time_to_frame(self.onsets[index], M)
        L_ed = time_to_frame(self.offsets[index], M)
        L_lab = torch.zeros(M)
        L_lab[L_st:L_ed] = 1.0
        # tsd_lab: 1 inside the event; sim_lab: +1 inside, -1 outside.
        tsd_lab[start_frame:end_frame] = 1.
        sim_lab[start_frame:end_frame] = 1.0
        if start_frame>0:
            sim_lab[0:start_frame] = -1.0
        if end_frame < self.max_frame:
            sim_lab[end_frame:] = -1.0
        onset = self.onsets[index]
        offset = self.offsets[index]
        return mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab

    def get_mean_std(self):
        '''
        Estimate per-frequency mean/std of log-power STFT features over
        (up to) the first 5001 utterances and pickle them to preNormFile.
        '''
        preNormFile = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/norm.info'
        # if os.path.exists(preNormFile):
        #     print('normfile exists, just load it!')
        #     return
        fea_norm_cal = []
        lab_norm_cal = []
        print('Calculate mean and std.')
        num = 0
        for i in self.key:
            mix = read_wav(self.mix_audio[i])
            print(self.mix_audio[i])
            # NOTE(review): the label path is derived from the MIX scp
            # entry rather than self.s1_audio -- verify this is intended.
            s1 = read_wav(self.mix_audio[i].replace('.wav', '_lab.wav'))
            fea, _= stft(mix[None, :]) #1,f,t
            lab, _= stft(s1[None, :]) #1,f,t
            # Log power spectra; 1e-20 guards against log(0).
            fea = torch.log(fea ** 2 + 1e-20)
            lab = torch.log(lab ** 2 + 1e-20)
            fea = fea[0].numpy()
            lab = lab[0].numpy()
            if num == 0:
                fea_norm_cal = fea
                lab_norm_cal = lab
            else:
                fea_norm_cal = np.concatenate((fea_norm_cal,fea), -1)
                lab_norm_cal = np.concatenate((lab_norm_cal,lab), -1)
            num += 1
            if num > 5000:
                break
        n_frame = np.shape(fea_norm_cal)[-1]
        self.fea_mean = np.mean(fea_norm_cal, axis=-1)
        self.lab_mean = np.mean(lab_norm_cal, axis=-1)
        print(n_frame)
        print('fea_mean and fea_std size: {}'.format(np.shape(self.fea_mean)[0]))
        print('lab_mean and lab_std size: {}'.format(np.shape(self.lab_mean)[0]))
        # Accumulate squared deviations frame by frame, then normalize.
        for i in range(n_frame):
            if i == 0:
                self.fea_std = np.square(fea_norm_cal[:,i] - self.fea_mean)
                self.lab_std = np.square(lab_norm_cal[:,i] - self.lab_mean)
            else:
                self.fea_std += np.square(fea_norm_cal[:,i] - self.fea_mean)
                self.lab_std += np.square(lab_norm_cal[:,i] - self.lab_mean)
        self.fea_std = np.sqrt(self.fea_std / n_frame)
        self.lab_std = np.sqrt(self.lab_std / n_frame)
        print(f'restore mean and std in {preNormFile}')
        export_dict = {
            'feaMean': self.fea_mean.astype(np.float32),
            'feaStd': self.fea_std.astype(np.float32),
            'labMean': self.lab_mean.astype(np.float32),
            'labStd': self.lab_std.astype(np.float32)
        }
        with open(preNormFile, 'wb') as fid:
            pickle.dump(export_dict, fid)
class Datasets_tse(Dataset):
    '''
    Variant of Datasets that additionally loads a pre-computed
    target-sound-extraction (TSE) waveform per utterance (tse_scp,
    keys suffixed '_tse.wav').  Label construction is identical to
    Datasets.__getitem__.
    '''

    def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, tse_scp=None, sr=16000, cls_num=50, audio_length=10, hop_size=256):
        super(Datasets_tse, self).__init__()
        self.mix_audio = handle_scp(mix_scp)
        self.s1_audio = handle_scp(s1_scp)
        self.ref_audio = handle_scp(ref_scp)
        self.tse_audio = handle_scp(tse_scp)
        self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
        self.sr = sr
        self.cls_num = cls_num
        self.audio_length = audio_length
        self.samples = sr * audio_length
        # Number of detection-label frames; presumably matches the model's
        # output time resolution -- TODO confirm.
        self.max_frame = (self.samples // hop_size - 1) // 2
        self.key = list(self.mix_audio.keys())

    def __len__(self):
        return len(self.key)

    def __getitem__(self, index):
        '''
        Returns (mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab,
        L_lab) -- same as Datasets plus the TSE waveform.
        '''
        index = self.key[index]
        # Companion files share the mixture key with different suffixes.
        s1_index = index.replace('.wav', '_lab.wav')
        ref_index = index.replace('.wav', '_re.wav')
        tse_index = index.replace('.wav','_tse.wav')
        mix = read_wav(self.mix_audio[index])
        s1 = read_wav(self.s1_audio[s1_index])
        ref = read_wav(self.ref_audio[ref_index])
        tse = read_wav(self.tse_audio[tse_index])
        # One-hot target event class.
        cls = torch.zeros(self.cls_num)
        cls[self.clss[index]] = 1.
        tsd_lab = torch.zeros(self.max_frame)
        sim_lab = torch.zeros(self.max_frame)
        # NOTE(review): real_time is computed below but never returned.
        real_time = torch.zeros(2)
        tmp_st = math.floor(self.onsets[index])
        if tmp_st < 0:
            tmp_st = 0
        tmp_ed = math.ceil(self.offsets[index])
        if tmp_ed > 10:
            tmp_ed = 10
        real_time[0] = tmp_st
        real_time[1] = tmp_ed
        # Coarser second label resolution of M frames.
        M = 156
        start_frame = round(self.max_frame * self.onsets[index] / self.audio_length) if round(self.max_frame * self.onsets[index] / self.audio_length) >= 0 else 0
        end_frame = round(self.max_frame * self.offsets[index] / self.audio_length) if round(self.max_frame * self.offsets[index] / self.audio_length) < self.max_frame else self.max_frame - 1
        L_st = time_to_frame(self.onsets[index], M)
        L_ed = time_to_frame(self.offsets[index], M)
        L_lab = torch.zeros(M)
        L_lab[L_st:L_ed] = 1.0
        # tsd_lab: 1 inside the event; sim_lab: +1 inside, -1 outside.
        tsd_lab[start_frame:end_frame] = 1.
        sim_lab[start_frame:end_frame] = 1.0
        if start_frame>0:
            sim_lab[0:start_frame] = -1.0
        if end_frame < self.max_frame:
            sim_lab[end_frame:] = -1.0
        onset = self.onsets[index]
        offset = self.offsets[index]
        return mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab
if __name__ == "__main__":
    # Smoke test: construct the dataset from local scp files and print
    # the utterance keys.  (The positional args map onto Datasets'
    # mix/s1/ref/inf scp parameters plus sr, cls_num and audio_length.)
    datasets = Datasets("/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp",
                        "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp",
                        "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp",
                        "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_inf.scp",
                        "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_tse.scp",
                        16000,
                        50,
                        10)
    print(datasets.key)
    # datasets.get_mean_std()
| 9,413 | 37.740741 | 191 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/utils/util.py | import torch
import torch.nn as nn
def handle_scp(scp_path):
    '''
    Read a Kaldi-style .scp script file.

    Each line must contain exactly two whitespace-separated fields:
    a key and a wave-file path.

    input:
        scp_path: .scp file's file path
    output:
        scp_dict: {'key': 'wave file path'}
    raises:
        RuntimeError: if a line does not have exactly two fields
        ValueError: if the same key appears more than once
    '''
    scp_dict = dict()
    # 'with' guarantees the file handle is closed (the original
    # open(...).readlines() leaked it).
    with open(scp_path, 'r') as fid:
        for line, raw in enumerate(fid, start=1):
            scp_parts = raw.strip().split()
            if len(scp_parts) != 2:
                raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
                    scp_path, line, scp_parts))
            key, value = scp_parts
            if key in scp_dict:
                raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
                    key, scp_path))
            scp_dict[key] = value
    return scp_dict
def handle_scp_inf(scp_path):
    '''
    Read an annotation scp file with lines of the form
    "key class onset offset".

    input:
        scp_path: .scp file's file path
    output:
        (cls_dict, onset_dict, offset_dict) keyed by utterance key,
        with values cast to int / float / float respectively
    raises:
        RuntimeError: if a line does not have exactly four fields
        ValueError: if the same key appears more than once
    '''
    scp_dict_cls = dict()
    scp_dict_onset = dict()
    scp_dict_offset = dict()
    # 'with' guarantees the file handle is closed (the original
    # open(...).readlines() leaked it).
    with open(scp_path, 'r') as fid:
        for line, raw in enumerate(fid, start=1):
            scp_parts = raw.strip().split()
            if len(scp_parts) != 4:
                raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
                    scp_path, line, scp_parts))
            key, cls, onset, offset = scp_parts
            if key in scp_dict_cls:
                raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
                    key, scp_path))
            scp_dict_cls[key] = int(cls)
            scp_dict_onset[key] = float(onset)
            scp_dict_offset[key] = float(offset)
    return scp_dict_cls, scp_dict_onset, scp_dict_offset
def check_parameters(net):
    '''
    Return the total number of parameters of *net*, in millions (Mb).
    '''
    total = 0
    for param in net.parameters():
        total += param.numel()
    return total / 10**6
| 1,932 | 26.225352 | 79 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/model/model_rnn.py | import sys
sys.path.append('../')
import torch.nn.functional as F
from torch import nn
import torch
from utils.util import check_parameters
import warnings
warnings.filterwarnings('ignore')
class GlobalLayerNorm(nn.Module):
    '''
    Global Layer Normalization (gLN): per-sample statistics are computed
    over every non-batch dimension, with an optional learnable
    per-channel affine transform.

    dim:   channel dimension size
    shape: 3 for [N, C, L] inputs, 4 for [N, C, K, S] inputs
    eps:   numerical-stability constant added to the variance
    elementwise_affine: learn per-channel weight/bias when True
    '''

    def __init__(self, dim, shape, eps=1e-8, elementwise_affine=True):
        super(GlobalLayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        if not self.elementwise_affine:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        elif shape == 3:
            # Broadcast over [N, C, L].
            self.weight = nn.Parameter(torch.ones(self.dim, 1))
            self.bias = nn.Parameter(torch.zeros(self.dim, 1))
        elif shape == 4:
            # Broadcast over [N, C, K, S].
            self.weight = nn.Parameter(torch.ones(self.dim, 1, 1))
            self.bias = nn.Parameter(torch.zeros(self.dim, 1, 1))

    def forward(self, x):
        # x: [N, C, L] or [N, C, K, S]; other ranks pass through untouched.
        if x.dim() == 4:
            reduce_dims = (1, 2, 3)
        elif x.dim() == 3:
            reduce_dims = (1, 2)
        else:
            return x
        mean = torch.mean(x, reduce_dims, keepdim=True)
        var = torch.mean((x - mean) ** 2, reduce_dims, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        if self.elementwise_affine:
            normed = self.weight * normed + self.bias
        return normed
class CumulativeLayerNorm(nn.LayerNorm):
    '''
    Channel-wise layer normalization: moves the channel axis last,
    applies nn.LayerNorm over it only, then restores the layout.

    dim: the channel dimension to normalize
    elementwise_affine: learnable per-channel affine parameters
    '''

    def __init__(self, dim, elementwise_affine=True):
        super(CumulativeLayerNorm, self).__init__(
            dim, elementwise_affine=elementwise_affine, eps=1e-8)

    def forward(self, x):
        if x.dim() == 4:
            # [N, C, K, S] -> [N, K, S, C], normalize C, then back.
            out = super().forward(x.permute(0, 2, 3, 1).contiguous())
            return out.permute(0, 3, 1, 2).contiguous()
        if x.dim() == 3:
            # [N, C, L] -> [N, L, C], normalize C, then back.
            out = super().forward(torch.transpose(x, 1, 2))
            return torch.transpose(out, 1, 2)
        return x
def select_norm(norm, dim, shape):
    """Factory for the normalization layer named by *norm*.

    'gln' -> GlobalLayerNorm, 'cln' -> CumulativeLayerNorm,
    'ln' -> GroupNorm(1, dim) (i.e. layer norm over channels),
    anything else -> BatchNorm1d.
    """
    builders = {
        'gln': lambda: GlobalLayerNorm(dim, shape, elementwise_affine=True),
        'cln': lambda: CumulativeLayerNorm(dim, elementwise_affine=True),
        'ln': lambda: nn.GroupNorm(1, dim, eps=1e-8),
    }
    return builders.get(norm, lambda: nn.BatchNorm1d(dim))()
class Encoder(nn.Module):
    '''
    Conv-TasNet encoder: a single 1-D convolution over the raw waveform
    followed by ReLU.

    kernel_size: filter length (the stride is kernel_size // 2)
    out_channels: number of learned filters
    '''

    def __init__(self, kernel_size=2, out_channels=64):
        super(Encoder, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=1, out_channels=out_channels,
                                kernel_size=kernel_size, stride=kernel_size//2, groups=1, bias=False)

    def forward(self, x):
        """
        Input:
            x: [B, T] raw waveform (B = batch size, T = samples)
        Returns:
            [B, C, T_out] non-negative encoder features
        """
        # Add the singleton channel axis, convolve, rectify.
        frames = self.conv1d(torch.unsqueeze(x, dim=1))
        return F.relu(frames)
class Decoder(nn.ConvTranspose1d):
    '''
    TasNet decoder: a transposed 1-D convolution (fractionally-strided
    convolution) mapping encoder features back to a waveform, with the
    singleton channel dimension squeezed away.
    '''

    def __init__(self, *args, **kwargs):
        super(Decoder, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        x: [B, N, L] features (a 2-D input is treated as a batch of one)
        """
        if x.dim() not in [2, 3]:
            raise RuntimeError("{} accept 3/4D tensor as input".format(
                self.__name__))
        inp = x if x.dim() == 3 else torch.unsqueeze(x, 1)
        out = super().forward(inp)
        # Keep the batch axis when the batch size is 1; otherwise drop
        # every singleton dimension.
        if torch.squeeze(out).dim() == 1:
            return torch.squeeze(out, dim=1)
        return torch.squeeze(out)
class Dual_RNN_Block(nn.Module):
    '''
    One dual-path block: an intra-chunk RNN over the chunk axis followed
    by an inter-chunk RNN over the chunk-sequence axis, each with a
    linear projection, normalization and a residual connection.

    out_channels: feature size N of the input/output
    hidden_channels: RNN hidden state size
    rnn_type: 'RNN', 'LSTM' or 'GRU' (looked up on torch.nn)
    norm: 'gln' | 'cln' | 'ln' | anything-else -> BatchNorm (see select_norm)
    dropout: RNN dropout probability (only between stacked layers)
    bidirectional: if True the linear layers consume 2*hidden features
    '''

    def __init__(self, out_channels,
                 hidden_channels, rnn_type='LSTM', norm='ln',
                 dropout=0, bidirectional=False, num_spks=2):
        super(Dual_RNN_Block, self).__init__()
        # RNN model
        self.intra_rnn = getattr(nn, rnn_type)(
            out_channels, hidden_channels, 1, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        self.inter_rnn = getattr(nn, rnn_type)(
            out_channels, hidden_channels, 1, batch_first=True, dropout=dropout, bidirectional=bidirectional)
        # Norm
        self.intra_norm = select_norm(norm, out_channels, 4)
        self.inter_norm = select_norm(norm, out_channels, 4)
        # Linear projections back to out_channels after the RNNs.
        self.intra_linear = nn.Linear(
            hidden_channels*2 if bidirectional else hidden_channels, out_channels)
        self.inter_linear = nn.Linear(
            hidden_channels*2 if bidirectional else hidden_channels, out_channels)

    def forward(self, x):
        '''
        x: [B, N, K, S]  (batch, features, chunk length, num chunks)
        out: same shape, after intra- and inter-chunk processing with
        residual connections.
        '''
        B, N, K, S = x.shape
        # intra RNN: process each chunk independently along K.
        # [BS, K, N]
        intra_rnn = x.permute(0, 3, 2, 1).contiguous().view(B*S, K, N)
        # [BS, K, H]
        intra_rnn, _ = self.intra_rnn(intra_rnn)
        # [BS, K, N]
        intra_rnn = self.intra_linear(intra_rnn.contiguous().view(B*S*K, -1)).view(B*S, K, -1)
        # [B, S, K, N]
        intra_rnn = intra_rnn.view(B, S, K, N)
        # [B, N, K, S]
        intra_rnn = intra_rnn.permute(0, 3, 2, 1).contiguous()
        intra_rnn = self.intra_norm(intra_rnn)
        # Residual connection: [B, N, K, S]
        intra_rnn = intra_rnn + x
        # inter RNN: process across chunks along S.
        # [BK, S, N]
        inter_rnn = intra_rnn.permute(0, 2, 3, 1).contiguous().view(B*K, S, N)
        # [BK, S, H]
        inter_rnn, _ = self.inter_rnn(inter_rnn)
        # [BK, S, N]
        inter_rnn = self.inter_linear(inter_rnn.contiguous().view(B*S*K, -1)).view(B*K, S, -1)
        # [B, K, S, N]
        inter_rnn = inter_rnn.view(B, K, S, N)
        # [B, N, K, S]
        inter_rnn = inter_rnn.permute(0, 3, 1, 2).contiguous()
        inter_rnn = self.inter_norm(inter_rnn)
        # Residual connection: [B, N, K, S]
        out = inter_rnn + intra_rnn
        return out
class Dual_Path_RNN(nn.Module):
    '''
    Dual-Path RNN separator: normalizes and projects encoder features,
    segments them into 50%-overlapping chunks, runs a stack of
    Dual_RNN_Block layers, and produces one masked feature map per
    speaker via a gated 1x1-conv output stage.

    in_channels: encoder feature size (input and final output channels)
    out_channels: internal feature size of the dual-path blocks
    hidden_channels: RNN hidden state size
    rnn_type / norm / dropout / bidirectional: see Dual_RNN_Block
    num_layers: number of stacked dual-path blocks
    K: chunk length used for segmentation
    num_spks: number of speakers (output masks)
    '''

    def __init__(self, in_channels, out_channels, hidden_channels,
                 rnn_type='LSTM', norm='ln', dropout=0,
                 bidirectional=False, num_layers=4, K=200, num_spks=2):
        super(Dual_Path_RNN, self).__init__()
        self.K = K
        self.num_spks = num_spks
        self.num_layers = num_layers
        self.norm = select_norm(norm, in_channels, 3)
        self.conv1d = nn.Conv1d(in_channels, out_channels, 1, bias=False)
        self.dual_rnn = nn.ModuleList([])
        for i in range(num_layers):
            self.dual_rnn.append(Dual_RNN_Block(out_channels, hidden_channels,
                                 rnn_type=rnn_type, norm=norm, dropout=dropout,
                                 bidirectional=bidirectional))
        # 1x1 conv that expands features to one set per speaker.
        self.conv2d = nn.Conv2d(
            out_channels, out_channels*num_spks, kernel_size=1)
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1, bias=False)
        self.prelu = nn.PReLU()
        self.activation = nn.ReLU()
        # gated output layer: tanh branch multiplied by sigmoid gate.
        self.output = nn.Sequential(nn.Conv1d(out_channels, out_channels, 1),
                                    nn.Tanh()
                                    )
        self.output_gate = nn.Sequential(nn.Conv1d(out_channels, out_channels, 1),
                                         nn.Sigmoid()
                                         )

    def forward(self, x):
        '''
        x: [B, N, L] encoder features
        returns: [spks, B, N, L] per-speaker masks/features
        '''
        # [B, N, L]
        x = self.norm(x)
        # [B, N, L]
        x = self.conv1d(x)
        # [B, N, K, S]
        x, gap = self._Segmentation(x, self.K)
        # [B, N*spks, K, S]
        for i in range(self.num_layers):
            x = self.dual_rnn[i](x)
        x = self.prelu(x)
        x = self.conv2d(x)
        # [B*spks, N, K, S]
        B, _, K, S = x.shape
        x = x.view(B*self.num_spks,-1, K, S)
        # [B*spks, N, L]
        x = self._over_add(x, gap)
        x = self.output(x)*self.output_gate(x)
        # [spks*B, N, L]
        x = self.end_conv1x1(x)
        # [B*spks, N, L] -> [B, spks, N, L]
        _, N, L = x.shape
        x = x.view(B, self.num_spks, N, L)
        x = self.activation(x)
        # [spks, B, N, L]
        x = x.transpose(0, 1)
        return x

    def _padding(self, input, K):
        '''
        Zero-pad the time axis so it divides evenly into 50%-overlapping
        chunks of length K, and prepend/append a half-chunk of zeros.
        K: chunk length
        P: hop size (K // 2)
        input: [B, N, L]
        returns: (padded input, gap) where gap is the amount of trailing
        padding added (removed again in _over_add).
        '''
        B, N, L = input.shape
        P = K // 2
        gap = K - (P + L % K) % K
        if gap > 0:
            pad = torch.Tensor(torch.zeros(B, N, gap)).type(input.type())
            input = torch.cat([input, pad], dim=2)
        _pad = torch.Tensor(torch.zeros(B, N, P)).type(input.type())
        input = torch.cat([_pad, input, _pad], dim=2)
        return input, gap

    def _Segmentation(self, input, K):
        '''
        Segmentation stage: split the padded sequence into chunks of
        length K with 50% overlap by interleaving two shifted views.
        K: chunk length
        P: hop size (K // 2)
        input: [B, N, L]
        output: ([B, N, K, S], gap)
        '''
        B, N, L = input.shape
        P = K // 2
        input, gap = self._padding(input, K)
        # [B, N, K, S]
        input1 = input[:, :, :-P].contiguous().view(B, N, -1, K)
        input2 = input[:, :, P:].contiguous().view(B, N, -1, K)
        input = torch.cat([input1, input2], dim=3).view(
            B, N, -1, K).transpose(2, 3)
        return input.contiguous(), gap

    def _over_add(self, input, gap):
        '''
        Overlap-add: inverse of _Segmentation — merge the overlapping
        chunks back into a sequence and strip the padding.
        input: [B, N, K, S]
        gap: trailing padding length reported by _padding
        output: [B, N, L]
        '''
        B, N, K, S = input.shape
        P = K // 2
        # [B, N, S, K]
        input = input.transpose(2, 3).contiguous().view(B, N, -1, K * 2)
        input1 = input[:, :, :, :K].contiguous().view(B, N, -1)[:, :, P:]
        input2 = input[:, :, :, K:].contiguous().view(B, N, -1)[:, :, :-P]
        input = input1 + input2
        # [B, N, L]
        if gap > 0:
            input = input[:, :, :-gap]
        return input
class Dual_RNN_model(nn.Module):
    '''
    End-to-end Dual-Path RNN separation model:
    Encoder -> Dual_Path_RNN masks -> per-speaker masking -> Decoder.

    in_channels: encoder feature size
    out_channels: internal feature size of the separator
    hidden_channels: RNN hidden size
    kernel_size: encoder/decoder kernel size (stride = kernel_size // 2)
    rnn_type / norm / dropout / bidirectional / num_layers / K / num_spks:
        forwarded to Dual_Path_RNN (see its docstring)
    '''

    def __init__(self, in_channels, out_channels, hidden_channels,
                 kernel_size=2, rnn_type='LSTM', norm='ln', dropout=0,
                 bidirectional=False, num_layers=4, K=200, num_spks=2):
        super(Dual_RNN_model,self).__init__()
        self.encoder = Encoder(kernel_size=kernel_size,out_channels=in_channels)
        self.separation = Dual_Path_RNN(in_channels, out_channels, hidden_channels,
                          rnn_type=rnn_type, norm=norm, dropout=dropout,
                          bidirectional=bidirectional, num_layers=num_layers, K=K, num_spks=num_spks)
        self.decoder = Decoder(in_channels=in_channels, out_channels=1, kernel_size=kernel_size, stride=kernel_size//2, bias=False)
        self.num_spks = num_spks

    def forward(self, x):
        '''
        x: [B, L] raw mixture waveform
        returns: list of num_spks decoded waveforms
        '''
        # [B, N, L] encoder features
        e = self.encoder(x)
        # [spks, B, N, L] per-speaker masks
        s = self.separation(e)
        # Apply each mask to the shared encoder output, then decode.
        out = [s[i]*e for i in range(self.num_spks)]
        audio = [self.decoder(out[i]) for i in range(self.num_spks)]
        return audio
if __name__ == "__main__":
    # Sanity check: build a model, push a dummy waveform through it and
    # report the raw parameter count.
    rnn = Dual_RNN_model(256, 64, 128,bidirectional=True, norm='ln', num_layers=6)
    #encoder = Encoder(16, 512)
    x = torch.ones(1, 100)
    out = rnn(x)
    print("{:.3f}".format(check_parameters(rnn)*1000000))
    print(rnn)
| 15,142 | 35.314149 | 131 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/model/PANNS.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
    """Xavier-initialize a Linear/Conv layer's weight and zero its bias
    when one exists."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
def init_bn(bn):
    """Reset a Batchnorm layer to the identity transform (weight=1, bias=0)."""
    bn.weight.data.fill_(1.)
    bn.bias.data.fill_(0.)
class ConvBlock(nn.Module):
    """Two 3x3 conv + BN + ReLU stages followed by configurable pooling
    ('max', 'avg' or 'avg+max')."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        # Xavier weights for the convs, identity init for the BN layers.
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg+max':
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
class CNN10(nn.Module):
    """PANNs CNN10 backbone: log-mel front end plus four ConvBlocks.

    forward() returns a clip-level 512-d embedding; extract_frame()
    keeps the time axis and returns frame-level 512-d features.
    """

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
                 fmax, classes_num):
        super(CNN10, self).__init__()
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Waveform -> power spectrogram.
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Power spectrogram -> log-mel.
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        # SpecAugment module (constructed here; applied by callers/training code).
        self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
                                               freq_drop_width=8, freq_stripes_num=2)
        self.bn0 = nn.BatchNorm2d(64)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.fc1 = nn.Linear(512, 512, bias=True)
        self.fc_audioset = nn.Linear(512, classes_num, bias=True)
        self.init_weight()

    def init_weight(self):
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)

    def _front_end(self, input):
        # (batch, data_length) -> (batch, 1, time_steps, mel_bins),
        # batch-normalized along the mel axis via the transpose trick.
        x = self.spectrogram_extractor(input)
        x = self.logmel_extractor(x)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        return x.transpose(1, 3)

    def forward(self, input):
        """
        Input: (batch, data_length); returns a (batch, 512) embedding."""
        x = self._front_end(input)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = torch.mean(x, dim=3)           # pool the mel axis
        (x1, _) = torch.max(x, dim=2)      # max + mean pooling over time
        x2 = torch.mean(x, dim=2)
        x = x1 + x2
        embedding = F.relu_(self.fc1(x))
        return embedding

    def extract_frame(self, input):
        """
        Input: (batch, data_length); returns (batch, time, 512) features."""
        x = self._front_end(input)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        # Later blocks pool only the frequency axis to keep time resolution.
        x = self.conv_block2(x, pool_size=(1, 2), pool_type='avg')
        x = self.conv_block3(x, pool_size=(1, 2), pool_type='avg')
        x = self.conv_block4(x, pool_size=(1, 2), pool_type='avg')
        x = torch.mean(x, dim=3).transpose(1, 2)  # b, time, 512
        return x
Tim-TSENet | Tim-TSENet-main/TSDNET/model/loss.py | import torch
from itertools import permutations
import numpy as np
from pypesq import pesq
import torch.nn as nn
class FocalLoss(nn.Module):
    """Binary focal loss for frame-level detection posteriors.

    alpha weights the positive class; gamma down-weights easy examples;
    a small eps stabilizes the logs.
    """

    def __init__(self, alpha=0.35, gamma=2):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.eps = 1e-9

    def forward(self, predict, target, frame_level_time=None):
        # Modulating factors for the positive (t=1) and negative (t=0) terms.
        pos_weight = self.alpha * (1 - predict) ** self.gamma
        neg_weight = (1 - self.alpha) * predict ** self.gamma
        pos_term = (-target * pos_weight * torch.log(predict + self.eps)).mean()
        neg_term = (-(1 - target) * neg_weight * torch.log(1 - predict + self.eps)).mean()
        return pos_term + neg_term
def nll_loss(output, target):
    '''Negative likelihood loss; *output* should already be
    log-probabilities (e.g. from F.log_softmax).

    Args:
        output: (N, classes_num)
        target: (N, classes_num)
    '''
    return -torch.mean(target * output)
def tsd_loss(output, target):
    '''Frame-level BCE between detection posteriors and labels.

    Args:
        output: (N) probabilities in [0, 1]
        target: (N) binary labels (squeezed before the comparison)
    '''
    return torch.nn.BCELoss()(output, target.squeeze())
def sisnr_loss(x, s, eps=1e-8):
    """
    Negative scale-invariant SNR training loss, averaged over the batch.

    input:
        x: separated signal, N x S tensor
        s: reference signal, N x S tensor
    Return:
        scalar tensor (negative mean SI-SNR)
    """
    # Align lengths by truncating the longer signal.
    if x.shape != s.shape:
        min_len = min(x.shape[-1], s.shape[-1])
        x = x[:, :min_len]
        s = s[:, :min_len]
    if x.shape != s.shape:
        raise RuntimeError(
            "Dimention mismatch when calculate si-snr, {} vs {}".format(
                x.shape, s.shape))

    def l2norm(mat, keepdim=False):
        return torch.norm(mat, dim=-1, keepdim=keepdim)

    # Zero-mean both signals, then project x onto the reference.
    x_zm = x - torch.mean(x, dim=-1, keepdim=True)
    s_zm = s - torch.mean(s, dim=-1, keepdim=True)
    proj = torch.sum(x_zm * s_zm, dim=-1, keepdim=True) * s_zm \
        / (l2norm(s_zm, keepdim=True) ** 2 + eps)
    snr = 20. * torch.log10(eps + l2norm(proj) / (l2norm(x_zm - proj) + eps))
    return -torch.sum(snr) / x.shape[0]
def sisnri(x, s, m):
    """
    SI-SNR improvement of the separated signal over the raw mixture.

    Arguments:
        x: separated signal, BS x S
        s: reference signal, BS x S
        m: mixture signal, BS x S
    Return:
        sisnr_loss(m, s) - sisnr_loss(x, s)  (positive = improvement)
    """
    return sisnr_loss(m, s) - sisnr_loss(x, s)
def lfb_mse_loss(x, s):
    """
    Log-fbank MSE between spectrogram tensors.

    est_spec, ref_spec: BS x F x T (the longer one is truncated on T)
    return: scalar tensor, mean squared error over all elements
    """
    if x.shape != s.shape:
        min_t = min(x.shape[-1], s.shape[-1])
        x = x[:, :, :min_t]
        s = s[:, :, :min_t]
    return torch.sum((x - s) ** 2) / (x.shape[0] * x.shape[1] * x.shape[2])
def mse_loss(x, s):
    """
    Mean squared error between waveform tensors.

    input:
        x: separated signal, N x S tensor
        s: reference signal, N x S tensor (the longer one is truncated)
    Return:
        scalar tensor, sum of squared errors / (N * S)
    """
    if x.shape != s.shape:
        min_len = min(x.shape[-1], s.shape[-1])
        x = x[:, :min_len]
        s = s[:, :min_len]
    return torch.sum((x - s) ** 2) / (x.shape[0] * x.shape[1])
def get_pesq(est_wav, lab_wav):
    # Average PESQ score (16 kHz) over a batch of estimated vs reference
    # waveforms; tensors are detached and moved to CPU for pypesq.
    num = est_wav.shape[0]
    score = 0.0
    for i in range(num):
        score += pesq(est_wav[i].cpu().detach(), lab_wav[i].cpu().detach(), 16000)
    score = score / num
    return score
def get_loss(est_cls, lab_cls, est_tsd, lab_tsd):
    """Combined objective: 10x classification NLL plus detection BCE.

    Returns (total, classification loss, detection loss).
    """
    loss_cls = nll_loss(est_cls, lab_cls)
    loss_tsd = tsd_loss(est_tsd, lab_tsd)
    total = 10. * loss_cls + loss_tsd
    return total, loss_cls, loss_tsd
def get_loss_one_hot(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
    """Detection-only objective.

    The classification loss is disabled in this variant; the TSD loss is
    reported in both slots so the logging interface stays unchanged.
    Returns (total, cls_slot, tsd_loss), all equal to the TSD loss.
    """
    loss_tsd = tsd_loss(est_tsd, lab_tsd)
    return loss_tsd, loss_tsd, loss_tsd
def get_loss_one_hot_reg(est, lab):
    """Regression variant: plain MSE between predicted and label boundaries.

    The single MSE term is duplicated to keep the
    (loss, loss_cls, loss_tsd) interface of the other loss helpers.
    """
    loss_tsd = mse_loss(est, lab)
    return loss_tsd, loss_tsd, loss_tsd
def get_loss_one_hot_reg_two(st, ed, lab):
    """
    Cross-entropy loss on the predicted start/end frame distributions.
    input:
          st: start-frame logits, B x n_frames
          ed: end-frame logits, B x n_frames
          lab: ground-truth frame indices, B x 2 (column 0 = start, 1 = end)
    Return:
          (total loss, start loss, end loss)
    """
    crossentropyloss = nn.CrossEntropyLoss()
    loss_st = crossentropyloss(st, lab[:, 0].long())
    loss_ed = crossentropyloss(ed, lab[:, 1].long())
    # Fix: the original summed loss_st twice and returned loss_st in the
    # loss_ed slot, so the end-frame head never received any gradient.
    loss = loss_st + loss_ed
    return loss, loss_st, loss_ed
def get_loss_one_hot_focal(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
    """Focal-loss variant of the TSD-only objective.

    Classification/similarity arguments are kept for interface parity but
    ignored; the focal detection loss fills every slot of the
    (loss, loss_cls, loss_tsd) return tuple.
    """
    loss_tsd = FocalLoss()(est_tsd, lab_tsd.squeeze())
    return loss_tsd, loss_tsd, loss_tsd
def get_loss_one_hot_focal_sim(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
    """
    Focal TSD loss plus an optional embedding-similarity regulariser.
    input:
          est_tsd / lab_tsd: detection prediction and target
          sim_cos / sim_lab: optional cosine-similarity prediction and target
    Return:
          (2 * loss_tsd + loss_cls, loss_cls, loss_tsd)
    """
    focalLoss = FocalLoss()
    loss_tsd = focalLoss(est_tsd, lab_tsd.squeeze())
    # Fix: `sim_cos != None` on a tensor performs an elementwise comparison
    # rather than the intended None check; use an identity test.
    if sim_cos is not None:
        loss_cls = mse_loss(sim_cos, sim_lab)
    else:
        loss_cls = loss_tsd
    loss = 2 * loss_tsd + loss_cls
    return loss, loss_cls, loss_tsd
| 7,028 | 29.428571 | 121 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/model/model.py | import torch
from torch import nn
import torch.nn.functional as F
import sys
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import CNN10
from model.tsd import TSD, TSD2, TSD2_tse, TSD_plus, TSD_plus_sim, TSD_IS,TSD_regresion,TSD_regresion_two_cls
import math
def init_kernel(frame_len,
                frame_hop,
                num_fft=None,
                window="sqrt_hann"):
    """Build the analysis/synthesis kernel shared by the (i)STFT layers.

    Args:
        frame_len: window length in samples
        frame_hop: hop size in samples
        num_fft: FFT size; defaults to the next power of two >= frame_len
        window: only "sqrt_hann" is supported (perfect reconstruction)
    Returns:
        kernel tensor of shape (2F, 1, frame_len) with F = num_fft // 2 + 1
    Raises:
        RuntimeError: for any window other than "sqrt_hann"
    """
    if window != "sqrt_hann":
        raise RuntimeError("Now only support sqrt hanning window in order "
                           "to make signal perfectly reconstructed")
    if not num_fft:
        # FFT points
        fft_size = 2 ** math.ceil(math.log2(frame_len))
    else:
        fft_size = num_fft
    # window [window_length]
    window = torch.hann_window(frame_len) ** 0.5
    S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
    # window_length, F, 2 (real+imag). Fix: torch.rfft was removed in
    # PyTorch >= 1.8; fall back to torch.fft.rfft + view_as_real, which
    # produces the same (..., F, 2) layout.
    if hasattr(torch, "rfft"):
        kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
    else:
        kernel = torch.view_as_real(
            torch.fft.rfft(torch.eye(fft_size) / S_))[:frame_len]
    # 2, F, window_length
    kernel = torch.transpose(kernel, 0, 2) * window
    # 2F, 1, window_length
    kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
    return kernel
class STFTBase(nn.Module):
    """
    Base layer for (i)STFT: holds the shared convolution kernel.
    NOTE:
        1) Recommend sqrt_hann window with 2**N frame length, because it
           could achieve perfect reconstruction after overlap-add
        2) Now haven't consider padding problems yet
    """

    def __init__(self,
                 frame_len,
                 frame_hop,
                 window="sqrt_hann",
                 num_fft=None):
        super(STFTBase, self).__init__()
        # Precompute the (2F, 1, frame_len) DFT kernel once.
        K = init_kernel(
            frame_len,
            frame_hop,
            num_fft=num_fft,
            window=window)
        # Registered as a (frozen) parameter so it moves with .to()/.cuda().
        self.K = nn.Parameter(K, requires_grad=False)
        self.stride = frame_hop
        self.window = window

    def freeze(self):
        """Stop gradient flow through the DFT kernel."""
        self.K.requires_grad = False

    def unfreeze(self):
        """Allow the DFT kernel to be fine-tuned."""
        self.K.requires_grad = True

    def check_nan(self):
        """Raise if the kernel has been corrupted by NaNs (e.g. bad training)."""
        num_nan = torch.sum(torch.isnan(self.K))
        if num_nan:
            raise RuntimeError(
                "detect nan in STFT kernels: {:d}".format(num_nan))

    def extra_repr(self):
        """Summary string shown in printed module hierarchies."""
        return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
            self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
    """
    Short-time Fourier Transform as a Layer
    """

    def __init__(self, *args, **kwargs):
        super(STFT, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        Accept raw waveform and output magnitude and phase
        x: input signal, N x 1 x S or N x S
        m: magnitude, N x F x T
        p: phase, N x F x T
        """
        if x.dim() not in [2, 3]:
            raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
                x.dim()))
        self.check_nan()
        # Promote N x S to N x 1 x S so conv1d sees a channel axis.
        if x.dim() == 2:
            x = x.unsqueeze(1)
        # N x 2F x T: real halves stacked on top of imaginary halves.
        packed = F.conv1d(x, self.K, stride=self.stride, padding=0)
        real, imag = torch.chunk(packed, 2, dim=1)
        magnitude = (real ** 2 + imag ** 2) ** 0.5
        phase = torch.atan2(imag, real)
        return magnitude, phase
class iSTFT(STFTBase):
    """
    Inverse Short-time Fourier Transform as a Layer
    """

    def __init__(self, *args, **kwargs):
        super(iSTFT, self).__init__(*args, **kwargs)

    def forward(self, m, p, squeeze=False):
        """
        Accept phase & magnitude and output raw waveform
        m, p: N x F x T
        s: N x C x S
        """
        if p.dim() != m.dim() or p.dim() not in [2, 3]:
            raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
                p.dim()))
        self.check_nan()
        # Promote F x T to 1 x F x T for the batched transposed conv.
        if p.dim() == 2:
            p = p.unsqueeze(0)
            m = m.unsqueeze(0)
        # Rebuild real/imag parts, then overlap-add via conv_transpose1d.
        real = m * torch.cos(p)
        imag = m * torch.sin(p)
        packed = torch.cat([real, imag], dim=1)
        s = F.conv_transpose1d(packed, self.K, stride=self.stride, padding=0)
        return torch.squeeze(s) if squeeze else s
class GlobalLayerNorm(nn.Module):
    '''
    Calculate Global Layer Normalization: normalize over both the channel
    and time axes with a single mean/variance per sample.
    dim: (int or list or torch.Size) –
          input shape from an expected input of size
    eps: a value added to the denominator for numerical stability.
    elementwise_affine: a boolean value that when set to True,
        this module has learnable per-element affine parameters
        initialized to ones (for weights) and zeros (for biases).
    '''

    def __init__(self, dim, eps=1e-05, elementwise_affine=True):
        super(GlobalLayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine

        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.ones(self.dim, 1))
            self.bias = nn.Parameter(torch.zeros(self.dim, 1))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

    def forward(self, x):
        # x = N x C x L
        # N x 1 x 1
        # cln: mean,var N x 1 x L
        # gln: mean,var N x 1 x 1
        if x.dim() != 3:
            # Fix: nn.Module instances have no __name__ attribute, so the
            # original `self.__name__` raised AttributeError instead of the
            # intended RuntimeError.
            raise RuntimeError("{} accept 3D tensor as input".format(
                type(self).__name__))

        mean = torch.mean(x, (1, 2), keepdim=True)
        var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
        # N x C x L
        if self.elementwise_affine:
            x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
        else:
            x = (x - mean) / torch.sqrt(var + self.eps)
        return x
class CumulativeLayerNorm(nn.LayerNorm):
    '''
    Calculate Cumulative Layer Normalization
    dim: you want to norm dim
    elementwise_affine: learnable per-element affine parameters
    '''

    def __init__(self, dim, elementwise_affine=True):
        super(CumulativeLayerNorm, self).__init__(
            dim, elementwise_affine=elementwise_affine)

    def forward(self, x):
        # N x C x L -> N x L x C so the parent LayerNorm normalizes over
        # the channel axis, then restore the original layout.
        normed = super().forward(x.transpose(1, 2))
        return normed.transpose(1, 2)
def select_norm(norm, dim):
    """Map a norm identifier ('gln', 'cln', 'ln') to its layer.

    Any unrecognised name falls back to BatchNorm1d.
    """
    if norm == 'gln':
        return GlobalLayerNorm(dim, elementwise_affine=True)
    if norm == 'cln':
        return CumulativeLayerNorm(dim, elementwise_affine=True)
    return nn.GroupNorm(1, dim) if norm == 'ln' else nn.BatchNorm1d(dim)
class Conv1D(nn.Module):
    '''
    Dilated depthwise-separable Conv1D residual block (TCN style).
    causal: if True is causal setting (left-heavy padding trimmed after conv)
    '''

    def __init__(self, in_channels=256, out_channels=512,
                 kernel_size=3, dilation=1, norm='gln', causal=False):
        super(Conv1D, self).__init__()
        self.causal = causal
        # 1x1 bottleneck expansion.
        self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
        self.PReLu1 = nn.PReLU()
        self.norm1 = select_norm(norm, out_channels)
        # Symmetric padding preserves length; causal pads fully then trims.
        self.pad = (dilation * (kernel_size - 1)
                    ) // 2 if not causal else dilation * (kernel_size - 1)
        self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
                                groups=out_channels, padding=self.pad, dilation=dilation)
        self.PReLu2 = nn.PReLU()
        self.norm2 = select_norm(norm, out_channels)
        # 1x1 projection back to the residual width.
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)

    def forward(self, x):
        """
        Input:
              x: [B x C x T], B is batch size, T is times
        Returns:
              x: [B, C, T]
        """
        # B x C x T -> B x C_o x T_o
        x_conv = self.conv1x1(x)
        x_conv = self.PReLu1(x_conv)
        x_conv = self.norm1(x_conv)
        # B x C_o x T_o
        x_conv = self.dwconv(x_conv)
        x_conv = self.PReLu2(x_conv)
        x_conv = self.norm2(x_conv)
        # Fix: only trim when pad > 0 — with kernel_size == 1 the original
        # slice x_conv[:, :, :-0] produced an empty tensor in causal mode.
        if self.causal and self.pad:
            x_conv = x_conv[:, :, :-self.pad]
        x_conv = self.end_conv1x1(x_conv)
        return x + x_conv
class Conv1D_emb(nn.Module):
    '''
    Build the Conv1D structure with embedding conditioning.
    The speaker/class embedding (and optionally a frame-level TSD mask) is
    fused into the block input by concatenation, addition or multiplication.
    causal: if True is causal setting
    '''

    def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
                 kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
        super(Conv1D_emb, self).__init__()
        self.causal = causal
        self.usingTsd = usingTsd
        self.usingEmb = usingEmb
        self.fusion = fusion  # concat, add, multiply
        # The input width of the first 1x1 conv depends on the fusion mode:
        # concat grows it by emb_channels, add/multiply keep it, and the
        # optional TSD mask always contributes one extra channel.
        if usingEmb:
            if fusion == 'concat':
                if not usingTsd:
                    self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
                else:
                    self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
            elif fusion == 'add':
                # preCNN projects the embedding to in_channels before fusing.
                self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
                if not usingTsd:
                    self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
                else:
                    self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
            elif fusion == 'multiply':
                self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
                if not usingTsd:
                    self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
                else:
                    self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
        else:
            self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
        self.PReLu1 = nn.PReLU()
        self.norm1 = select_norm(norm, out_channels)
        self.pad = (dilation * (kernel_size - 1)
                    ) // 2 if not causal else dilation * (kernel_size - 1)
        self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
                                groups=out_channels, padding=self.pad, dilation=dilation)
        self.PReLu2 = nn.PReLU()
        self.norm2 = select_norm(norm, out_channels)
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)

    def forward(self, x, emb=None, tsd=None):
        """
        Input:
              x: [B x C x T], B is batch size, T is times
              emb: [B x C']
              tsd: [B x 1 x T]
        Returns:
              x: [B, C, T]
        """
        T = x.shape[-1]
        # Broadcast the utterance-level embedding along the time axis.
        emb = torch.unsqueeze(emb, -1)
        # B x C' X T
        emb = emb.repeat(1, 1, T)
        # B x (C + C') X T
        if self.usingEmb:
            if self.fusion == 'concat':
                if not self.usingTsd:
                    x_ = torch.cat([x, emb], 1)
                else:
                    x_ = torch.cat([x, emb, tsd], 1)
            elif self.fusion == 'add':
                x_ = self.PReLu1(self.preCNN(emb)) + x
                if not self.usingTsd:
                    x_ = x_
                else:
                    x_ = torch.cat([x_, tsd], 1)
            elif self.fusion == 'multiply':
                x_ = self.PReLu1(self.preCNN(emb)) * x
                if not self.usingTsd:
                    x_ = x_
                else:
                    x_ = torch.cat([x_, tsd], 1)
        else:
            x_ = x
        # B x (C + C') X T -> B x C_o x T_o
        x_conv = self.conv1x1(x_)
        x_conv = self.PReLu1(x_conv)
        x_conv = self.norm1(x_conv)
        # B x C_o x T_o
        x_conv = self.dwconv(x_conv)
        x_conv = self.PReLu2(x_conv)
        x_conv = self.norm2(x_conv)
        # B x C_o x T_o -> B x C x T
        if self.causal:
            x_conv = x_conv[:, :, :-self.pad]
        x_conv = self.end_conv1x1(x_conv)
        # Residual connection around the whole block.
        return x + x_conv
class ExtractionNet(nn.Module):
    '''
    TasNet Separation part: three repeats, each made of one
    embedding-conditioned Conv1D_emb block followed by a stack of plain
    dilated Conv1D blocks.
    LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
    '''

    def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
                 out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
        super(ExtractionNet, self).__init__()
        self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
        # usingEmb/usingTsd select, per repeat, whether the conditioned
        # front block consumes the embedding and/or the TSD mask.
        self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
        self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, norm=norm, causal=causal)
        self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
        self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, norm=norm, causal=causal)
        self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
        self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                               kernel_size=kernel_size, norm=norm, causal=causal)
        self.PReLu = nn.PReLU()
        self.norm = select_norm('cln', in_channels)
        self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
        # Sigmoid keeps the predicted mask in [0, 1].
        self.activation = nn.Sigmoid()
        self.num_spks = num_spks

    def _Sequential_block(self, num_blocks, **block_kwargs):
        '''
        Sequential 1-D Conv Block with exponentially growing dilation
        (1, 2, 4, ..., 2**(num_blocks-1)).
        input:
              num_block: how many blocks in every repeats
              **block_kwargs: parameters of Conv1D_Block
        '''
        Conv1D_lists = [Conv1D(
            **block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]

        return nn.Sequential(*Conv1D_lists)

    def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
        '''
        Sequential repeats
        input:
              num_repeats: Number of repeats
              num_blocks: Number of block in every repeats
              **block_kwargs: parameters of Conv1D_Block
        '''
        repeats_lists = [self._Sequential_block(
            num_blocks, **block_kwargs) for i in range(num_repeats)]
        return nn.Sequential(*repeats_lists)

    def forward(self, x, emb=None, tsd=None):
        """
        Input:
              x: [B x C x T], B is batch size, T is times
              emb: [B x C x T], B is batch size, T is times
        Returns:
              x: [num_spks, B, N, T]
        """
        # B x C x T
        x = self.norm(x)
        x = self.conv1x1(x)
        x = self.PReLu(x)
        # B x C x T; dropout between repeats regularises training.
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block_1_front(x, emb, tsd)
        x = self.conv_block_1_back(x)
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block_2_front(x, emb, tsd)
        x = self.conv_block_2_back(x)
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block_3_front(x, emb, tsd)
        x = self.conv_block_3_back(x)
        x = F.dropout(x, p=0.2, training=self.training)
        # B x N x T
        x = self.PReLu(x)
        x = self.end_conv1x1(x)
        x = self.activation(x)
        return x
class TSENet(nn.Module):
    '''
    TSENet module: target sound extraction. An STFT front-end feeds an
    embedding-conditioned mask estimator; the mask is applied to the mixture
    magnitude and inverted back to a waveform.
       N	Number of filters in autoencoder
       B	Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
       H	Number of channels in convolutional blocks
       P	Kernel size in convolutional blocks
       X	Number of convolutional blocks in each repeat
       R	Number of repeats
    '''

    def __init__(self,
                 N=512,
                 B=128,
                 H=512,
                 P=3,
                 X=8,
                 R=3,
                 norm="gln",
                 num_spks=1,
                 causal=False,
                 cls_num=50,
                 nFrameLen=512,
                 nFrameShift=256,
                 nFFT=512,
                 fusion='concat',
                 usingEmb=[True,True,True],
                 usingTsd=[False,False,False],
                 CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
                 fixCNN10=False,
                 fixTSDNet=True,
                 pretrainedCNN10=None,
                 pretrainedTSDNet=None,
                 threshold=0.5,
                 ):
        super(TSENet, self).__init__()
        self.device = torch.device('cuda')
        self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
        self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
        # Maps the log-power spectrogram (nFrameShift+1 bins) to N channels.
        self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1)
        self.PReLu = nn.PReLU()
        self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
                            out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
                            norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
                            usingEmb=usingEmb, usingTsd=usingTsd)
        self.num_spks = num_spks
        # Reference-audio encoder (PANNs CNN10 backbone).
        self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
            hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
            classes_num=CNN10_settings[6])
        # Auxiliary classifier head on top of the conditioner output.
        self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
        self.cls2 = nn.Linear(CNN10_settings[8], cls_num)
        self.fixCNN10 = fixCNN10
        self.fixTSDNet = fixTSDNet
        self.pretrainedCNN10 = pretrainedCNN10
        self.pretrainedTSDNet = pretrainedTSDNet
        self.usingEmb = usingEmb
        self.usingTsd = usingTsd
        self.threshold = threshold
        self.init_conditioner()
        self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8])
        # Alternative conditioning path: learned per-class embedding table.
        self.onehot = nn.Embedding(cls_num, CNN10_settings[8])
        if usingTsd[0] or usingTsd[1] or usingTsd[2]:
            # Optional frame-level detector whose output gates the extractor.
            self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
            self.init_TSDNet()
        self.epsilon = 1e-20

    def init_conditioner(self):
        """Optionally load a pretrained CNN10 checkpoint and freeze it."""
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])
        if self.fixCNN10:
            for p in self.conditioner.parameters():
                p.requires_grad = False

    def init_TSDNet(self):
        """Optionally load a pretrained TSDNet checkpoint and freeze it."""
        if self.pretrainedTSDNet:
            device = torch.device('cuda')
            dicts = torch.load(self.pretrainedTSDNet, map_location=device)
            self.tsdnet.load_state_dict(dicts["model_state_dict"])
        if self.fixTSDNet:
            for p in self.tsdnet.parameters():
                p.requires_grad = False

    def forward(self, x, ref, cls_index, label=None, inf=False):
        """
        Input:
            x: [B, T] mixture waveform, B is batch size, T is times
            ref: [B, T] reference waveform of the target class
            cls_index: [B] long tensor of target-class indices
            label: [B, T] clean target waveform (training only)
            inf: inference mode — when True, no label is required
        Returns:
            (audio list, mask, gt PSM target or None in inference,
             x_cls log-probs, conditioner embedding, one-hot embedding)
        """
        # B x T -> B x F x T
        x_magnitude, x_phase = self.stft(x)
        x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
        if not inf:
            label_magnitude, label_phase = self.stft(label)
        if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]:
            # Binarize the detector output into a frame-level gating mask.
            _, _, out_tsd_up = self.tsdnet(x, ref)
            tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda()
            tsdMask[out_tsd_up > self.threshold] = 1.
            tsdMask = tsdMask[:, None, :]
        else:
            tsdMask = None
        # B x T -> B x C -> B x C x T
        out_enc = self.conditioner(ref)
        emb = self.emb_fc(out_enc)
        emb = self.PReLu(emb)
        x_cls = self.PReLu(self.cls1(out_enc))
        x_cls = F.dropout(x_cls, p=0.5, training=self.training)
        x_cls = self.cls2(x_cls)
        x_cls = F.log_softmax(x_cls, dim=-1)
        emb_onehot = self.onehot(cls_index)
        emb_onehot = F.dropout(emb_onehot, p=0.2, training=self.training)
        x_encoder = self.PReLu(self.front_CNN(x_encoder))
        # mask = self.extractor(x_encoder, emb, tsdMask)
        mask = self.extractor(x_encoder, emb_onehot, tsdMask)
        x_ex = x_magnitude * mask
        if not inf:
            gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase) # PSM
            gt = torch.clamp(gt, min=0., max=1.) # Truncated to [0,1]
        else:
            # Fix: in inference mode there is no label, so the original
            # unconditional PSM computation raised a NameError.
            gt = None
        audio_encoder = self.istft(x_ex, x_phase)
        audio = [audio_encoder[:, 0]]
        return audio, mask, gt, x_cls, emb, emb_onehot
class TSDNet(nn.Module):
    '''
    TSDNet module: target sound detection. A CNN10 encoder turns the
    reference clip into a 128-d embedding that conditions the TSD2 detector;
    an auxiliary head classifies the reference into cls_num classes.
    '''
    def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
        super(TSDNet, self).__init__()
        self.PReLu = nn.PReLU()
        # Reference-audio encoder (PANNs CNN10 backbone, AudioSet config).
        self.conditioner = CNN10(sample_rate=16000, window_size=1024,
            hop_size=320, mel_bins=64, fmin=50, fmax=8000,
            classes_num=527)
        # Auxiliary classifier on the 128-d conditioning embedding.
        self.cls1 = nn.Linear(128, 128)
        self.cls2 = nn.Linear(128, cls_num)
        self.pretrainedCNN10 = pretrainedCNN10
        self.init_ref()
        # Projects the 512-d CNN10 output down to the 128-d condition.
        self.emb_fc = nn.Linear(512, 128)
        # print(CNN10_settings)
        self.tsd = TSD2(sample_rate=CNN10_settings[0], window_size=nFrameLen,
            hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
        # print('self.tsd ',self.tsd)
        self.init_fc_layer(self.cls1) # new add
        self.init_fc_layer(self.cls2) # new add
        self.init_fc_layer(self.emb_fc) # new add
        # assert 1==2

    def init_ref(self):
        """Load a pretrained CNN10 checkpoint into the conditioner, if given."""
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])

    def init_rnn_layer(self, layer):
        """Kaiming-init RNN weights, zero-init biases."""
        for name, param in layer.named_parameters():
            if name.startswith("weight"):
                nn.init.kaiming_normal_(param)
            else:
                nn.init.zeros_(param)

    def init_fc_layer(self, layer):
        """Initialize a Linear or Convolutional layer. """
        nn.init.kaiming_normal_(layer.weight)
        nn.init.constant_(layer.bias, 0.)

    def forward(self, x, ref):
        """
        Input:
            x: [B, T], B is batch size, T is times
            ref: [B, T] reference clip of the target class
        Returns:
            (class log-probs, frame-level TSD output, upsampled TSD output)
        """
        # Encode the reference clip into the conditioning embedding.
        out_enc = self.conditioner(ref)
        emb = self.emb_fc(out_enc)
        emb = self.PReLu(emb)
        out_tsd_time, out_tsd_up, _ = self.tsd(x, emb)
        x_cls = self.PReLu(self.cls1(emb))
        x_cls = F.dropout(x_cls, p=0.5, training=self.training)
        x_cls = self.cls2(x_cls)
        x_cls = F.log_softmax(x_cls, dim=-1)
        return x_cls, out_tsd_time, out_tsd_up
class TSDNet_tse(nn.Module):
    '''
    TSDNet variant that additionally conditions on an extracted target
    estimate (tse_audio): clip-level and frame-level embeddings are both
    fed to the TSD2_tse detector.
    '''
    def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None, use_frame=False, only_ref=True):
        super(TSDNet_tse, self).__init__()
        self.PReLu = nn.PReLU()
        # Shared encoder for both the reference clip and the TSE estimate.
        self.conditioner = CNN10(sample_rate=16000, window_size=1024,
            hop_size=320, mel_bins=64, fmin=50, fmax=8000,
            classes_num=527)
        self.cls1 = nn.Linear(128, 128)
        self.cls2 = nn.Linear(128, cls_num)
        self.pretrainedCNN10 = pretrainedCNN10
        self.init_ref()
        self.emb_fc = nn.Linear(512, 128)
        # only_ref: build the clip-level condition from the reference alone,
        # otherwise from reference + TSE estimate concatenated in time.
        self.only_ref = only_ref
        # print(CNN10_settings)
        self.tsd = TSD2_tse(sample_rate=CNN10_settings[0], window_size=nFrameLen,
            hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5], use_frame=use_frame)
        # print('self.tsd ',self.tsd)
        # assert 1==2
        self.init_fc_layer(self.cls1) # new add
        self.init_fc_layer(self.cls2) # new add
        self.init_fc_layer(self.emb_fc) # new add

    def init_ref(self):
        """Load a pretrained CNN10 checkpoint into the conditioner, if given."""
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])

    def init_rnn_layer(self, layer):
        """Kaiming-init RNN weights, zero-init biases."""
        for name, param in layer.named_parameters():
            if name.startswith("weight"):
                nn.init.kaiming_normal_(param)
            else:
                nn.init.zeros_(param)

    def init_fc_layer(self, layer):
        """Initialize a Linear or Convolutional layer. """
        nn.init.kaiming_normal_(layer.weight)
        nn.init.constant_(layer.bias, 0.)

    def forward(self, x, ref, tse_audio):
        """
        Input:
            x: [B, T], B is batch size, T is times
            ref: [B, T] reference clip
            tse_audio: [B, T] extracted target estimate
        Returns:
            (class log-probs, frame-level TSD output, upsampled TSD output)
        """
        # clip level condition
        # clip_ref = torch.cat([ref, tse_audio],dim=1)
        if self.only_ref:
            clip_ref = ref # use only the reference clip
        else:
            clip_ref = torch.cat([ref, tse_audio],dim=1)
        clip_enc = self.conditioner(clip_ref)
        #print('clip_enc ',clip_enc.shape)
        clip_emb = self.emb_fc(clip_enc)
        clip_emb = self.PReLu(clip_emb)
        # frame level condition
        frame_enc = self.conditioner.extract_frame(tse_audio)
        frame_emb = self.emb_fc(frame_enc)
        frame_emb = self.PReLu(frame_emb)
        out_tsd_time, out_tsd_up = self.tsd(x, clip_emb, frame_emb)
        x_cls = self.PReLu(self.cls1(clip_emb))
        x_cls = F.dropout(x_cls, p=0.5, training=self.training)
        x_cls = self.cls2(x_cls)
        x_cls = F.log_softmax(x_cls, dim=-1)
        return x_cls, out_tsd_time, out_tsd_up
class TSDNet_one_hot(nn.Module):
    '''
    TSDNet variant conditioned on a learned per-class embedding (one-hot
    index -> 128-d vector) instead of a reference-audio encoder.
    '''

    def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
        super(TSDNet_one_hot, self).__init__()
        self.PReLu = nn.PReLU()
        # Class index -> 128-d conditioning vector.
        self.conditioner_one_hot = nn.Embedding(cls_num, 128)
        self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=nFrameLen,
                       hop_size=nFrameShift, mel_bins=CNN10_settings[3],
                       fmin=CNN10_settings[4], fmax=CNN10_settings[5])

    def init_ref(self):
        """Load a pretrained conditioner checkpoint if one was supplied."""
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])

    def forward(self, x, ref, onehot=None):
        """
        Input:
            x: [B, T], B is batch size, T is times
            ref: unused; kept for interface parity with TSDNet
            onehot: [B] long tensor of class indices
        Returns:
            (placeholder cls output, frame-level TSD output,
             upsampled TSD output, cosine similarity)
        """
        emb_onehot = self.conditioner_one_hot(onehot)
        out_tsd_time, out_tsd_up, sim_cos = self.tsd(x, emb_onehot)
        # No classifier head in this variant; return a placeholder tensor.
        x_cls = torch.zeros(1).cuda()
        return x_cls, out_tsd_time, out_tsd_up, sim_cos # st,ed
class TSDNet_plus_one_hot(nn.Module):
    '''
    One-hot conditioned TSDNet built on the TSD_plus_sim detector, which
    additionally returns an embedding cosine similarity.
    '''

    def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
        super(TSDNet_plus_one_hot, self).__init__()
        self.PReLu = nn.PReLU()
        # Class index -> 128-d conditioning vector.
        self.conditioner_one_hot = nn.Embedding(cls_num, 128)
        self.tsd = TSD_plus_sim(sample_rate=CNN10_settings[0], window_size=nFrameLen,
                                hop_size=nFrameShift, mel_bins=CNN10_settings[3],
                                fmin=CNN10_settings[4], fmax=CNN10_settings[5])

    def init_ref(self):
        """Load a pretrained conditioner checkpoint if one was supplied."""
        if self.pretrainedCNN10:
            device = torch.device('cuda')
            checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
            self.conditioner.load_state_dict(checkpoint['model'])

    def forward(self, x, ref, onehot=None):
        """
        Input:
            x: [B, T], B is batch size, T is times
            ref: unused; kept for interface parity with TSDNet
            onehot: [B] long tensor of class indices
        Returns:
            (placeholder cls output, frame-level TSD output,
             upsampled TSD output, cosine similarity)
        """
        emb_onehot = self.conditioner_one_hot(onehot)
        # NOTE: TSD_plus_sim returns (upsampled, frame-level, similarity).
        out_tsd_up, out_tsd_time, sim_cos = self.tsd(x, emb_onehot)
        # No classifier head in this variant; return a placeholder tensor.
        x_cls = torch.zeros(1).cuda()
        return x_cls, out_tsd_time, out_tsd_up, sim_cos
if __name__ == "__main__":
    # Smoke test. Fix: the original instantiated Conv_TasNet, which is not
    # defined in this module (NameError); exercise TSENet with its real
    # forward signature instead.
    net = TSENet().cuda()
    x = torch.randn(4, 64000).cuda()
    label = torch.randn(4, 64000).cuda()
    ref = torch.randn(4, 64000).cuda()
    cls_index = torch.zeros(4, dtype=torch.long).cuda()
    audio, mask, gt, x_cls, emb, emb_onehot = net(x, ref, cls_index, label=label)
    print(audio[0].shape)
    # print("{:.3f}".format(check_parameters(net)))
| 31,331 | 38.067332 | 177 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/model/model_tf.py | import torch
from torch import nn
import torch.nn.functional as F
import sys
import pickle
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import ResNet38, CNN10
from model.tsd import TSD
def init_kernel(frame_len,
                frame_hop,
                num_fft=None,
                window="sqrt_hann"):
    """Build the analysis/synthesis kernel shared by the (i)STFT layers.

    Args:
        frame_len: window length in samples
        frame_hop: hop size in samples
        num_fft: FFT size; defaults to the next power of two >= frame_len
        window: only "sqrt_hann" is supported (perfect reconstruction)
    Returns:
        kernel tensor of shape (2F, 1, frame_len) with F = num_fft // 2 + 1
    Raises:
        RuntimeError: for any window other than "sqrt_hann"
    """
    if window != "sqrt_hann":
        raise RuntimeError("Now only support sqrt hanning window in order "
                           "to make signal perfectly reconstructed")
    if not num_fft:
        # FFT points
        fft_size = 2 ** math.ceil(math.log2(frame_len))
    else:
        fft_size = num_fft
    # window [window_length]
    window = torch.hann_window(frame_len) ** 0.5
    S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
    # window_length, F, 2 (real+imag). Fix: torch.rfft was removed in
    # PyTorch >= 1.8; fall back to torch.fft.rfft + view_as_real, which
    # produces the same (..., F, 2) layout.
    if hasattr(torch, "rfft"):
        kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
    else:
        kernel = torch.view_as_real(
            torch.fft.rfft(torch.eye(fft_size) / S_))[:frame_len]
    # 2, F, window_length
    kernel = torch.transpose(kernel, 0, 2) * window
    # 2F, 1, window_length
    kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
    return kernel
class STFTBase(nn.Module):
    """
    Base layer for (i)STFT: holds the shared convolution kernel.
    NOTE:
        1) Recommend sqrt_hann window with 2**N frame length, because it
           could achieve perfect reconstruction after overlap-add
        2) Now haven't consider padding problems yet
    """

    def __init__(self,
                 frame_len,
                 frame_hop,
                 window="sqrt_hann",
                 num_fft=None):
        super(STFTBase, self).__init__()
        # Precompute the (2F, 1, frame_len) DFT kernel once.
        K = init_kernel(
            frame_len,
            frame_hop,
            num_fft=num_fft,
            window=window)
        # Registered as a (frozen) parameter so it moves with .to()/.cuda().
        self.K = nn.Parameter(K, requires_grad=False)
        self.stride = frame_hop
        self.window = window

    def freeze(self):
        """Stop gradient flow through the DFT kernel."""
        self.K.requires_grad = False

    def unfreeze(self):
        """Allow the DFT kernel to be fine-tuned."""
        self.K.requires_grad = True

    def check_nan(self):
        """Raise if the kernel has been corrupted by NaNs (e.g. bad training)."""
        num_nan = torch.sum(torch.isnan(self.K))
        if num_nan:
            raise RuntimeError(
                "detect nan in STFT kernels: {:d}".format(num_nan))

    def extra_repr(self):
        """Summary string shown in printed module hierarchies."""
        return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
            self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
    """
    Short-time Fourier Transform as a Layer
    """

    def __init__(self, *args, **kwargs):
        super(STFT, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        Accept raw waveform and output magnitude and phase
        x: input signal, N x 1 x S or N x S
        m: magnitude, N x F x T
        p: phase, N x F x T
        """
        if x.dim() not in [2, 3]:
            raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
                x.dim()))
        self.check_nan()
        # Promote N x S to N x 1 x S so conv1d sees a channel axis.
        if x.dim() == 2:
            x = x.unsqueeze(1)
        # N x 2F x T: real halves stacked on top of imaginary halves.
        packed = F.conv1d(x, self.K, stride=self.stride, padding=0)
        real, imag = torch.chunk(packed, 2, dim=1)
        magnitude = (real ** 2 + imag ** 2) ** 0.5
        phase = torch.atan2(imag, real)
        return magnitude, phase
class iSTFT(STFTBase):
    """
    Inverse Short-time Fourier Transform as a Layer
    """

    def __init__(self, *args, **kwargs):
        super(iSTFT, self).__init__(*args, **kwargs)

    def forward(self, m, p, squeeze=False):
        """
        Accept phase & magnitude and output raw waveform
        m, p: N x F x T
        s: N x C x S
        """
        if p.dim() != m.dim() or p.dim() not in [2, 3]:
            raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
                p.dim()))
        self.check_nan()
        # Promote F x T to 1 x F x T for the batched transposed conv.
        if p.dim() == 2:
            p = p.unsqueeze(0)
            m = m.unsqueeze(0)
        # Rebuild real/imag parts, then overlap-add via conv_transpose1d.
        real = m * torch.cos(p)
        imag = m * torch.sin(p)
        packed = torch.cat([real, imag], dim=1)
        s = F.conv_transpose1d(packed, self.K, stride=self.stride, padding=0)
        return torch.squeeze(s) if squeeze else s
class GlobalLayerNorm(nn.Module):
    '''
    Calculate Global Layer Normalization: normalize over both the channel
    and time axes with a single mean/variance per sample.
    dim: (int or list or torch.Size) –
          input shape from an expected input of size
    eps: a value added to the denominator for numerical stability.
    elementwise_affine: a boolean value that when set to True,
        this module has learnable per-element affine parameters
        initialized to ones (for weights) and zeros (for biases).
    '''

    def __init__(self, dim, eps=1e-05, elementwise_affine=True):
        super(GlobalLayerNorm, self).__init__()
        self.dim = dim
        self.eps = eps
        self.elementwise_affine = elementwise_affine

        if self.elementwise_affine:
            self.weight = nn.Parameter(torch.ones(self.dim, 1))
            self.bias = nn.Parameter(torch.zeros(self.dim, 1))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)

    def forward(self, x):
        # x = N x C x L
        # N x 1 x 1
        # cln: mean,var N x 1 x L
        # gln: mean,var N x 1 x 1
        if x.dim() != 3:
            # Fix: nn.Module instances have no __name__ attribute, so the
            # original `self.__name__` raised AttributeError instead of the
            # intended RuntimeError.
            raise RuntimeError("{} accept 3D tensor as input".format(
                type(self).__name__))

        mean = torch.mean(x, (1, 2), keepdim=True)
        var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
        # N x C x L
        if self.elementwise_affine:
            x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
        else:
            x = (x - mean) / torch.sqrt(var + self.eps)
        return x
class CumulativeLayerNorm(nn.LayerNorm):
    '''
    Calculate Cumulative Layer Normalization
    dim: you want to norm dim
    elementwise_affine: learnable per-element affine parameters
    '''

    def __init__(self, dim, elementwise_affine=True):
        super(CumulativeLayerNorm, self).__init__(
            dim, elementwise_affine=elementwise_affine)

    def forward(self, x):
        # N x C x L -> N x L x C so the parent LayerNorm normalizes over
        # the channel axis, then restore the original layout.
        normed = super().forward(x.transpose(1, 2))
        return normed.transpose(1, 2)
def select_norm(norm, dim):
    """Map a norm identifier ('gln', 'cln', 'ln') to its layer.

    Any unrecognised name falls back to BatchNorm1d.
    """
    if norm == 'gln':
        return GlobalLayerNorm(dim, elementwise_affine=True)
    if norm == 'cln':
        return CumulativeLayerNorm(dim, elementwise_affine=True)
    return nn.GroupNorm(1, dim) if norm == 'ln' else nn.BatchNorm1d(dim)
class Conv1D(nn.Module):
    '''
    Dilated depthwise-separable Conv1D residual block (TCN style).
    causal: if True is causal setting (left-heavy padding trimmed after conv)
    '''

    def __init__(self, in_channels=256, out_channels=512,
                 kernel_size=3, dilation=1, norm='gln', causal=False):
        super(Conv1D, self).__init__()
        self.causal = causal
        # 1x1 bottleneck expansion.
        self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
        self.PReLu1 = nn.PReLU()
        self.norm1 = select_norm(norm, out_channels)
        # Symmetric padding preserves length; causal pads fully then trims.
        self.pad = (dilation * (kernel_size - 1)
                    ) // 2 if not causal else dilation * (kernel_size - 1)
        self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
                                groups=out_channels, padding=self.pad, dilation=dilation)
        self.PReLu2 = nn.PReLU()
        self.norm2 = select_norm(norm, out_channels)
        # 1x1 projection back to the residual width.
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)

    def forward(self, x):
        """
        Input:
              x: [B x C x T], B is batch size, T is times
        Returns:
              x: [B, C, T]
        """
        # B x C x T -> B x C_o x T_o
        x_conv = self.conv1x1(x)
        x_conv = self.PReLu1(x_conv)
        x_conv = self.norm1(x_conv)
        # B x C_o x T_o
        x_conv = self.dwconv(x_conv)
        x_conv = self.PReLu2(x_conv)
        x_conv = self.norm2(x_conv)
        # Fix: only trim when pad > 0 — with kernel_size == 1 the original
        # slice x_conv[:, :, :-0] produced an empty tensor in causal mode.
        if self.causal and self.pad:
            x_conv = x_conv[:, :, :-self.pad]
        x_conv = self.end_conv1x1(x_conv)
        return x + x_conv
class Conv1D_emb(nn.Module):
    '''
    Conv1D block conditioned on a reference embedding (concatenated on the
    channel axis before the pointwise conv).
    causal: if True, use left-only (causal) padding
    '''

    def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
                 kernel_size=3, dilation=1, norm='gln', causal=False):
        super(Conv1D_emb, self).__init__()
        self.causal = causal
        self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
        self.PReLu1 = nn.PReLU()
        self.norm1 = select_norm(norm, out_channels)
        if causal:
            self.pad = dilation * (kernel_size - 1)
        else:
            self.pad = (dilation * (kernel_size - 1)) // 2
        self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
                                groups=out_channels, padding=self.pad, dilation=dilation)
        self.PReLu2 = nn.PReLU()
        self.norm2 = select_norm(norm, out_channels)
        self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)

    def forward(self, x, emb):
        """
        Input:
            x: [B x C x T], B is batch size, T is times
            emb: [B x C'] reference embedding, broadcast along time
        Returns:
            x: [B, C, T]
        """
        n_frames = x.shape[-1]
        # tile the embedding over time: B x C' -> B x C' x T
        tiled = emb.unsqueeze(-1).repeat(1, 1, n_frames)
        # channel-concatenate features and embedding: B x (C + C') x T
        out = torch.cat([x, tiled], 1)
        out = self.norm1(self.PReLu1(self.conv1x1(out)))
        out = self.norm2(self.PReLu2(self.dwconv(out)))
        if self.causal:
            out = out[:, :, :-self.pad]
        out = self.end_conv1x1(out)
        # residual connection
        return x + out
class Encoder(nn.Module):
    '''
    Conv-TasNet encoder: one strided Conv1d over the raw waveform.
    kernel_size: length of the analysis filters
    out_channels: number of filters
    '''

    def __init__(self, kernel_size=2, out_channels=64):
        super(Encoder, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=1, out_channels=out_channels,
                                kernel_size=kernel_size, stride=kernel_size // 2, groups=1)

    def forward(self, x):
        """
        Input:
            x: [B, T] raw waveform
        Returns:
            [B, C, T_out] non-negative encoded representation
        """
        # add the channel axis expected by Conv1d, convolve, then rectify
        return F.relu(self.conv1d(x.unsqueeze(dim=1)))
class Decoder(nn.ConvTranspose1d):
    '''
    Decoder of the TasNet.
    This module can be seen as the gradient of Conv1d with respect to its input.
    It is also known as a fractionally-strided convolution
    or a deconvolution (although it is not an actual deconvolution operation).
    '''

    def __init__(self, *args, **kwargs):
        super(Decoder, self).__init__(*args, **kwargs)

    def forward(self, x):
        """
        Input:
            x: N x L or N x C x L
        Returns:
            The transposed-convolution output with size-1 dims squeezed out.
        Raises:
            RuntimeError: if ``x`` is not 2- or 3-dimensional.
        """
        if x.dim() not in [2, 3]:
            # Fix: instances have no ``__name__`` attribute, so the original
            # ``self.__name__`` raised AttributeError instead of RuntimeError.
            raise RuntimeError("{} accept 2/3D tensor as input".format(
                type(self).__name__))
        x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
        # NOTE: torch.squeeze(x) drops every size-1 dim (including batch == 1);
        # the dim-1 squeeze below preserves the batch axis otherwise.
        if torch.squeeze(x).dim() == 1:
            x = torch.squeeze(x, dim=1)
        else:
            x = torch.squeeze(x)
        return x
class Separation_TasNet(nn.Module):
    '''
    TasNet separation part, conditioned on a reference embedding.
    LayerNorm -> 1x1Conv -> 3 x [Conv1D_emb + stack of dilated Conv1D] -> output

    Each of the three repeats starts with an embedding-conditioned block
    (dilation 1) followed by ``conv1d_block`` plain Conv1D blocks whose
    dilations grow as 1, 2, 4, ... (2**i).
    '''

    def __init__(self, repeats=3, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
                 out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=2):
        # NOTE(review): ``repeats`` is currently unused — the three repeats
        # are written out explicitly below.
        super(Separation_TasNet, self).__init__()
        self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
        # self.conv1d_list = self._Sequential(
        #     repeats, conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
        #     kernel_size=kernel_size, norm=norm, causal=causal)
        self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                                             kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
        self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                                                        kernel_size=kernel_size, norm=norm, causal=causal)
        self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                                             kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
        self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                                                        kernel_size=kernel_size, norm=norm, causal=causal)
        self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
                                             kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
        self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
                                                        kernel_size=kernel_size, norm=norm, causal=causal)
        # self.conv_block_4_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
        #                                      kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
        # self.conv_block_4_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
        #                                                 kernel_size=kernel_size, norm=norm, causal=causal)
        self.PReLu = nn.PReLU()
        # input normalization uses channel-wise layer norm regardless of ``norm``
        self.norm = select_norm('cln', in_channels)
        # one mask/spectrum of ``final_channels`` bins per speaker
        self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
        # self.activation = nn.Sigmoid()
        self.num_spks = num_spks

    def _Sequential_block(self, num_blocks, **block_kwargs):
        '''
        Sequential 1-D Conv Block
        input:
            num_block: how many blocks in every repeats
            **block_kwargs: parameters of Conv1D_Block
        '''
        # dilation doubles per block: 1, 2, 4, ..., 2**(num_blocks-1)
        Conv1D_lists = [Conv1D(
            **block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
        return nn.Sequential(*Conv1D_lists)

    def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
        '''
        Sequential repeats
        input:
            num_repeats: Number of repeats
            num_blocks: Number of block in every repeats
            **block_kwargs: parameters of Conv1D_Block
        '''
        repeats_lists = [self._Sequential_block(
            num_blocks, **block_kwargs) for i in range(num_repeats)]
        return nn.Sequential(*repeats_lists)

    def forward(self, x, emb):
        """
        Input:
            x: [B x C x T], B is batch size, T is times
            emb: [B x C'] reference embedding fed to each *_front block
        Returns:
            x: [B, num_spks * final_channels, T]
        """
        # B x C x T
        x = self.norm(x)
        x = self.conv1x1(x)
        # B x C x T — three conditioned repeats
        x = self.conv_block_1_front(x, emb)
        x = self.conv_block_1_back(x)
        x = self.conv_block_2_front(x, emb)
        x = self.conv_block_2_back(x)
        x = self.conv_block_3_front(x, emb)
        x = self.conv_block_3_back(x)
        # x = self.conv_block_4_front(x, emb)
        # x = self.conv_block_4_back(x)
        # B x N x T
        x = self.PReLu(x)
        x = self.end_conv1x1(x)
        # x = self.activation(x)
        return x
class TSDNet(nn.Module):
    '''
    TSDNet: joint target-sound detection and reference-class prediction.

    A pretrained CNN10 encodes the reference clip; the resulting 128-d
    embedding conditions a TSD detector run over the mixture, and a small
    MLP head predicts the reference class from the same embedding.
    '''

    def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=50):
        super(TSDNet, self).__init__()
        self.PReLu = nn.PReLU()
        # reference encoder — CNN10 is defined elsewhere in the project
        self.encoder_ref = CNN10(sample_rate=16000, window_size=1024,
                                 hop_size=320, mel_bins=64, fmin=50, fmax=8000,
                                 classes_num=527)
        # classification head on the 128-d conditioning embedding
        self.cls1 = nn.Linear(128, 128)
        self.cls2 = nn.Linear(128, cls_num)
        self.init_ref()
        # project the 512-d CNN10 output down to the 128-d embedding
        self.emb_fc = nn.Linear(512, 128)
        self.tsd = TSD(sample_rate=16000, window_size=nFrameLen,
                       hop_size=nFrameShift, mel_bins=64, fmin=50, fmax=8000)

    def init_ref(self):
        # Load pretrained AudioSet weights into the reference encoder.
        # NOTE(review): CUDA device and absolute checkpoint path are
        # hard-coded — this fails on CPU-only machines or other hosts.
        device = torch.device('cuda')
        checkpoint_path = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/model/Cnn10_mAP=0.380.pth'
        checkpoint = torch.load(checkpoint_path, map_location=device)
        self.encoder_ref.load_state_dict(checkpoint['model'])

    def forward(self, x, ref):
        """
        Input:
            x: [B, T] mixture waveform
            ref: [B, T] reference waveform
        Returns:
            (class log-probabilities [B, cls_num],
             frame-level detection output from TSD)
        """
        out_enc = self.encoder_ref(ref)
        emb = self.emb_fc(out_enc)
        emb = self.PReLu(emb)
        # out_tsd_up is the upsampled decision; only the frame-level one is returned
        out_tsd_up, out_tsd_time = self.tsd(x, emb)
        x_cls = self.PReLu(self.cls1(emb))
        x_cls = F.dropout(x_cls, p=0.5, training=self.training)
        x_cls = self.cls2(x_cls)
        x_cls = F.log_softmax(x_cls, dim=-1)
        return x_cls, out_tsd_time
if __name__ == "__main__":
    # Quick smoke test (requires CUDA and the pretrained CNN10 checkpoint).
    conv = TSDNet().cuda()
    # encoder = Encoder(16, 512)
    x = torch.randn(4, 64000).cuda()
    ref = torch.randn(4, 64000).cuda()
    # Fix: TSDNet.forward takes (mixture, reference) and returns exactly
    # (class log-probs, frame-level detection). The old call passed a third
    # ``label`` argument and unpacked three values, which raised TypeError.
    x_cls, out_tsd_time = conv(x, ref)
    print(x_cls[0].shape)
    # print("{:.3f}".format(check_parameters(conv)))
| 17,741 | 34.342629 | 129 | py |
Tim-TSENet | Tim-TSENet-main/TSDNET/model/tsd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
    """Initialize a Linear or Convolutional layer: Xavier-uniform weights
    and, when the layer has one, a zeroed bias."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
def init_bn(bn):
    """Initialize a Batchnorm layer to the identity transform
    (bias = 0, weight = 1)."""
    nn.init.zeros_(bn.bias)
    nn.init.ones_(bn.weight)
def init_weights(m):
    """Kaiming-initialize conv/linear layers and reset BatchNorm2d weights
    to 1; every handled layer with a bias gets it zeroed."""
    is_conv = isinstance(m, (nn.Conv2d, nn.Conv1d))
    if is_conv:
        nn.init.kaiming_normal_(m.weight)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
    if isinstance(m, nn.Linear):
        nn.init.kaiming_uniform_(m.weight)
    # shared bias handling for all three layer families
    if (is_conv or isinstance(m, (nn.BatchNorm2d, nn.Linear))) and m.bias is not None:
        nn.init.constant_(m.bias, 0)
def frame_shift(features):
    """Randomly roll each example of a [B, C, T, D] batch along dim 2.

    The shift for each item is drawn from N(0, 10) and truncated to an int;
    ``torch.roll`` wraps around, so no values are lost.

    NOTE(review): per-example ``features[idx]`` is [C, T, D], so ``dims=2``
    rolls the *last* axis, unlike TimeShift which rolls the time axis of the
    full batch — confirm this is intended.
    """
    import random  # fix: ``random`` is not imported at module level in this file

    batch_size = features.shape[0]
    shifted_feature = [
        torch.roll(features[idx], int(random.gauss(0, 10)), dims=2)
        for idx in range(batch_size)
    ]
    return torch.stack(shifted_feature)
class TimeShift(nn.Module):
    """Random circular shift of the time axis (dim 2), applied only while
    the module is in training mode."""

    def __init__(self, mean, std):
        super().__init__()
        self.mean = mean
        self.std = std

    def forward(self, x):
        # identity at eval time
        if not self.training:
            return x
        # draw an integer shift from N(mean, std) and roll along time
        offset = int(torch.empty(1).normal_(self.mean, self.std).item())
        return torch.roll(x, offset, dims=2)
class ConvBlock(nn.Module):
    """Two 3x3 conv + BN + ReLU layers followed by a configurable pooling."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        # Xavier weights for the convs, identity init for the batch norms
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        h = F.relu_(self.bn1(self.conv1(input)))
        h = F.relu_(self.bn2(self.conv2(h)))
        if pool_type == 'max':
            return F.max_pool2d(h, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(h, kernel_size=pool_size)
        if pool_type == 'avg+max':
            return (F.avg_pool2d(h, kernel_size=pool_size)
                    + F.max_pool2d(h, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
class Block2D(nn.Module):
    """BatchNorm -> Conv2d -> LeakyReLU: the basic 2-D feature block."""

    def __init__(self, cin, cout, kernel_size=3, padding=1):
        super().__init__()
        # single sequential pipeline; attribute name kept for state_dict keys
        self.block = nn.Sequential(
            nn.BatchNorm2d(cin),
            nn.Conv2d(cin, cout, kernel_size=kernel_size,
                      padding=padding, bias=False),
            nn.LeakyReLU(negative_slope=0.1, inplace=True))

    def forward(self, x):
        return self.block(x)
class Cnn10(nn.Module):
    """Four-stage CNN10 feature extractor with a scale-dependent pooling
    schedule (larger ``scale`` pools the time axis more aggressively)."""

    # per-scale (time, freq) pooling sizes for the four conv stages
    _POOLING = {
        8: ((2, 2), (2, 2), (2, 4), (1, 4)),
        4: ((2, 2), (2, 2), (1, 4), (1, 4)),
        2: ((2, 2), (1, 2), (1, 4), (1, 4)),
    }
    _DEFAULT_POOLING = ((1, 2), (1, 2), (1, 4), (1, 4))

    def __init__(self, scale=2):
        super(Cnn10, self).__init__()
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.scale = scale

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        """
        Input: (batch_size, 1, time, mel) spectrogram batch.
        ``pool_size``/``pool_type`` are kept for interface compatibility;
        pooling is selected from ``self.scale``.
        """
        pools = self._POOLING.get(self.scale, self._DEFAULT_POOLING)
        blocks = (self.conv_block1, self.conv_block2,
                  self.conv_block3, self.conv_block4)
        x = input
        for block, p in zip(blocks, pools):
            x = block(x, pool_size=p, pool_type='avg')
            x = F.dropout(x, p=0.2, training=self.training)
        return x
class conv1d(nn.Module):
    """1-D convolution + ReLU supporting TF-style 'VALID'/'SAME' padding names."""

    def __init__(self, nin, nout, kernel_size=3, stride=1, padding='VALID', dilation=1):
        super(conv1d, self).__init__()
        pad_modes = {
            'VALID': 0,
            'SAME': dilation * ((kernel_size - 1) // 2),
        }
        if padding not in pad_modes:
            raise ValueError("Padding Mode Error!")
        dconv_pad = pad_modes[padding]
        self.conv = nn.Conv1d(nin, nout, kernel_size=kernel_size, stride=stride, padding=dconv_pad)
        self.act = nn.ReLU()
        self.init_layer(self.conv)

    def init_layer(self, layer):
        """Kaiming-normal weights with a small positive bias (ReLU-friendly)."""
        nn.init.kaiming_normal_(layer.weight)
        nn.init.constant_(layer.bias, 0.1)

    def forward(self, x):
        return self.act(self.conv(x))
class Fusion(nn.Module):
    """Fuse a reference embedding with mixture features.

    Both inputs (shape [B, T, C]) are projected to ``inputdim_1 * n_fac``
    channels with 1x1 convolutions, multiplied element-wise, then
    average-pooled by ``n_fac`` along the channel axis back to
    ``inputdim_1`` channels.
    """

    def __init__(self, inputdim_1, inputdim_2, n_fac):
        super().__init__()
        self.fuse_layer1 = conv1d(inputdim_1, inputdim_1 * n_fac, 1)
        self.fuse_layer2 = conv1d(inputdim_2, inputdim_1 * n_fac, 1)
        # pools along the last (channel) dimension
        self.avg_pool = nn.AvgPool1d(n_fac, stride=n_fac)

    def forward(self, embedding, mix_embed):
        # run each branch as B x C x T, then return to B x T x C
        emb_proj = self.fuse_layer1(embedding.permute(0, 2, 1)).permute(0, 2, 1)
        mix_proj = self.fuse_layer2(mix_embed.permute(0, 2, 1)).permute(0, 2, 1)
        # element-wise gating of the two projections
        fused = torch.mul(emb_proj, mix_proj)
        # channel reduction: inputdim_1 * n_fac -> inputdim_1
        return self.avg_pool(fused)
class TSD(nn.Module):
    """Target sound detection network: log-mel front-end, CNN features,
    embedding fusion, and a BiGRU detection head.

    Returns (frame-level decision, upsampled decision, cosine similarity
    between features and the reference embedding).
    """

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        self.gru = nn.GRU(128, 128, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(256, 256)
        # Fix: Fusion.__init__ takes (inputdim_1, inputdim_2, n_fac); the
        # original ``Fusion(128, 4)`` raised TypeError at construction.
        # Both feature and embedding streams are 128-d here (see TSD2).
        self.fusion = Fusion(128, 128, 4)
        self.outputlayer = nn.Linear(256, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def forward(self, input, emb):
        """
        Input:
            input: (batch_size, data_length) waveform
            emb: (batch_size, 128) reference embedding
        Returns:
            (decision_time[:, :, 0], decision_up[:, :, 0], sim_cos)
        """
        x = self.spectrogram_extractor(input)   # (B, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)            # (B, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape          # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 128)
        # broadcast the embedding over time and measure per-frame similarity
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        sim_cos = self.cos(x, emb)
        x = self.fusion(emb, x)
        # NOTE(review): '_flattened' is never set, so flatten_parameters()
        # runs every call — harmless but redundant.
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        # linearly upsample the frame decisions back to spectrogram resolution
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_time[:, :, 0], decision_up[:, :, 0], sim_cos
class TSD2(nn.Module):
    """Target sound detection network (v2): log-mel front-end, CNN features,
    embedding fusion, and a 2-layer BiGRU detection head.

    Returns (frame-level decision, upsampled decision, cosine similarity
    between features and the reference embedding)."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD2, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)  # start with a 2-layer GRU
        self.fc = nn.Linear(256, 256)
        self.fusion = Fusion(128, 128, 4)
        self.outputlayer = nn.Linear(256, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.fc.apply(init_weights)  # 2022/2/12: added to fix initialization
        self.init_rnn_layer(self.gru)  # 2022/2/12: added to fix initialization
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def init_rnn_layer(self, layer):
        # Kaiming-init recurrent/input weights, zero all biases
        for name, param in layer.named_parameters():
            if name.startswith("weight"):
                nn.init.kaiming_normal_(param)
            else:
                nn.init.zeros_(param)

    def init_fc_layer(self, layer):
        """Initialize a Linear or Convolutional layer. """
        nn.init.kaiming_normal_(layer.weight)
        nn.init.constant_(layer.bias, 0.)

    def forward(self, input, emb):
        """
        Input: (batch_size, data_length) waveform, plus a [B, 128] embedding."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 128)
        # broadcast the embedding over time and measure per-frame similarity
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        sim_cos = self.cos(x, emb)
        x = self.fusion(emb, x)
        # x = torch.cat((x, emb), dim=2)  # [B, T, 128 + emb_dim]
        # NOTE(review): '_flattened' is never set, so this runs every call
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        # linearly upsample frame decisions back to spectrogram resolution
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_time[:, :, 0], decision_up[:, :, 0], sim_cos
class TSD2_tse(nn.Module):
    """TSD2 variant that can additionally consume frame-level embeddings
    from a separation front-end (``use_frame``).

    Returns (frame-level decision, upsampled decision)."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax, use_frame):
        super(TSD2_tse, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        self.use_frame = use_frame
        if self.use_frame:
            # mixture stream is 128 CNN dims + 128 frame-embedding dims
            self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)
            self.fusion = Fusion(128, 256, 4)  # (embed_dim, mix_dim, n_fac)
        else:
            self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)
            self.fusion = Fusion(128, 128, 4)
        self.fc = nn.Linear(256, 256)
        self.outputlayer = nn.Linear(256, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.fc.apply(init_weights)  # 2022/2/12: added to fix initialization
        self.init_rnn_layer(self.gru)  # 2022/2/12: added to fix initialization
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def init_rnn_layer(self, layer):
        # Kaiming-init recurrent/input weights, zero all biases
        for name, param in layer.named_parameters():
            if name.startswith("weight"):
                nn.init.kaiming_normal_(param)
            else:
                nn.init.zeros_(param)

    def init_fc_layer(self, layer):
        """Initialize a Linear or Convolutional layer. """
        nn.init.kaiming_normal_(layer.weight)
        nn.init.constant_(layer.bias, 0.)

    def forward(self, input, emb, frame_emb):
        """
        Input: (batch_size, data_length) waveform, [B, 128] clip embedding,
        and [B, T', 128] frame-level embedding (interpolated to match)."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 128)
        # align the frame embedding's time axis with the CNN feature frames
        frame_emb_up = torch.nn.functional.interpolate(
            frame_emb.transpose(1, 2),
            x.shape[1],
            mode='linear',
            align_corners=False).transpose(1, 2)
        # if we decide to use the frame-level embedding, concatenate it
        if self.use_frame:
            # sim_cos = self.cos(x, emb)
            x = torch.cat((x, frame_emb_up), dim=2)
        # else:
        #     sim_cos = self.cos(x, emb)
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        x = self.fusion(emb, x)
        # x = torch.cat((x, emb), dim=2)  # [B, T, 128 + emb_dim]
        # NOTE(review): '_flattened' is never set, so this runs every call
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_time[:, :, 0], decision_up[:, :, 0]
class TSD_L(nn.Module):
    """TSD variant with heavier time pooling (three (2, 4) LP-pool stages)
    and concatenation-based fusion instead of the Fusion module.

    Returns (frame decision, frame decision, cosine similarity) — note the
    first two outputs are identical; no upsampling is performed here."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD_L, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (2, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (2, 4)),
            nn.Dropout(0.3),
        )
        # input is CNN features (128) concatenated with the embedding (128)
        self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(512, 256)
        self.outputlayer = nn.Linear(256, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def forward(self, input, emb):
        """
        Input: (batch_size, data_length) waveform plus a [B, 128] embedding."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 128)
        # broadcast the embedding over time and measure per-frame similarity
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        sim_cos = self.cos(x, emb)
        x = torch.cat((x, emb), dim=2)  # [B, T, 128 + emb_dim]
        # NOTE(review): '_flattened' is never set, so this runs every call
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        # decision_up = torch.nn.functional.interpolate(
        #     decision_time.transpose(1, 2),
        #     time,
        #     mode='linear',
        #     align_corners=False).transpose(1, 2)
        return decision_time[:, :, 0], decision_time[:, :, 0], sim_cos
class TSD_plus(nn.Module):
    """TSD variant using a CNN10 backbone (512-d features) with
    concatenation-based fusion.

    Returns (upsampled decision, frame-level decision, fused features)."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD_plus, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = Cnn10()
        # input is CNN10 features (512) concatenated with the embedding (128)
        self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(1024, 256)
        self.outputlayer = nn.Linear(256, 2)
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def forward(self, input, emb):
        """
        Input: (batch_size, data_length) waveform plus a [B, 128] embedding."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 512)
        # broadcast the embedding over time and concatenate with the features
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        x = torch.cat((x, emb), dim=2)  # [B, T, 512 + emb_dim]
        # NOTE(review): '_flattened' is never set, so this runs every call
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        # linearly upsample frame decisions back to spectrogram resolution
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_up[:, :, 0], decision_time[:, :, 0], x
class TSD_plus_sim(nn.Module):
    """TSD_plus with SpecAugment/time-shift augmentation (training only) and
    an extra cosine-similarity branch between projected features and the
    reference embedding.

    Returns (upsampled decision, frame-level decision, cosine similarity)."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD_plus_sim, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        # training-time spectrogram augmentation
        self.spec_augmenter = SpecAugmentation(time_drop_width=60, time_stripes_num=2,
                                               freq_drop_width=8, freq_stripes_num=2)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.time_shift = TimeShift(0, 50)
        self.features = Cnn10()
        # input is CNN10 features (512) concatenated with the embedding (128)
        self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
        # projects 512-d features to the 128-d embedding space for similarity
        self.sim_fc = nn.Linear(512, 128)
        self.fc = nn.Linear(1024, 256)
        self.outputlayer = nn.Linear(256, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)

    def forward(self, input, emb):
        """
        Input: (batch_size, data_length) waveform plus a [B, 128] embedding."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        if self.training:
            # augmentation only during training
            x = self.time_shift(x)
            x = self.spec_augmenter(x)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, frames, 512)
        # broadcast embedding over time; similarity uses the projected features
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        x_sim = self.sim_fc(x)
        sim_cos = self.cos(x_sim, emb)
        x = torch.cat((x, emb), dim=2)  # [B, T, 512 + emb_dim]
        # NOTE(review): '_flattened' is never set, so this runs every call
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
        x, _ = self.gru(x)
        x = self.fc(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_up[:, :, 0], decision_time[:, :, 0], sim_cos
class TSD_IS(nn.Module):
    """TSD baseline with max-pooled CNN features and a two-stage GRU; the
    embedding is concatenated between the GRUs.

    Returns (upsampled decision, frame-level decision, zero similarity
    placeholder)."""

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super().__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 64),
            nn.MaxPool2d((2, 4)),
            Block2D(64, 64),
            nn.MaxPool2d((1, 4)),
            Block2D(64, 64),
            nn.MaxPool2d((1, 4)))
        # with torch.no_grad():
        #     rnn_input_dim = self.features(torch.randn(1, 1, 500, inputdim)).shape
        #     rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
        self.gru = nn.GRU(64, 62, bidirectional=True, batch_first=True)
        # second GRU consumes first-GRU output (124) + embedding (128)
        self.gru2 = nn.GRU(124 + 128, 124, bidirectional=True, batch_first=True)
        # self.fc = nn.Linear(248, 2)
        self.outputlayer = nn.Linear(248, 2)
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.bn0.apply(init_bn)
        self.outputlayer.apply(init_weights)

    def forward(self, input, embedding):
        """
        Input: (batch_size, data_length) waveform plus a [B, 128] embedding."""
        x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape  # (b, 1, t, d)
        x = self.features(x)
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
            self.gru2.flatten_parameters()
        # flatten channel and freq dims into one feature axis: (b, frames, 64)
        x = x.transpose(1, 2).contiguous().flatten(-2)
        x, _ = self.gru(x)
        embedding = embedding.unsqueeze(1)
        embedding = embedding.repeat(1, x.shape[1], 1)
        # Fix: allocate the placeholder on the input's device instead of the
        # hard-coded ``.cuda()``, which crashed on CPU-only runs.
        sim_cos = torch.zeros(1, device=x.device)
        x = torch.cat((x, embedding), dim=2)  # [B, T, 124 + emb_dim]
        x, _ = self.gru2(x)
        decision_time = torch.softmax(self.outputlayer(x), dim=2)  # (b, frames, 2)
        # linearly upsample frame decisions back to spectrogram resolution
        decision_up = torch.nn.functional.interpolate(
            decision_time.transpose(1, 2),
            time,
            mode='linear',
            align_corners=False).transpose(1, 2)
        return decision_up[:, :, 0], decision_time[:, :, 0], sim_cos
class TSD_regresion_two_cls(nn.Module):
    """Target-sound detection head that regresses onset/offset as two classifications.

    The waveform is converted to a (frozen) log-mel spectrogram, encoded by a
    CNN stack, fused with a broadcast reference embedding, summarized by a
    BiGRU, and mapped to a 10-way start-frame distribution and an 11-way
    end-frame distribution.
    """
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD_regresion_two_cls, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor (parameters frozen; not trained)
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
            win_length=window_size, window=window, center=center,
            pad_mode=pad_mode,
            freeze_parameters=True)
        # Logmel feature extractor (frozen)
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
            n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
            top_db=top_db,
            freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        # NOTE(review): GRU input is 256 = flattened CNN features + emb dim; confirm
        # that mel_bins and the embedding size satisfy this with the actual config.
        self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(512, 64)
        self.fc2 = nn.Linear(64,16)
        self.st_fc = nn.Linear(16*312, 10)  # start-frame logits (10 classes)
        self.ed_fc = nn.Linear(16*312,11)   # end-frame logits (11 classes)
        self.PReLu1 = nn.PReLU()
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.bn0.apply(init_bn)
    def forward(self, input, emb):
        """Input: (batch_size, data_length) waveform; emb: (batch, emb_dim).

        Returns (end_logits, start_logits, sim_cos): per-clip end/start frame
        logits plus the framewise cosine similarity between features and emb.
        """
        x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
        # BatchNorm over the mel axis
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape # (b,1,t,d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2) # fold (channels, mel) -> one feature axis
        # Broadcast the clip-level embedding across time.
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        sim_cos = self.cos(x,emb)
        x = torch.cat((x, emb), dim=2) # [B, T, feat + emb_dim]
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
            # FIX: mark the weights as flattened so this runs only once; the
            # original guard never set the attribute, defeating its purpose.
            self._flattened = True
        x, _ = self.gru(x)
        x = self.fc(x)
        x = self.PReLu1(x)
        x = self.fc2(x)
        x = x.flatten(-2) # (b, 16 * 312): concatenate all time steps
        x_st = self.st_fc(x)
        x_ed = self.ed_fc(x)
        decision_time = x_st
        decision_up = x_ed
        return decision_up, decision_time, sim_cos
class TSD_regresion(nn.Module):
    """Target-sound detection head that directly regresses (onset, offset).

    Same CNN + embedding-fusion + BiGRU backbone as the classification variant,
    but a single linear layer maps the flattened sequence to two scalars.
    """
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
        super(TSD_regresion, self).__init__()
        window = 'hann'
        center = False
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Spectrogram extractor (parameters frozen; not trained)
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
            win_length=window_size, window=window, center=center,
            pad_mode=pad_mode,
            freeze_parameters=True)
        # Logmel feature extractor (frozen)
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
            n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
            top_db=top_db,
            freeze_parameters=True)
        self.bn0 = nn.BatchNorm2d(mel_bins)
        self.features = nn.Sequential(
            Block2D(1, 32),
            nn.LPPool2d(4, (2, 4)),
            Block2D(32, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            Block2D(128, 128),
            Block2D(128, 128),
            nn.LPPool2d(4, (1, 4)),
            nn.Dropout(0.3),
        )
        # NOTE(review): GRU input is 256 = flattened CNN features + emb dim; confirm
        # that mel_bins and the embedding size satisfy this with the actual config.
        self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(512, 64)
        self.fc2 = nn.Linear(64,16)
        self.PReLu1 = nn.PReLU()
        self.outputlayer = nn.Linear(16*312, 2)  # -> (onset, offset) regression
        self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.features.apply(init_weights)
        self.outputlayer.apply(init_weights)
        self.bn0.apply(init_bn)
    def forward(self, input, emb):
        """Input: (batch_size, data_length) waveform; emb: (batch, emb_dim).

        Returns (x, x, sim_cos): the 2-value regression output twice (kept for
        API parity with sibling models) and the framewise cosine similarity.
        """
        x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
        # BatchNorm over the mel axis
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        batch, ch, time, dim = x.shape # (b,1,t,d)
        x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2) # fold (channels, mel) -> one feature axis
        # Broadcast the clip-level embedding across time.
        emb = emb.unsqueeze(1)
        emb = emb.repeat(1, x.shape[1], 1)
        sim_cos = self.cos(x,emb)
        x = torch.cat((x, emb), dim=2) # [B, T, feat + emb_dim]
        if not hasattr(self, '_flattened'):
            self.gru.flatten_parameters()
            # FIX: mark the weights as flattened so this runs only once; the
            # original guard never set the attribute, defeating its purpose.
            self._flattened = True
        x, _ = self.gru(x)
        x = self.fc(x)
        x = self.PReLu1(x)
        x = self.fc2(x)
        x = x.flatten(-2) # (b, 16 * 312): concatenate all time steps
        x = self.outputlayer(x)
        decision_time = x
        decision_up = x
        return decision_up, decision_time, sim_cos
SOF-VSR | SOF-VSR-master/TIP/modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
class SOFVSR(nn.Module):
    """Super-resolving Optical Flow for Video SR (TIP version).

    Estimates coarse-to-fine optical flow between every neighbour frame and the
    centre frame (OFRnet), warps each neighbour into ``scale**2`` sub-pixel
    drafts using the HR flow, and fuses the draft cube into the SR centre
    frame (SRnet).
    """
    def __init__(self, cfg, n_frames=3, is_training=True):
        super(SOFVSR, self).__init__()
        self.scale = cfg.scale
        self.is_training = is_training
        self.OFR = OFRnet(scale=cfg.scale, channels=320)
        self.SR = SRnet(scale=cfg.scale, channels=320, n_frames=n_frames)
    def forward(self, x):
        """x: (b, n_frames, c, h, w) LR clip -> SR centre frame
        (plus per-level flow lists when training)."""
        b, n_frames, c, h, w = x.size() # x: b*n*c*h*w
        idx_center = (n_frames - 1) // 2
        # motion estimation: batch every (neighbour, centre) pair through OFRnet at once
        flow_L1 = []
        flow_L2 = []
        flow_L3 = []
        input = []
        for idx_frame in range(n_frames):
            if idx_frame != idx_center:
                input.append(torch.cat((x[:,idx_frame,:,:,:], x[:,idx_center,:,:,:]), 1))
        optical_flow_L1, optical_flow_L2, optical_flow_L3 = self.OFR(torch.cat(input, 0))
        # un-batch: the leading axis enumerates the neighbour pairs
        optical_flow_L1 = optical_flow_L1.view(-1, b, 2, h//2, w//2)
        optical_flow_L2 = optical_flow_L2.view(-1, b, 2, h, w)
        optical_flow_L3 = optical_flow_L3.view(-1, b, 2, h*self.scale, w*self.scale)
        # motion compensation: draft cube = centre frame + scale^2 warped drafts per neighbour
        draft_cube = []
        draft_cube.append(x[:, idx_center, :, :, :])
        for idx_frame in range(n_frames):
            if idx_frame == idx_center:
                # empty slot keeps list positions aligned with frame indices
                flow_L1.append([])
                flow_L2.append([])
                flow_L3.append([])
            if idx_frame != idx_center:
                # map the frame index to its row in the batched flow tensors
                if idx_frame < idx_center:
                    idx = idx_frame
                if idx_frame > idx_center:
                    idx = idx_frame - 1
                flow_L1.append(optical_flow_L1[idx, :, :, :, :])
                flow_L2.append(optical_flow_L2[idx, :, :, :, :])
                flow_L3.append(optical_flow_L3[idx, :, :, :, :])
                # sample the HR flow on each of the scale x scale sub-grids to get
                # one sub-pixel-shifted LR draft per (i, j) offset
                for i in range(self.scale):
                    for j in range(self.scale):
                        draft = optical_flow_warp(x[:, idx_frame, :, :, :],
                                                  optical_flow_L3[idx, :, :, i::self.scale, j::self.scale] / self.scale)
                        draft_cube.append(draft)
        draft_cube = torch.cat(draft_cube, 1)
        # super-resolution
        SR = self.SR(draft_cube)
        if self.is_training:
            return flow_L1, flow_L2, flow_L3, SR
        if not self.is_training:
            return SR
class OFRnet(nn.Module):
    """Optical Flow Reconstruction net: coarse-to-fine HR flow estimation.

    Level 1 predicts flow at half resolution, level 2 refines it at input
    resolution (sharing RNN1/RNN2 weights), and level 3 super-resolves the flow
    to ``scale`` times the input resolution via pixel shuffle.
    """
    def __init__(self, scale, channels):
        super(OFRnet, self).__init__()
        self.pool = nn.AvgPool2d(2)
        self.scale = scale
        ## RNN part (shared by levels 1 and 2)
        self.RNN1 = nn.Sequential(
            nn.Conv2d(4, channels, 3, 1, 1, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            CasResB(3, channels)
        )
        self.RNN2 = nn.Sequential(
            nn.Conv2d(channels, 2, 3, 1, 1, bias=False),
        )
        # SR part: upscale features and predict the 2-channel HR flow
        SR = []
        SR.append(CasResB(3, channels))
        if self.scale == 4:
            SR.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
            SR.append(nn.PixelShuffle(2))
            SR.append(nn.LeakyReLU(0.1, inplace=True))
            SR.append(nn.Conv2d(64, 64 * 4, 1, 1, 0, bias=False))
            SR.append(nn.PixelShuffle(2))
            SR.append(nn.LeakyReLU(0.1, inplace=True))
        elif self.scale == 3:
            SR.append(nn.Conv2d(channels, 64 * 9, 1, 1, 0, bias=False))
            SR.append(nn.PixelShuffle(3))
            SR.append(nn.LeakyReLU(0.1, inplace=True))
        elif self.scale == 2:
            SR.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
            SR.append(nn.PixelShuffle(2))
            SR.append(nn.LeakyReLU(0.1, inplace=True))
        SR.append(nn.Conv2d(64, 2, 3, 1, 1, bias=False))
        self.SR = nn.Sequential(*SR)
    # FIX: define forward() instead of overriding __call__; nn.Module.__call__
    # dispatches to forward, so instance calls behave identically but hooks and
    # the rest of the Module call machinery now work.
    def forward(self, x): # x: (b, 2, h, w) -- neighbour and centre frame stacked
        #Part 1: coarse flow at half resolution (flow channels start at zero)
        x_L1 = self.pool(x)
        b, c, h, w = x_L1.size()
        # NOTE(review): hard-codes CUDA; CPU inference would need x_L1.new_zeros
        input_L1 = torch.cat((x_L1, torch.zeros(b, 2, h, w).cuda()), 1)
        optical_flow_L1 = self.RNN2(self.RNN1(input_L1))
        optical_flow_L1_upscaled = F.interpolate(optical_flow_L1, scale_factor=2, mode='bilinear', align_corners=False) * 2
        #Part 2: refine at input resolution using the warped neighbour
        x_L2 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], 1), optical_flow_L1_upscaled)
        input_L2 = torch.cat((x_L2, torch.unsqueeze(x[:, 1, :, :], 1), optical_flow_L1_upscaled), 1)
        optical_flow_L2 = self.RNN2(self.RNN1(input_L2)) + optical_flow_L1_upscaled
        #Part 3: super-resolve the flow to scale x input resolution
        x_L3 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], 1), optical_flow_L2)
        input_L3 = torch.cat((x_L3, torch.unsqueeze(x[:, 1, :, :], 1), optical_flow_L2), 1)
        optical_flow_L3 = self.SR(self.RNN1(input_L3)) + \
                          F.interpolate(optical_flow_L2, scale_factor=self.scale, mode='bilinear', align_corners=False) * self.scale
        return optical_flow_L1, optical_flow_L2, optical_flow_L3
class SRnet(nn.Module):
    """Super-resolution branch: fuses the draft cube into the SR centre frame.

    Input channels = 1 (centre frame) + scale^2 drafts for each of the
    n_frames - 1 neighbours; output is the single-channel SR image.
    """
    def __init__(self, scale, channels, n_frames):
        super(SRnet, self).__init__()
        body = []
        body.append(nn.Conv2d(1 * scale ** 2 * (n_frames-1) + 1, channels, 3, 1, 1, bias=False))
        body.append(nn.LeakyReLU(0.1, inplace=True))
        body.append(CasResB(8, channels))
        if scale == 4:
            body.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
            body.append(nn.PixelShuffle(2))
            body.append(nn.LeakyReLU(0.1, inplace=True))
            body.append(nn.Conv2d(64, 64 * 4, 1, 1, 0, bias=False))
            body.append(nn.PixelShuffle(2))
            body.append(nn.LeakyReLU(0.1, inplace=True))
        elif scale == 3:
            body.append(nn.Conv2d(channels, 64 * 9, 1, 1, 0, bias=False))
            body.append(nn.PixelShuffle(3))
            body.append(nn.LeakyReLU(0.1, inplace=True))
        elif scale == 2:
            body.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
            body.append(nn.PixelShuffle(2))
            body.append(nn.LeakyReLU(0.1, inplace=True))
        body.append(nn.Conv2d(64, 1, 3, 1, 1, bias=True))
        self.body = nn.Sequential(*body)
    # FIX: define forward() instead of overriding __call__; nn.Module.__call__
    # dispatches to forward, so instance calls behave identically but hooks work.
    def forward(self, x):
        out = self.body(x)
        return out
class ResB(nn.Module):
    """Half-channel residual unit followed by a 2-group channel shuffle.

    Only the second half of the channels is transformed (1x1 -> depthwise 3x3
    -> 1x1 with LeakyReLU); the first half passes through untouched, and the
    shuffle mixes the two halves (ShuffleNet-style).
    """
    def __init__(self, channels):
        super(ResB, self).__init__()
        half = channels // 2
        self.body = nn.Sequential(
            nn.Conv2d(half, half, 1, 1, 0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(half, half, 3, 1, 1, bias=False, groups=half),
            nn.Conv2d(half, half, 1, 1, 0, bias=False),
            nn.LeakyReLU(0.1, inplace=True),
        )
    def forward(self, x):
        half = x.shape[1] // 2
        kept = x[:, :half, :, :]
        transformed = self.body(x[:, half:, :, :])
        return channel_shuffle(torch.cat((kept, transformed), 1), 2)
class CasResB(nn.Module):
    """A cascade of ``n_ResB`` ResB units applied sequentially."""
    def __init__(self, n_ResB, channels):
        super(CasResB, self).__init__()
        self.body = nn.Sequential(*(ResB(channels) for _ in range(n_ResB)))
    def forward(self, x):
        return self.body(x)
def channel_shuffle(x, groups):
    """Interleave the channels of `x` across `groups` (ShuffleNet shuffle).

    For groups=2 and channels [a0, a1, b0, b1] the result is [a0, b0, a1, b1].
    """
    b, c, h, w = x.size()
    shuffled = (x.view(b, groups, c // groups, h, w)
                 .permute(0, 2, 1, 3, 4)
                 .contiguous())
    return shuffled.view(b, -1, h, w)
def optical_flow_warp(image, image_optical_flow):
    """Backward-warp `image` with a dense flow field via bilinear grid sampling.

    Arguments
        image: reference images tensor, (b, c, h, w)
        image_optical_flow: optical flow to image_ref, (b, 2, h, w)
    """
    b, _, h, w = image.size()
    # Identity sampling grid in normalized [-1, 1] coordinates, (b, 2, h, w).
    grid = np.meshgrid(range(w), range(h))
    grid = np.stack(grid, axis=-1).astype(np.float64)
    grid[:, :, 0] = grid[:, :, 0] * 2 / (w - 1) - 1
    grid[:, :, 1] = grid[:, :, 1] * 2 / (h - 1) - 1
    grid = np.tile(grid.transpose(2, 0, 1), (b, 1, 1, 1))
    grid = Variable(torch.Tensor(grid))
    if image_optical_flow.is_cuda:
        grid = grid.cuda()
    # NOTE(review): the 31/(w-1) factor looks like a baked-in 32-px patch
    # convention inherited from training -- confirm before changing.
    flow_x = torch.unsqueeze(image_optical_flow[:, 0, :, :] * 31 / (w - 1), dim=1)
    flow_y = torch.unsqueeze(image_optical_flow[:, 1, :, :] * 31 / (h - 1), dim=1)
    sample_grid = (grid + torch.cat((flow_x, flow_y), 1)).permute(0, 2, 3, 1)
    return F.grid_sample(image, sample_grid, padding_mode='border')
SOF-VSR | SOF-VSR-master/TIP/data_utils.py | from PIL import Image
from torch.utils.data.dataset import Dataset
from modules import optical_flow_warp
import numpy as np
import os
import torch
import random
class TrainsetLoader(Dataset):
    """Training set yielding random (LR, HR) triplets of consecutive Y frames.

    Each __getitem__ ignores `idx` and instead draws a random video and a
    random 3-frame window, extracts the luma channel, crops aligned random
    patches, and applies random flip/transpose augmentation. The length is
    n_iters * batch_size so one "epoch" covers the whole training schedule.
    """
    def __init__(self, cfg):
        # FIX: super(TrainsetLoader).__init__() created an unbound super object
        # and never initialised the base class; call it properly.
        super(TrainsetLoader, self).__init__()
        self.trainset_dir = cfg.trainset_dir
        self.scale = cfg.scale
        self.patch_size = cfg.patch_size
        self.n_iters = cfg.n_iters * cfg.batch_size
        self.video_list = os.listdir(cfg.trainset_dir)
        self.degradation = cfg.degradation
    def __getitem__(self, idx):
        # `idx` is unused: every item is an independent random draw.
        idx_video = random.randint(0, len(self.video_list)-1)
        idx_frame = random.randint(0, 28)  # #frames of training videos is 31, 31-3=28
        lr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/lr_x' + str(self.scale) + '_' + self.degradation
        hr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/hr'
        # read HR & LR frames
        LR0 = Image.open(lr_dir + '/lr' + str(idx_frame) + '.png')
        LR1 = Image.open(lr_dir + '/lr' + str(idx_frame + 1) + '.png')
        LR2 = Image.open(lr_dir + '/lr' + str(idx_frame + 2) + '.png')
        HR0 = Image.open(hr_dir + '/hr' + str(idx_frame) + '.png')
        HR1 = Image.open(hr_dir + '/hr' + str(idx_frame + 1) + '.png')
        HR2 = Image.open(hr_dir + '/hr' + str(idx_frame + 2) + '.png')
        LR0 = np.array(LR0, dtype=np.float32) / 255.0
        LR1 = np.array(LR1, dtype=np.float32) / 255.0
        LR2 = np.array(LR2, dtype=np.float32) / 255.0
        HR0 = np.array(HR0, dtype=np.float32) / 255.0
        HR1 = np.array(HR1, dtype=np.float32) / 255.0
        HR2 = np.array(HR2, dtype=np.float32) / 255.0
        # extract Y channel for both HR targets and LR inputs
        HR0 = rgb2y(HR0)
        HR1 = rgb2y(HR1)
        HR2 = rgb2y(HR2)
        LR0 = rgb2y(LR0)
        LR1 = rgb2y(LR1)
        LR2 = rgb2y(LR2)
        # crop aligned patches randomly
        HR0, HR1, HR2, LR0, LR1, LR2 = random_crop(HR0, HR1, HR2, LR0, LR1, LR2, self.patch_size, self.scale)
        # stack the three frames along the channel axis (HWC with C = 3 frames)
        HR0 = HR0[:, :, np.newaxis]
        HR1 = HR1[:, :, np.newaxis]
        HR2 = HR2[:, :, np.newaxis]
        LR0 = LR0[:, :, np.newaxis]
        LR1 = LR1[:, :, np.newaxis]
        LR2 = LR2[:, :, np.newaxis]
        HR = np.concatenate((HR0, HR1, HR2), axis=2)
        LR = np.concatenate((LR0, LR1, LR2), axis=2)
        # data augmentation (identical random flips/transpose for LR and HR)
        LR, HR = augmentation()(LR, HR)
        return toTensor(LR), toTensor(HR)
    def __len__(self):
        return self.n_iters
class TestsetLoader(Dataset):
    """Test set: sliding windows of 3 consecutive LR frames from one video.

    Yields the stacked Y channels of frames (i, i+1, i+2) plus the bicubically
    upscaled Cb/Cr channels of the centre frame (used to colourise the SR luma).
    """
    def __init__(self, cfg, video_name):
        # FIX: super(TestsetLoader).__init__() created an unbound super object
        # and never initialised the base class; call it properly.
        super(TestsetLoader, self).__init__()
        self.dataset_dir = cfg.testset_dir + '/' + video_name
        self.degradation = cfg.degradation
        self.scale = cfg.scale
        self.frame_list = os.listdir(self.dataset_dir + '/lr_x' + str(self.scale) + '_' + self.degradation)
    def __getitem__(self, idx):
        dir = self.dataset_dir + '/lr_x' + str(self.scale) + '_' + self.degradation
        # frame files are 1-based and zero-padded to 2 digits
        LR0 = Image.open(dir + '/' + 'lr_' + str(idx+1).rjust(2, '0') + '.png')
        LR1 = Image.open(dir + '/' + 'lr_' + str(idx+2).rjust(2, '0') + '.png')
        LR2 = Image.open(dir + '/' + 'lr_' + str(idx+3).rjust(2, '0') + '.png')
        W, H = LR1.size
        # H and W should be divisible by 2 (OFRnet pools by 2)
        W = int(W // 2) * 2
        H = int(H // 2) * 2
        LR0 = LR0.crop([0, 0, W, H])
        LR1 = LR1.crop([0, 0, W, H])
        LR2 = LR2.crop([0, 0, W, H])
        LR1_bicubic = LR1.resize((W*self.scale, H*self.scale), Image.BICUBIC)
        LR1_bicubic = np.array(LR1_bicubic, dtype=np.float32) / 255.0
        LR0 = np.array(LR0, dtype=np.float32) / 255.0
        LR1 = np.array(LR1, dtype=np.float32) / 255.0
        LR2 = np.array(LR2, dtype=np.float32) / 255.0
        # extract Y channel for LR inputs
        LR0_y, _, _ = rgb2ycbcr(LR0)
        LR1_y, _, _ = rgb2ycbcr(LR1)
        LR2_y, _, _ = rgb2ycbcr(LR2)
        LR0_y = LR0_y[:, :, np.newaxis]
        LR1_y = LR1_y[:, :, np.newaxis]
        LR2_y = LR2_y[:, :, np.newaxis]
        LR = np.concatenate((LR0_y, LR1_y, LR2_y), axis=2)
        LR = toTensor(LR)
        # generate Cb, Cr channels using bicubic interpolation of the centre frame
        _, SR_cb, SR_cr = rgb2ycbcr(LR1_bicubic)
        return LR, SR_cb, SR_cr
    def __len__(self):
        return len(self.frame_list) - 2
class augmentation(object):
    """Random flips/transpose applied identically to an (input, target) pair.

    Each of the three transforms -- horizontal flip, vertical flip and an
    H/W transpose -- fires independently with probability 0.5. Arrays are HWC.
    """
    def __call__(self, input, target):
        if random.random() < 0.5:   # horizontal flip
            input, target = input[:, ::-1, :], target[:, ::-1, :]
        if random.random() < 0.5:   # vertical flip
            input, target = input[::-1, :, :], target[::-1, :, :]
        if random.random() < 0.5:   # swap H and W
            input, target = input.transpose(1, 0, 2), target.transpose(1, 0, 2)
        # materialise the views so downstream torch.from_numpy works
        return np.ascontiguousarray(input), np.ascontiguousarray(target)
def random_crop(HR0, HR1, HR2, LR0, LR1, LR2, patch_size_lr, scale):
    """Crop one random aligned patch from three HR frames and three LR frames.

    The patch position is drawn in LR coordinates with a 10-pixel safety
    margin from every border; the HR crop is the same region scaled by
    `scale`. Returns (HR0, HR1, HR2, LR0, LR1, LR2) patches.
    """
    h_hr, w_hr = HR0.shape
    h_lr, w_lr = h_hr // scale, w_hr // scale
    idx_h = random.randint(10, h_lr - patch_size_lr - 10)
    idx_w = random.randint(10, w_lr - patch_size_lr - 10)
    top_lr, left_lr = idx_h - 1, idx_w - 1
    bottom_lr, right_lr = top_lr + patch_size_lr, left_lr + patch_size_lr
    top_hr, left_hr = top_lr * scale, left_lr * scale
    bottom_hr, right_hr = bottom_lr * scale, right_lr * scale
    hr_patches = [frame[top_hr:bottom_hr, left_hr:right_hr] for frame in (HR0, HR1, HR2)]
    lr_patches = [frame[top_lr:bottom_lr, left_lr:right_lr] for frame in (LR0, LR1, LR2)]
    return (*hr_patches, *lr_patches)
def toTensor(img):
    """Convert an HWC numpy image to a CHW torch tensor (memory is shared)."""
    chw = img.transpose((2, 0, 1))
    return torch.from_numpy(chw)
def rgb2ycbcr(img_rgb):
    """Convert RGB (range 0-1) to BT.601 'video range' Y, Cb, Cr channels."""
    r = img_rgb[:, :, 0]
    g = img_rgb[:, :, 1]
    b = img_rgb[:, :, 2]
    img_y = 0.257 * r + 0.504 * g + 0.098 * b + 16 / 255.0
    img_cb = -0.148 * r - 0.291 * g + 0.439 * b + 128 / 255.0
    img_cr = 0.439 * r - 0.368 * g - 0.071 * b + 128 / 255.0
    return img_y, img_cb, img_cr
def ycbcr2rgb(img_ycbcr):
    """Convert BT.601 'video range' YCbCr (range 0-1) back to an HWC RGB array."""
    y = img_ycbcr[:, :, 0] - 16 / 255.0
    cb = img_ycbcr[:, :, 1] - 128 / 255.0
    cr = img_ycbcr[:, :, 2] - 128 / 255.0
    img_r = 1.164 * y + 1.596 * cr
    img_g = 1.164 * y - 0.392 * cb - 0.813 * cr
    img_b = 1.164 * y + 2.017 * cb
    return np.stack((img_r, img_g, img_b), axis=2)
def rgb2y(img_rgb):
    """Convert RGB (range 0-1) to the BT.601 'video range' luma (Y) channel."""
    r = img_rgb[:, :, 0]
    g = img_rgb[:, :, 1]
    b = img_rgb[:, :, 2]
    return 0.257 * r + 0.504 * g + 0.098 * b + 16 / 255.0
def OFR_loss(x0, x1, optical_flow):
    """Photometric warping loss plus 0.1x total-variation smoothness on the flow."""
    photometric = torch.mean(torch.abs(x1 - optical_flow_warp(x0, optical_flow)))
    return photometric + 0.1 * L1_regularization(optical_flow)
def L1_regularization(image):
    """Mean anisotropic total variation of `image` with shape (b, c, h, w).

    Sums |vertical diff| + |horizontal diff| over the (h-1, w-1) interior and
    normalises by b * (h-1) * (w-1). Note: not divided by the channel count.
    """
    b, _, h, w = image.size()
    core = image[:, :, 0:h - 1, 0:w - 1]
    diff_vertical = core - image[:, :, 1:, 0:w - 1]
    diff_horizontal = core - image[:, :, 0:h - 1, 1:]
    tv = torch.abs(diff_vertical) + torch.abs(diff_horizontal)
    return torch.sum(tv) / (b * (h - 1) * (w - 1))
| 7,589 | 36.389163 | 143 | py |
SOF-VSR | SOF-VSR-master/TIP/train.py | from torch.autograd import Variable
from torch.utils.data import DataLoader
from modules import SOFVSR
from data_utils import TrainsetLoader, OFR_loss
import torch.backends.cudnn as cudnn
import argparse
import torch
import numpy as np
import torch.nn.functional as F
import os
def parse_args():
    """Build the training CLI configuration (defaults: BI degradation, x4 SR)."""
    parser = argparse.ArgumentParser()
    # NOTE(review): type=bool treats any non-empty CLI string as True
    # (e.g. "--gpu_mode False" still yields True).
    for flag, kind, default, help_text in (
            ('--degradation', str, 'BI', None),
            ('--scale', int, 4, None),
            ('--gpu_mode', bool, True, None),
            ('--patch_size', int, 32, None),
            ('--batch_size', int, 32, None),
            ('--n_iters', int, 200000, 'number of iterations to train'),
            ('--trainset_dir', str, 'data/train', None)):
        parser.add_argument(flag, type=kind, default=default, help=help_text)
    return parser.parse_args()
def main(cfg):
    """Train SOF-VSR: build the model, stream random training patches, optimise.

    Every 5000 iterations, prints the mean loss since the previous report and
    saves a checkpoint under ``log/<degradation>_x<scale>/``.
    """
    # model
    net = SOFVSR(cfg, is_training=True)
    if cfg.gpu_mode:
        net.cuda()
        cudnn.benchmark = True
    # dataloader (the dataset length encodes n_iters * batch_size draws)
    train_set = TrainsetLoader(cfg)
    train_loader = DataLoader(train_set, num_workers=4, batch_size=cfg.batch_size, shuffle=True)
    # train
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    milestones = [80000, 160000]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
    criterion = torch.nn.MSELoss()
    loss_list = []
    for idx_iter, (LR, HR) in enumerate(train_loader):
        # data: (b, n_frames, h, w) -> (b, n_frames, 1, h, w) single-channel cubes
        b, n_frames, h_lr, w_lr = LR.size()
        idx_center = (n_frames - 1) // 2
        LR, HR = Variable(LR), Variable(HR)
        if cfg.gpu_mode:
            LR = LR.cuda()
            HR = HR.cuda()
        LR = LR.view(b, -1, 1, h_lr, w_lr)
        HR = HR.view(b, -1, 1, h_lr * cfg.scale, w_lr * cfg.scale)
        # inference
        flow_L1, flow_L2, flow_L3, SR = net(LR)
        # loss: SR reconstruction + flow reconstruction at the three pyramid levels
        loss_SR = criterion(SR, HR[:, idx_center, :, :, :])
        loss_OFR = torch.zeros(1).cuda()
        for i in range(n_frames):
            if i != idx_center:
                loss_L1 = OFR_loss(F.avg_pool2d(LR[:, i, :, :, :], kernel_size=2),
                                   F.avg_pool2d(LR[:, idx_center, :, :, :], kernel_size=2),
                                   flow_L1[i])
                loss_L2 = OFR_loss(LR[:, i, :, :, :], LR[:, idx_center, :, :, :], flow_L2[i])
                loss_L3 = OFR_loss(HR[:, i, :, :, :], HR[:, idx_center, :, :, :], flow_L3[i])
                loss_OFR = loss_OFR + loss_L3 + 0.2 * loss_L2 + 0.1 * loss_L1
        loss = loss_SR + 0.01 * loss_OFR / (n_frames - 1)
        loss_list.append(loss.data.cpu())
        # backwards
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # FIX: step the LR scheduler after the optimizer step (required since
        # PyTorch 1.1); the original stepped it at the top of the loop.
        scheduler.step()
        # save checkpoint
        if idx_iter % 5000 == 0:
            print('Iteration---%6d, loss---%f' % (idx_iter + 1, np.array(loss_list).mean()))
            save_path = 'log/' + cfg.degradation + '_x' + str(cfg.scale)
            save_name = cfg.degradation + '_x' + str(cfg.scale) + '_iter' + str(idx_iter) + '.pth'
            # FIX: makedirs also creates the missing 'log/' parent; os.mkdir
            # raised FileNotFoundError when 'log' did not exist yet.
            os.makedirs(save_path, exist_ok=True)
            torch.save(net.state_dict(), save_path + '/' + save_name)
            loss_list = []
if __name__ == '__main__':
    # Entry point: parse CLI options, then run training.
    main(parse_args())
| 3,333 | 31.686275 | 100 | py |
SOF-VSR | SOF-VSR-master/TIP/demo_Vid4.py | from torch.autograd import Variable
from torch.utils.data import DataLoader
from data_utils import TestsetLoader, ycbcr2rgb
from modules import SOFVSR
from torchvision.transforms import ToPILImage
import numpy as np
import os
import argparse
import torch
def parse_args():
    """Build the Vid4 demo CLI configuration (defaults: BD degradation, x4 SR)."""
    parser = argparse.ArgumentParser()
    # NOTE(review): type=bool treats any non-empty CLI string as True
    # (e.g. "--chop_forward False" still yields True).
    for flag, kind, default in (
            ('--degradation', str, 'BD'),
            ('--scale', int, 4),
            ('--gpu_mode', bool, True),
            ('--testset_dir', str, 'data/test/Vid4'),
            ('--chop_forward', bool, False)):
        parser.add_argument(flag, type=kind, default=default)
    return parser.parse_args()
def chop_forward(x, model, scale, shave=16, min_size=5000, nGPUs=1):
    """Memory-saving inference: recursively split into 4 overlapping quadrants.

    Each quadrant is super-resolved (directly once its area drops below
    `min_size`, otherwise by recursing) and the HR quadrants are stitched back
    together, discarding the `shave` overlap at the seams.

    Args:
        x: LR clip, (b, n, c, h, w). NOTE(review): the stitched output buffer
           is allocated as (1, 1, H, W), so b == 1 and a single output channel
           are assumed -- confirm with the caller.
        model: network mapping an LR clip to an SR frame.
        scale: upscaling factor.
        shave: LR-pixel overlap kept around each quadrant to avoid seams.
        min_size: quadrant area threshold for running the model directly.
        nGPUs: number of quadrants batched per model call.
    """
    # divide into 4 patches
    b, n, c, h, w = x.size()
    h_half, w_half = h // 2, w // 2
    h_size, w_size = h_half + shave, w_half + shave
    inputlist = [
        x[:, :, :, 0:h_size, 0:w_size],
        x[:, :, :, 0:h_size, (w - w_size):w],
        x[:, :, :, (h - h_size):h, 0:w_size],
        x[:, :, :, (h - h_size):h, (w - w_size):w]]
    if w_size * h_size < min_size:
        # small enough: run the model on the quadrants directly (batched)
        outputlist = []
        for i in range(0, 4, nGPUs):
            input_batch = torch.cat(inputlist[i:(i + nGPUs)], dim=0)
            output_batch = model(input_batch)
            outputlist.append(output_batch.data)
    else:
        # still too large: recurse into each quadrant
        outputlist = [
            chop_forward(patch, model, scale, shave, min_size, nGPUs) \
            for patch in inputlist]
    # map all coordinates to HR space
    h, w = scale * h, scale * w
    h_half, w_half = scale * h_half, scale * w_half
    h_size, w_size = scale * h_size, scale * w_size
    shave *= scale
    # NOTE(review): Variable(..., volatile=True) is a pre-0.4 PyTorch idiom;
    # under modern PyTorch, wrap the call site in torch.no_grad() instead.
    output = Variable(x.data.new(1, 1, h, w), volatile=True)
    # stitch: each quadrant contributes its seam-free region
    output[:, :, 0:h_half, 0:w_half] = outputlist[0][:, :, 0:h_half, 0:w_half]
    output[:, :, 0:h_half, w_half:w] = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
    output[:, :, h_half:h, 0:w_half] = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
    output[:, :, h_half:h, w_half:w] = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
    return output
def main(cfg):
    """Run SOF-VSR inference on every Vid4 test video and save SR PNG frames.

    For each sliding 3-frame window, super-resolves the centre frame's luma,
    recombines it with bicubically upscaled chroma, and writes the RGB result
    under ``results/Vid4/<degradation>_x<scale>/<video>/``.
    """
    # model
    net = SOFVSR(cfg, is_training=False)
    ckpt = torch.load('./log/' + cfg.degradation + '_x' + str(cfg.scale) + '.pth')
    net.load_state_dict(ckpt)
    if cfg.gpu_mode:
        net.cuda()
    with torch.no_grad():
        video_list = os.listdir(cfg.testset_dir)
        for idx_video in range(len(video_list)):
            video_name = video_list[idx_video]
            # dataloader over sliding 3-frame windows of this video
            test_set = TestsetLoader(cfg, video_name)
            test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
            for idx_iter, (LR_y_cube, SR_cb, SR_cr) in enumerate(test_loader):
                # data: (1, 3, h, w) luma cube -> (1, 3, 1, h, w)
                b, n_frames, h_lr, w_lr = LR_y_cube.size()
                LR_y_cube = Variable(LR_y_cube)
                LR_y_cube = LR_y_cube.view(b, -1, 1, h_lr, w_lr)
                if cfg.gpu_mode:
                    LR_y_cube = LR_y_cube.cuda()
                if cfg.chop_forward:
                    # crop borders so each quadrant stays divisible by 2
                    _, _, _, h, w = LR_y_cube.size()
                    h = int(h//16) * 16
                    w = int(w//16) * 16
                    LR_y_cube = LR_y_cube[:, :, :, :h, :w]
                    SR_cb = SR_cb[:, :h * cfg.scale, :w * cfg.scale]
                    SR_cr = SR_cr[:, :h * cfg.scale, :w * cfg.scale]
                    SR_y = chop_forward(LR_y_cube, net, cfg.scale).squeeze(0)
                else:
                    SR_y = net(LR_y_cube).squeeze(0)
                # recombine SR luma with bicubic chroma and convert to 8-bit RGB
                SR_y = np.array(SR_y.data.cpu())
                SR_ycbcr = np.concatenate((SR_y, SR_cb, SR_cr), axis=0).transpose(1,2,0)
                SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
                SR_rgb = np.clip(SR_rgb, 0, 255)
                SR_rgb = ToPILImage()(np.round(SR_rgb).astype(np.uint8))
                save_dir = 'results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale) + '/' + video_name
                # FIX: makedirs builds the whole chain, including the missing
                # 'results' parent; the original os.mkdir chain raised
                # FileNotFoundError when 'results' did not exist.
                os.makedirs(save_dir, exist_ok=True)
                # frames are named after the centre frame (idx_iter + 2, 1-based)
                SR_rgb.save(save_dir + '/sr_' + str(idx_iter+2).rjust(2,'0') + '.png')
if __name__ == '__main__':
    # Entry point: parse CLI options, then run Vid4 inference.
    main(parse_args())
SOF-VSR | SOF-VSR-master/ACCV/modules.py | import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
def optical_flow_warp(image, image_optical_flow):
    """
    Backward-warp `image` with a dense flow field via bilinear grid sampling.

    Arguments
        image_ref: reference images tensor, (b, c, h, w)
        image_optical_flow: optical flow to image_ref (b, 2, h, w)
    """
    b, _ , h, w = image.size()
    # Identity sampling grid in normalized [-1, 1] coordinates.
    grid = np.meshgrid(range(w), range(h))
    grid = np.stack(grid, axis=-1).astype(np.float64)
    grid[:, :, 0] = grid[:, :, 0] * 2 / (w - 1) -1
    grid[:, :, 1] = grid[:, :, 1] * 2 / (h - 1) -1
    grid = grid.transpose(2, 0, 1)
    grid = np.tile(grid, (b, 1, 1, 1))
    grid = Variable(torch.Tensor(grid))
    if image_optical_flow.is_cuda == True:
        grid = grid.cuda()
    # NOTE(review): the 31/(w-1) factor looks like a baked-in 32-px patch
    # convention inherited from training -- confirm before changing.
    flow_0 = torch.unsqueeze(image_optical_flow[:, 0, :, :] * 31 / (w - 1), dim=1)
    flow_1 = torch.unsqueeze(image_optical_flow[:, 1, :, :] * 31 / (h - 1), dim=1)
    grid = grid + torch.cat((flow_0, flow_1),1)
    # reshape (b, 2, h, w) -> (b, h, w, 2) as expected by grid_sample
    grid = grid.transpose(1, 2)
    grid = grid.transpose(3, 2)
    output = F.grid_sample(image, grid, padding_mode='border')
    return output
class make_dense(nn.Module):
    """Dense-block unit: conv + LeakyReLU whose output is concatenated onto the input.

    Output has channels_in + channels_out channels, preserving spatial size
    via 'same' padding.
    """
    def __init__(self, channels_in, channels_out, kernel_size=3):
        super(make_dense, self).__init__()
        self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
        self.conv = nn.Conv2d(channels_in, channels_out,
                              kernel_size=kernel_size,
                              padding=(kernel_size - 1) // 2,
                              bias=False)
    def forward(self, x):
        new_features = self.leaky_relu(self.conv(x))
        return torch.cat((x, new_features), 1)
class RDB(nn.Module):
    """Residual Dense Block: stacked dense layers, 1x1 feature fusion, skip add.

    Channel count grows by `growth` per dense layer and is projected back to
    `channels` by the 1x1 conv, so the block is shape-preserving.
    """
    def __init__(self, nDenselayer, channels, growth):
        super(RDB, self).__init__()
        layers = []
        width = channels
        for _ in range(nDenselayer):
            layers.append(make_dense(width, growth))
            width += growth
        self.dense_layers = nn.Sequential(*layers)
        self.conv_1x1 = nn.Conv2d(width, channels, kernel_size=1, padding=0, bias=False)
    def forward(self, x):
        fused = self.conv_1x1(self.dense_layers(x))
        return fused + x
class OFRnet(nn.Module):
    """Optical Flow Reconstruction net (ACCV version): 3-level coarse-to-fine flow.

    Level 1 estimates flow at half resolution, level 2 refines it at input
    resolution, and level 3 super-resolves it to ``upscale_factor`` times the
    input resolution via pixel shuffle. When training, the levels also return
    warping residuals used as auxiliary supervision.
    """
    def __init__(self, upscale_factor, is_training):
        super(OFRnet, self).__init__()
        self.pool = nn.AvgPool2d(kernel_size = 2)
        self.upsample = nn.Upsample(scale_factor = 2, mode = 'bilinear')
        self.final_upsample = nn.Upsample(scale_factor = upscale_factor, mode='bilinear')
        self.shuffle = nn.PixelShuffle(upscale_factor)
        self.upscale_factor = upscale_factor
        self.is_training = is_training
        # Level 1: coarse flow from the 2-frame stack at half resolution
        self.conv_L1_1 = nn.Conv2d(2, 32, 3, 1, 1, bias=False)
        self.RDB1_1 = RDB(4, 32, 32)
        self.RDB1_2 = RDB(4, 32, 32)
        self.bottleneck_L1 = nn.Conv2d(64, 2, 3, 1, 1, bias=False)
        self.conv_L1_2 = nn.Conv2d(2, 2, 3, 1, 1, bias=True)
        # Level 2: refinement at input resolution (6-ch: frames + warp + residual + flow)
        self.conv_L2_1 = nn.Conv2d(6, 32, 3, 1, 1, bias=False)
        self.RDB2_1 = RDB(4, 32, 32)
        self.RDB2_2 = RDB(4, 32, 32)
        self.bottleneck_L2 = nn.Conv2d(64, 2, 3, 1, 1, bias=False)
        self.conv_L2_2 = nn.Conv2d(2, 2, 3, 1, 1, bias=True)
        # Level 3: HR flow, predicted as 2*r^2 channels then pixel-shuffled
        self.conv_L3_1 = nn.Conv2d(6, 32, 3, 1, 1, bias=False)
        self.RDB3_1 = RDB(4, 32, 32)
        self.RDB3_2 = RDB(4, 32, 32)
        self.bottleneck_L3 = nn.Conv2d(64, 2*upscale_factor**2, 3, 1, 1, bias=False)
        self.conv_L3_2 = nn.Conv2d(2*upscale_factor**2, 2*upscale_factor**2, 3, 1, 1, bias=True)
    def forward(self, x):
        """x: (b, 2, h, w) -- neighbour frame in channel 0, reference in channel 1."""
        # Level 1
        x_L1 = self.pool(x)
        _, _, h, w = x_L1.size()
        input_L1 = self.conv_L1_1(x_L1)
        buffer_1 = self.RDB1_1(input_L1)
        buffer_2 = self.RDB1_2(buffer_1)
        buffer = torch.cat((buffer_1, buffer_2), 1)
        optical_flow_L1 = self.bottleneck_L1(buffer)
        optical_flow_L1 = self.conv_L1_2(optical_flow_L1)
        optical_flow_L1_upscaled = self.upsample(optical_flow_L1) # *2
        if self.is_training is True:
            # auxiliary warping residual at half resolution
            x_L1_res = optical_flow_warp(torch.unsqueeze(x_L1[:, 0, :, :], dim=1), optical_flow_L1) - torch.unsqueeze(x_L1[:, 1, :, :], dim=1)
        # Level 2
        x_L2 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), optical_flow_L1_upscaled)
        x_L2_res = torch.unsqueeze(x[:, 1, :, :], dim=1) - x_L2
        x_L2 = torch.cat((x, x_L2, x_L2_res,optical_flow_L1_upscaled), 1)
        input_L2 = self.conv_L2_1(x_L2)
        buffer_1 = self.RDB2_1(input_L2)
        buffer_2 = self.RDB2_2(buffer_1)
        buffer = torch.cat((buffer_1, buffer_2), 1)
        optical_flow_L2 = self.bottleneck_L2(buffer)
        optical_flow_L2 = self.conv_L2_2(optical_flow_L2)
        optical_flow_L2 = optical_flow_L2 + optical_flow_L1_upscaled
        if self.is_training is True:
            # x_L2 is now the 6-channel concat, but its channels 0/1 are still the
            # original frames, so this is warp(frame0, flow_L2) - frame1 as intended
            x_L2_res = optical_flow_warp(torch.unsqueeze(x_L2[:, 0, :, :], dim=1), optical_flow_L2) - torch.unsqueeze(x_L2[:, 1, :, :], dim=1)
        # Level 3
        x_L3 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), optical_flow_L2)
        x_L3_res = torch.unsqueeze(x[:, 1, :, :], dim=1) - x_L3
        x_L3 = torch.cat((x, x_L3, x_L3_res, optical_flow_L2), 1)
        input_L3 = self.conv_L3_1(x_L3)
        buffer_1 = self.RDB3_1(input_L3)
        buffer_2 = self.RDB3_2(buffer_1)
        buffer = torch.cat((buffer_1, buffer_2), 1)
        optical_flow_L3 = self.bottleneck_L3(buffer)
        optical_flow_L3 = self.conv_L3_2(optical_flow_L3)
        # pixel-shuffle to HR and add the bilinearly upscaled L2 flow (scaled)
        optical_flow_L3 = self.shuffle(optical_flow_L3) + self.final_upsample(optical_flow_L2) # *4
        if self.is_training is False:
            return optical_flow_L3
        if self.is_training is True:
            return x_L1_res, x_L2_res, optical_flow_L1, optical_flow_L2, optical_flow_L3
class SRnet(nn.Module):
    """Super-resolution branch: fuses the 35-channel draft cube into HR output.

    Five shape-preserving RDBs are applied sequentially; every RDB output plus
    the shallow features are concatenated (global feature fusion), projected
    to upscale_factor^2 channels and pixel-shuffled to the HR image.
    """
    def __init__(self, upscale_factor, is_training):
        super(SRnet, self).__init__()
        self.conv = nn.Conv2d(35, 64, 3, 1, 1, bias=False)
        self.RDB_1 = RDB(5, 64, 32)
        self.RDB_2 = RDB(5, 64, 32)
        self.RDB_3 = RDB(5, 64, 32)
        self.RDB_4 = RDB(5, 64, 32)
        self.RDB_5 = RDB(5, 64, 32)
        self.bottleneck = nn.Conv2d(384, upscale_factor ** 2, 1, 1, 0, bias=False)
        self.conv_2 = nn.Conv2d(upscale_factor ** 2, upscale_factor ** 2, 3, 1, 1, bias=True)
        self.shuffle = nn.PixelShuffle(upscale_factor=upscale_factor)
        self.is_training = is_training
    def forward(self, x):
        shallow = self.conv(x)
        stages = [shallow]
        for rdb in (self.RDB_1, self.RDB_2, self.RDB_3, self.RDB_4, self.RDB_5):
            stages.append(rdb(stages[-1]))
        # global feature fusion: all RDB outputs followed by the shallow features
        fused = torch.cat(stages[1:] + stages[:1], 1)
        out = self.bottleneck(fused)
        out = self.conv_2(out)
        return self.shuffle(out)
class SOFVSR(nn.Module):
    """Super-resolving Optical Flow for Video SR (ACCV version, 3 frames, luma only).

    Estimates HR flow from each neighbour (frames 0 and 2) to the centre frame
    (frame 1), warps the neighbours into ``upscale_factor**2`` sub-pixel drafts
    each, and fuses the resulting draft cube into the SR centre frame.
    """
    def __init__(self, upscale_factor, is_training=False):
        super(SOFVSR, self).__init__()
        self.upscale_factor = upscale_factor
        self.is_training = is_training
        self.OFRnet = OFRnet(upscale_factor=upscale_factor, is_training=is_training)
        self.SRnet = SRnet(upscale_factor=upscale_factor, is_training=is_training)
    def forward(self, x):
        """x: (b, 3, h, w) -- three consecutive luma frames; frame 1 is the reference."""
        # (neighbour, centre) stacks for the two flow estimations
        input_01 = torch.cat((torch.unsqueeze(x[:, 0, :, :], dim=1), torch.unsqueeze(x[:, 1, :, :], dim=1)), 1)
        input_21 = torch.cat((torch.unsqueeze(x[:, 2, :, :], dim=1), torch.unsqueeze(x[:, 1, :, :], dim=1)), 1)
        if self.is_training is False:
            flow_01_L3 = self.OFRnet(input_01)
            flow_21_L3 = self.OFRnet(input_21)
        if self.is_training is True:
            res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3 = self.OFRnet(input_01)
            res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3 = self.OFRnet(input_21)
        # draft cube: the 3 input frames + one warped draft per (i, j) sub-grid
        draft_cube = x
        for i in range(self.upscale_factor):
            for j in range(self.upscale_factor):
                draft_01 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), flow_01_L3[:, :, i::self.upscale_factor, j::self.upscale_factor]/self.upscale_factor)
                draft_21 = optical_flow_warp(torch.unsqueeze(x[:, 2, :, :], dim=1), flow_21_L3[:, :, i::self.upscale_factor, j::self.upscale_factor]/self.upscale_factor)
                draft_cube = torch.cat((draft_cube, draft_01, draft_21),1)
        output = self.SRnet(draft_cube)
        if self.is_training is False:
            return torch.squeeze(output)
        if self.is_training is True:
            return (res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3), \
                   (res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3), output
| 8,788 | 46.766304 | 169 | py |
SOF-VSR | SOF-VSR-master/ACCV/data_utils.py | import numpy as np
from PIL import Image
import os
import torch
from torch.utils.data.dataset import Dataset
import math
import random
class TrainsetLoader(Dataset):
    """Random-crop training loader for SOF-VSR.

    Expects ``trainset_dir/<video>/hr/hr<k>.png`` and
    ``trainset_dir/<video>/lr_x<factor>_BI/lr<k>.png``. Each item is a random
    (LR, HR) Y-channel triplet patch pair of shape (3, p, p) / (3, p*s, p*s).
    """

    def __init__(self, trainset_dir, upscale_factor, patch_size, n_iters):
        # Fix: the original called super(TrainsetLoader).__init__(), which
        # creates an unbound super object so Dataset.__init__ never ran.
        super(TrainsetLoader, self).__init__()
        self.trainset_dir = trainset_dir
        self.upscale_factor = upscale_factor
        self.patch_size = patch_size         # LR patch size; HR patch is patch_size * upscale_factor
        self.n_iters = n_iters               # virtual dataset length (one item per iteration)
        self.video_list = os.listdir(trainset_dir)

    def __getitem__(self, idx):
        # A random video and a random starting frame are drawn each call, so
        # `idx` itself is ignored; the loader is an infinite random sampler.
        idx_video = random.randint(0, len(self.video_list) - 1)
        # assumes every video has at least 31 extracted frames -- TODO confirm
        idx_frame = random.randint(0, 28)
        lr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/lr_x' + str(self.upscale_factor) + '_BI'
        hr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/hr'
        # read HR & LR frames (three consecutive frames each)
        LR0 = Image.open(lr_dir + '/lr' + str(idx_frame) + '.png')
        LR1 = Image.open(lr_dir + '/lr' + str(idx_frame + 1) + '.png')
        LR2 = Image.open(lr_dir + '/lr' + str(idx_frame + 2) + '.png')
        HR0 = Image.open(hr_dir + '/hr' + str(idx_frame) + '.png')
        HR1 = Image.open(hr_dir + '/hr' + str(idx_frame + 1) + '.png')
        HR2 = Image.open(hr_dir + '/hr' + str(idx_frame + 2) + '.png')
        LR0 = np.array(LR0, dtype=np.float32) / 255.0
        LR1 = np.array(LR1, dtype=np.float32) / 255.0
        LR2 = np.array(LR2, dtype=np.float32) / 255.0
        HR0 = np.array(HR0, dtype=np.float32) / 255.0
        HR1 = np.array(HR1, dtype=np.float32) / 255.0
        HR2 = np.array(HR2, dtype=np.float32) / 255.0
        # extract Y channel for both LR and HR frames
        HR0 = rgb2y(HR0)
        HR1 = rgb2y(HR1)
        HR2 = rgb2y(HR2)
        LR0 = rgb2y(LR0)
        LR1 = rgb2y(LR1)
        LR2 = rgb2y(LR2)
        # crop aligned patches randomly
        HR0, HR1, HR2, LR0, LR1, LR2 = random_crop(HR0, HR1, HR2, LR0, LR1, LR2, self.patch_size, self.upscale_factor)
        HR0 = HR0[:, :, np.newaxis]
        HR1 = HR1[:, :, np.newaxis]
        HR2 = HR2[:, :, np.newaxis]
        LR0 = LR0[:, :, np.newaxis]
        LR1 = LR1[:, :, np.newaxis]
        LR2 = LR2[:, :, np.newaxis]
        HR = np.concatenate((HR0, HR1, HR2), axis=2)
        LR = np.concatenate((LR0, LR1, LR2), axis=2)
        # data augmentation (joint random flips / transpose)
        LR, HR = augmentation()(LR, HR)
        return toTensor(LR), toTensor(HR)

    def __len__(self):
        # Virtual length: the DataLoader draws exactly n_iters samples.
        return self.n_iters
class TestsetLoader(Dataset):
    """Sliding-window test loader for a Vid4-style sequence directory.

    Expects ``dataset_dir/lr_x<factor>/lr_XX.png`` (1-indexed, zero padded).
    Item ``idx`` yields the LR Y-channel triplet centred on frame ``idx + 2``
    plus bicubically-upsampled Cb/Cr channels for colour reconstruction.
    """

    def __init__(self, dataset_dir, upscale_factor):
        # Fix: the original called super(TestsetLoader).__init__(), which
        # creates an unbound super object so Dataset.__init__ never ran.
        super(TestsetLoader, self).__init__()
        self.dataset_dir = dataset_dir
        self.upscale_factor = upscale_factor
        self.frame_list = os.listdir(self.dataset_dir + '/lr_x' + str(self.upscale_factor))

    def __getitem__(self, idx):
        lr_dir = self.dataset_dir + '/lr_x' + str(self.upscale_factor)
        # Three consecutive frames, centred on idx+2.
        LR0 = Image.open(lr_dir + '/' + 'lr_' + str(idx + 1).rjust(2, '0') + '.png')
        LR1 = Image.open(lr_dir + '/' + 'lr_' + str(idx + 2).rjust(2, '0') + '.png')
        LR2 = Image.open(lr_dir + '/' + 'lr_' + str(idx + 3).rjust(2, '0') + '.png')
        W, H = LR1.size
        # H and W must be divisible by 2 for the network's downsampling path.
        W = int(W // 2) * 2
        H = int(H // 2) * 2
        LR0 = LR0.crop([0, 0, W, H])
        LR1 = LR1.crop([0, 0, W, H])
        LR2 = LR2.crop([0, 0, W, H])
        LR1_bicubic = LR1.resize((W * self.upscale_factor, H * self.upscale_factor), Image.BICUBIC)
        LR1_bicubic = np.array(LR1_bicubic, dtype=np.float32) / 255.0
        LR0 = np.array(LR0, dtype=np.float32) / 255.0
        LR1 = np.array(LR1, dtype=np.float32) / 255.0
        LR2 = np.array(LR2, dtype=np.float32) / 255.0
        # extract Y channel for the LR inputs
        LR0_y, _, _ = rgb2ycbcr(LR0)
        LR1_y, _, _ = rgb2ycbcr(LR1)
        LR2_y, _, _ = rgb2ycbcr(LR2)
        LR0_y = LR0_y[:, :, np.newaxis]
        LR1_y = LR1_y[:, :, np.newaxis]
        LR2_y = LR2_y[:, :, np.newaxis]
        LR = np.concatenate((LR0_y, LR1_y, LR2_y), axis=2)
        LR = toTensor(LR)
        # Cb/Cr channels come from bicubic interpolation of the centre frame.
        _, SR_cb, SR_cr = rgb2ycbcr(LR1_bicubic)
        return LR, SR_cb, SR_cr

    def __len__(self):
        # A triplet is needed per item, so the first/last frames cannot be centres.
        return len(self.frame_list) - 2
class augmentation(object):
    """Joint random augmentation for (input, target) H x W x C arrays.

    Applies, each with probability 0.5: a flip along the width axis, a flip
    along the height axis, and an H/W transpose. The identical transform is
    always applied to both arrays, and contiguous copies are returned.
    """
    def __call__(self, input, target):
        pair = [input, target]
        if random.random() < 0.5:  # mirror along the width axis
            pair = [arr[:, ::-1, :] for arr in pair]
        if random.random() < 0.5:  # mirror along the height axis
            pair = [arr[::-1, :, :] for arr in pair]
        if random.random() < 0.5:  # swap the two spatial axes
            pair = [arr.transpose(1, 0, 2) for arr in pair]
        return np.ascontiguousarray(pair[0]), np.ascontiguousarray(pair[1])
def random_crop(HR0, HR1, HR2, LR0, LR1, LR2, patch_size_lr, upscale_factor):
    """Crop one random, spatially-aligned patch from three HR/LR frame pairs.

    The crop origin is drawn in LR coordinates with a 10-pixel safety margin
    on every side, then scaled by `upscale_factor` for the HR frames.
    Returns (HR0, HR1, HR2, LR0, LR1, LR2) patches; LR patches are
    patch_size_lr square, HR patches patch_size_lr * upscale_factor square.
    """
    h_hr, w_hr = HR0.shape
    h_lr, w_lr = h_hr // upscale_factor, w_hr // upscale_factor
    idx_h = random.randint(10, h_lr - patch_size_lr - 10)
    idx_w = random.randint(10, w_lr - patch_size_lr - 10)
    top_lr, left_lr = idx_h - 1, idx_w - 1
    bottom_lr, right_lr = top_lr + patch_size_lr, left_lr + patch_size_lr
    top_hr, left_hr = top_lr * upscale_factor, left_lr * upscale_factor
    bottom_hr, right_hr = bottom_lr * upscale_factor, right_lr * upscale_factor
    hr_patches = [frame[top_hr:bottom_hr, left_hr:right_hr] for frame in (HR0, HR1, HR2)]
    lr_patches = [frame[top_lr:bottom_lr, left_lr:right_lr] for frame in (LR0, LR1, LR2)]
    return hr_patches[0], hr_patches[1], hr_patches[2], lr_patches[0], lr_patches[1], lr_patches[2]
def toTensor(img):
    """Convert an H x W x C ndarray to a C x H x W torch tensor (memory is shared)."""
    chw = img.transpose(2, 0, 1)
    return torch.from_numpy(chw)
def rgb2ycbcr(img_rgb):
    """Split an RGB image (values in [0, 1]) into ITU-R BT.601 Y, Cb, Cr planes."""
    red = img_rgb[:, :, 0]
    green = img_rgb[:, :, 1]
    blue = img_rgb[:, :, 2]
    img_y = 0.257 * red + 0.504 * green + 0.098 * blue + 16 / 255.0
    img_cb = -0.148 * red - 0.291 * green + 0.439 * blue + 128 / 255.0
    img_cr = 0.439 * red - 0.368 * green - 0.071 * blue + 128 / 255.0
    return img_y, img_cb, img_cr
def ycbcr2rgb(img_ycbcr):
    """Convert a YCbCr image (values in [0, 1]) back to RGB (BT.601 inverse)."""
    y_off = img_ycbcr[:, :, 0] - 16 / 255.0
    cb_off = img_ycbcr[:, :, 1] - 128 / 255.0
    cr_off = img_ycbcr[:, :, 2] - 128 / 255.0
    img_r = 1.164 * y_off + 1.596 * cr_off
    img_g = 1.164 * y_off - 0.392 * cb_off - 0.813 * cr_off
    img_b = 1.164 * y_off + 2.017 * cb_off
    return np.stack((img_r, img_g, img_b), axis=2)
def rgb2y(img_rgb):
    """Extract the BT.601 luma (Y) channel from an RGB image with values in [0, 1]."""
    red, green, blue = img_rgb[:, :, 0], img_rgb[:, :, 1], img_rgb[:, :, 2]
    return 0.257 * red + 0.504 * green + 0.098 * blue + 16 / 255.0
| 6,999 | 40.666667 | 143 | py |
SOF-VSR | SOF-VSR-master/ACCV/train.py | import os
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from modules import SOFVSR, optical_flow_warp
import argparse
from data_utils import TrainsetLoader
import numpy as np
import matplotlib.pyplot as plt
def parse_args():
    """Parse training command-line options.

    Returns an argparse.Namespace with upscale_factor, gpu_mode, patch_size,
    batch_size, n_iters and trainset_dir.
    """
    def str2bool(v):
        # Fix: argparse's type=bool is broken -- bool('False') is True since
        # any non-empty string is truthy. Parse common spellings explicitly.
        return str(v).lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument("--upscale_factor", type=int, default=4)
    parser.add_argument('--gpu_mode', type=str2bool, default=False)
    parser.add_argument('--patch_size', type=int, default=32)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--n_iters', type=int, default=300000, help='number of iterations to train')
    parser.add_argument('--trainset_dir', type=str, default='data/train')
    return parser.parse_args()
def main(cfg):
    """Train SOF-VSR with the combined SR loss and multi-level optical-flow loss."""
    use_gpu = cfg.gpu_mode
    net = SOFVSR(cfg.upscale_factor, is_training=True)
    if use_gpu:
        net.cuda()
    cudnn.benchmark = True
    # One pass over this loader is the whole run: the dataset's virtual
    # length is n_iters * batch_size random crops.
    train_set = TrainsetLoader(cfg.trainset_dir, cfg.upscale_factor, cfg.patch_size, cfg.n_iters*cfg.batch_size)
    train_loader = DataLoader(train_set, num_workers=4, batch_size=cfg.batch_size, shuffle=True)
    # train
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
    criterion_L2 = torch.nn.MSELoss()
    if use_gpu:
        criterion_L2 = criterion_L2.cuda()
    # Halve the learning rate every 50k iterations.
    milestones = [50000, 100000, 150000, 200000, 250000]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.5)
    loss_list = []
    for idx_iter, (LR, HR) in enumerate(train_loader):
        # NOTE(review): scheduler.step() before optimizer.step() is the
        # pre-1.1 PyTorch ordering; newer versions expect the reverse.
        scheduler.step()
        LR, HR = Variable(LR), Variable(HR)
        if use_gpu:
            LR = LR.cuda()
            HR = HR.cuda()
        (res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3), (
            res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3), SR = net(LR)
        # Warp the HR neighbour frames with the estimated HR flows; the warp
        # error against the centre HR frame supervises flow estimation.
        warped_01 = optical_flow_warp(torch.unsqueeze(HR[:, 0, :, :], dim=1), flow_01_L3)
        warped_21 = optical_flow_warp(torch.unsqueeze(HR[:, 2, :, :], dim=1), flow_21_L3)
        # losses: SR reconstruction + per-level flow residuals, each with an
        # L1 smoothness regulariser on the corresponding flow field.
        loss_SR = criterion_L2(SR, torch.unsqueeze(HR[:, 1, :, :], 1))
        loss_OFR_1 = 1 * (criterion_L2(warped_01, torch.unsqueeze(HR[:, 1, :, :], 1)) + 0.01 * L1_regularization(flow_01_L3)) + \
                     0.25 * (torch.mean(res_01_L2 ** 2) + 0.01 * L1_regularization(flow_01_L2)) + \
                     0.125 * (torch.mean(res_01_L1 ** 2) + 0.01 * L1_regularization(flow_01_L1))
        loss_OFR_2 = 1 * (criterion_L2(warped_21, torch.unsqueeze(HR[:, 1, :, :], 1)) + 0.01 * L1_regularization(flow_21_L3)) + \
                     0.25 * (torch.mean(res_21_L2 ** 2) + 0.01 * L1_regularization(flow_21_L2)) + \
                     0.125 * (torch.mean(res_21_L1 ** 2) + 0.01 * L1_regularization(flow_21_L1))
        loss = loss_SR + 0.01 * (loss_OFR_1 + loss_OFR_2) / 2
        loss_list.append(loss.data.cpu())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # save checkpoint and report the mean loss every 5000 iterations
        if idx_iter % 5000 == 0:
            print('Iteration---%6d, loss---%f' % (idx_iter + 1, np.array(loss_list).mean()))
            torch.save(net.state_dict(), 'log/BI_x' + str(cfg.upscale_factor) + '_iter' + str(idx_iter) + '.pth')
            loss_list = []
def L1_regularization(image):
    """Total-variation style L1 smoothness penalty on a (B, C, H, W) field.

    Sums |forward difference| along height and width over the shared
    (H-1) x (W-1) core region and normalises by batch * (H-1) * (W-1).
    """
    b, _, h, w = image.size()
    core = image[:, :, 0:h - 1, 0:w - 1]
    diff_down = core - image[:, :, 1:, 0:w - 1]   # vertical first difference
    diff_right = core - image[:, :, 0:h - 1, 1:]  # horizontal first difference
    total = torch.abs(diff_down) + torch.abs(diff_right)
    return torch.sum(total) / (b * (h - 1) * (w - 1))
# Script entry point: parse CLI options and start training.
if __name__ == '__main__':
    cfg = parse_args()
    main(cfg)
| 3,641 | 38.16129 | 129 | py |
SOF-VSR | SOF-VSR-master/ACCV/demo_Vid4.py | import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data_utils import TestsetLoader, ycbcr2rgb
import numpy as np
from torchvision.transforms import ToPILImage
import os
import argparse
from modules import SOFVSR
import math
def parse_args():
    """Parse inference command-line options for the Vid4 demo.

    Returns an argparse.Namespace with video_name, upscale_factor, gpu_mode
    and chop_forward.
    """
    def str2bool(v):
        # Fix: argparse's type=bool is broken -- bool('False') is True since
        # any non-empty string is truthy. Parse common spellings explicitly.
        return str(v).lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser()
    parser.add_argument("--video_name", type=str, default="calendar")
    parser.add_argument("--upscale_factor", type=int, default=4)
    parser.add_argument('--gpu_mode', type=str2bool, default=False)
    parser.add_argument('--chop_forward', type=str2bool, default=False)
    return parser.parse_args()
def chop_forward(x, model, scale, shave=16, min_size=5000, nGPUs=1):
    # Memory-saving inference: recursively split the frame into four
    # overlapping quadrants ('shave' pixels of overlap), super-resolve each,
    # and stitch the HR quadrants back together.
    b, c, h, w = x.size()
    h_half, w_half = h // 2, w // 2
    h_size, w_size = h_half + shave, w_half + shave
    inputlist = [
        x[:, :, 0:h_size, 0:w_size],
        x[:, :, 0:h_size, (w - w_size):w],
        x[:, :, (h - h_size):h, 0:w_size],
        x[:, :, (h - h_size):h, (w - w_size):w]]
    if w_size * h_size < min_size:
        # Patches are small enough: run the model directly, nGPUs patches at a time.
        outputlist = []
        for i in range(0, 4, nGPUs):
            input_batch = torch.cat(inputlist[i:(i + nGPUs)], dim=0)
            output_batch = model(input_batch)
            outputlist.append(output_batch.data)
    else:
        # Still too large: recurse into each quadrant.
        outputlist = [
            chop_forward(patch, model, scale, shave, min_size, nGPUs) \
            for patch in inputlist]
    # Convert all geometry to HR coordinates before stitching.
    h, w = scale * h, scale * w
    h_half, w_half = scale * h_half, scale * w_half
    h_size, w_size = scale * h_size, scale * w_size
    shave *= scale
    # NOTE(review): Variable(..., volatile=True) is legacy (pre-0.4) autograd
    # API. The buffer is allocated uninitialised and fully overwritten below.
    # Indexing is 2-D, i.e. this assumes the model output is a squeezed
    # (H, W) map per patch -- confirm against SOFVSR.forward.
    output = Variable(x.data.new(h, w), volatile=True)
    output[0:h_half, 0:w_half] = outputlist[0][0:h_half, 0:w_half]
    output[0:h_half, w_half:w] = outputlist[1][0:h_half, (w_size - w + w_half):w_size]
    output[h_half:h, 0:w_half] = outputlist[2][(h_size - h + h_half):h_size, 0:w_half]
    output[h_half:h, w_half:w] = outputlist[3][(h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
    return output
def main(cfg):
    """Super-resolve one Vid4 test sequence and save the SR frames as PNGs.

    Loads the pretrained SOFVSR weights for cfg.upscale_factor, runs the
    network over sliding LR Y-channel triplets, merges the SR Y channel with
    bicubically upsampled Cb/Cr, and writes frames to results/<video_name>/.
    """
    video_name = cfg.video_name
    upscale_factor = cfg.upscale_factor
    use_gpu = cfg.gpu_mode
    test_set = TestsetLoader('data/test/'+ video_name, upscale_factor)
    test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
    net = SOFVSR(upscale_factor=upscale_factor)
    ckpt = torch.load('./log/SOFVSR_x' + str(upscale_factor) + '.pth')
    net.load_state_dict(ckpt)
    if use_gpu:
        net.cuda()
    for idx_iter, (LR_y_cube, SR_cb, SR_cr) in enumerate(test_loader):
        LR_y_cube = Variable(LR_y_cube)
        if use_gpu:
            LR_y_cube = LR_y_cube.cuda()
            if cfg.chop_forward:
                # crop borders so the patch sizes stay divisible by 2
                _, _, h, w = LR_y_cube.size()
                h = int(h//16) * 16
                w = int(w//16) * 16
                LR_y_cube = LR_y_cube[:, :, :h, :w]
                SR_cb = SR_cb[:, :h * upscale_factor, :w * upscale_factor]
                SR_cr = SR_cr[:, :h * upscale_factor, :w * upscale_factor]
                SR_y = chop_forward(LR_y_cube, net, cfg.upscale_factor)
            else:
                SR_y = net(LR_y_cube)
        else:
            SR_y = net(LR_y_cube)
        # NOTE(review): assumes the result is on CPU; with gpu_mode a .cpu()
        # may be needed before np.array -- confirm against the target torch version.
        SR_y = np.array(SR_y.data)
        SR_y = SR_y[np.newaxis, :, :]
        # Recombine SR luma with bicubically upsampled chroma, back to RGB.
        SR_ycbcr = np.concatenate((SR_y, SR_cb, SR_cr), axis=0).transpose(1,2,0)
        SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
        SR_rgb = np.clip(SR_rgb, 0, 255)
        SR_rgb = ToPILImage()(SR_rgb.astype(np.uint8))
        # Fix: os.mkdir fails when the parent 'results' directory is missing,
        # and the exists/mkdir pair is racy; makedirs with exist_ok covers both.
        os.makedirs('results/' + video_name, exist_ok=True)
        SR_rgb.save('results/'+video_name+'/sr_'+ str(idx_iter+2).rjust(2,'0') + '.png')
# Script entry point: parse CLI options and run inference on one Vid4 sequence.
if __name__ == '__main__':
    cfg = parse_args()
    main(cfg)
SOF-VSR | SOF-VSR-master/ACCV/metrics/evaluation.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
from math import log10
def gaussian(window_size, sigma):
    """Return a normalised 1-D Gaussian kernel of length `window_size`."""
    center = window_size // 2
    denom = float(2 * sigma ** 2)
    weights = [exp(-((i - center) ** 2) / denom) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) separable Gaussian window for grouped conv2d."""
    taps = gaussian(window_size, 1.5)
    kernel_2d = torch.ger(taps, taps).float()  # outer product of the 1-D kernel
    kernel_4d = kernel_2d.unsqueeze(0).unsqueeze(0)
    expanded = kernel_4d.expand(channel, 1, window_size, window_size).contiguous()
    return Variable(expanded)
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
def ssim(img1, img2, upscale_factor, window_size=11, size_average=True):
    """SSIM between two (B, C, H, W) images over a border-cropped region.

    A margin of 6 + upscale_factor pixels is discarded on every side to
    exclude boundary artefacts before computing SSIM.
    """
    _, channel, h, w = img1.size()
    margin = 6 + upscale_factor
    cropped1 = img1[:, :, margin: h - margin, margin: w - margin]
    cropped2 = img2[:, :, margin: h - margin, margin: w - margin]
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(cropped1.get_device())
    window = window.type_as(cropped1)
    return _ssim(cropped1, cropped2, window, window_size, channel, size_average)
def psnr(img1, img2, upscale_factor):
    """PSNR (dB) between two (B, C, H, W) images, assuming a [0, 1] range.

    A border of 6 + upscale_factor pixels is cropped on every side before
    computing the MSE, mirroring the crop used by `ssim`.
    """
    _, _, h, w = img1.size()
    margin = 6 + upscale_factor
    crop1 = img1[:, :, margin: h - margin, margin: w - margin]
    crop2 = img2[:, :, margin: h - margin, margin: w - margin]
    mse = torch.sum((crop1 - crop2) ** 2) / crop1.numel()
    return 10 * log10(1 / mse)
| 2,423 | 40.793103 | 114 | py |
finmag | finmag-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# Finmag documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 23 12:34:28 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the local Sphinx extensions and the finmag package importable by autodoc.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('modules/'))
sys.path.insert(0, os.path.abspath('../src/finmag/'))
sys.path.insert(0, os.path.abspath('../src/finmag/sim/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
              'ipynb_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Finmag'
copyright = u'2012, Finmag-team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'tailored_tutorials']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'finmag_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The custom 'finmag_theme' lives alongside this conf.py.
html_theme_path = ['.']
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = 'A FEniCS based Micromagnetics Solver'
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.jpeg'
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Finmagdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '',
}
# Add macros to LaTeX preamble.
# This will work with make latexpdf, but not make html.
# It is not possible to access the Mathjax configuration from sphynx.
# NOTE(review): each `macro` line already ends with a newline from file
# iteration, so the extra "\n" inserts blank lines between macros
# (harmless in LaTeX).
with open("latex_macros.sty") as f:
    for macro in f:
        latex_elements["preamble"] += macro + "\n"
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Finmag.tex', u'Finmag Documentation',
   u'Finmag-team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'finmag', u'Finmag Documentation',
     [u'Finmag-team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Finmag', u'Finmag Documentation',
   u'Finmag-team', 'Finmag', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/test.py | ########################################################
# This is an example of the training and test procedure
# You need to adjust the training and test dataloader based on your data
# CopyRight @ Xuesong Niu
########################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import shutil
import sys
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import scipy.io as sio
import torchvision.models as models
from torch.optim.lr_scheduler import MultiStepLR
sys.path.append('..');
from utils.database.Pixelmap import PixelMap_fold_STmap
from utils.model.model_disentangle import HR_disentangle_cross;
from utils.loss.loss_cross import Cross_loss;
from utils.loss.loss_r import Neg_Pearson;
from utils.loss.loss_SNR import SNR_loss;
# ---- hyper-parameters -------------------------------------------------------
batch_size_num = 2;
epoch_num = 70;
# NOTE(review): learning_rate is defined but unused -- the optimizer below
# hard-codes lr=0.0005.
learning_rate = 0.001;
test_batch_size = 5;
toTensor = transforms.ToTensor();
resize = transforms.Resize(size = (320,320));
#######################################################
# Weights balancing the individual loss terms.
lambda_hr = 1;
lambda_img = 0.0000025;
lambda_low_rank = 10;
lambda_ecg = 0.02;
lambda_snr = 1;
lambda_cross_fhr = 0.000005;
lambda_cross_fn = 0.000005;
lambda_cross_hr = 1;
# Number of frames per clip; also passed to the SNR loss.
video_length = 300;
########################################################################
### This is only a simple toy example dataloader (utils/database/PixelMap.py)
### This dataloader does not include the cross-validation division and training/test division.
### You need to adjust your dataloader based on your own data.
### parameter: root_dir: location of the MSTmaps
###            VerticalFlip: random vertical flip for data augmentation
########################################################################
train_dataset = PixelMap_fold_STmap(root_dir='./MSTmaps/',
                    Training = True, transform=transforms.Compose([resize, toTensor]), VerticalFlip = True,
                    video_length = video_length);
train_loader = DataLoader(train_dataset, batch_size=batch_size_num,
                          shuffle=True, num_workers=4);
test_dataset = PixelMap_fold_STmap(root_dir='./MSTmaps/',
                   Training = False, transform=transforms.Compose([resize, toTensor]), VerticalFlip = False,
                   video_length = video_length);
test_loader = DataLoader(test_dataset, batch_size=test_batch_size,
                         shuffle=False, num_workers=4);
#########################################################################
#########################################################################
#########################################################################
# Model and loss functions (CUDA required).
net = HR_disentangle_cross();
net.cuda();
#########################################################################
lossfunc_HR = nn.L1Loss();
lossfunc_img = nn.L1Loss();
lossfunc_cross = Cross_loss(lambda_cross_fhr = lambda_cross_fhr, lambda_cross_fn = lambda_cross_fn, lambda_cross_hr = lambda_cross_hr);
lossfunc_ecg = Neg_Pearson(downsample_mode = 0);
lossfunc_SNR = SNR_loss(clip_length = video_length, loss_type = 7);
optimizer = torch.optim.Adam([{'params': net.parameters(), 'lr': 0.0005}]);
def train():
    """Run one training epoch over `train_loader` using the module-level
    net, losses, optimizer and lambda weights; prints per-batch progress."""
    net.train()
    train_loss = 0
    for batch_idx, (data, bpm, fps, bvp, idx) in enumerate(train_loader):
        data = Variable(data)
        bvp = Variable(bvp)
        bpm = Variable(bpm.view(-1, 1))
        fps = Variable(fps.view(-1, 1))
        data, bpm = data.cuda(), bpm.cuda()
        fps = fps.cuda()
        bvp = bvp.cuda()
        feat_hr, feat_n, output, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2 = net(data)
        loss_hr = lossfunc_HR(output, bpm) * lambda_hr
        loss_img = lossfunc_img(data, img_out) * lambda_img
        loss_ecg = lossfunc_ecg(ecg, bvp) * lambda_ecg
        # BUG FIX: lossfunc_SNR returns a (loss, flag) tuple; the original
        # multiplied the tuple itself by lambda_snr, which only unpacked
        # correctly because lambda_snr happened to be 1 (tuple * 1 == tuple).
        # Unpack first, then scale the loss term. (Debug prints of bvp and
        # loss_ecg were removed.)
        loss_SNR, tmp = lossfunc_SNR(ecg, bpm, fps, pred=output, flag=None)
        loss_SNR = loss_SNR * lambda_snr
        loss = loss_hr + loss_ecg + loss_img + loss_SNR
        loss_cross, loss_hr1, loss_hr2, loss_fhr1, loss_fhr2, loss_fn1, loss_fn2, loss_hr_dis1, loss_hr_dis2 = lossfunc_cross(
            feat_hr, feat_n, output,
            feat_hrf1, feat_nf1, hrf1, idx1,
            feat_hrf2, feat_nf2, hrf2, idx2, bpm)
        loss = loss + loss_cross
        train_loss += loss.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('Train epoch: {:.0f}, it: {:.0f}, loss: {:.4f}, loss_hr: {:.4f}, loss_img: {:.4f}, loss_cross: {:.4f}, loss_snr: {:.4f}'.format(
            epoch, batch_idx, loss, loss_hr, loss_img, loss_cross, loss_SNR))
def test():
    """Evaluate heart-rate prediction on `test_loader`.

    Fix: the original accumulated test_loss but never reported or returned
    it, so evaluation had no observable effect. Now runs under no_grad,
    prints, and returns the mean per-batch L1 HR error (callers that ignore
    the return value are unaffected).
    """
    net.eval()
    test_loss = 0
    num_batches = 0
    with torch.no_grad():
        for (data, hr, fps, bvp, idx) in test_loader:
            data = Variable(data)
            hr = Variable(hr.view(-1, 1))
            data, hr = data.cuda(), hr.cuda()
            feat_hr, feat_n, output, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2 = net(data)
            loss = lossfunc_HR(output, hr)
            test_loss += loss.item()
            num_batches += 1
    mean_loss = test_loss / num_batches if num_batches else 0.0
    print('Test: mean HR L1 loss: {:.4f}'.format(mean_loss))
    return mean_loss
# ---- main training loop -----------------------------------------------------
begin_epoch = 1;
scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.5)
for epoch in range(begin_epoch, epoch_num + 1):
    # After 20 epochs, disable the random vertical-flip augmentation and
    # rebuild the loader so the change takes effect in the workers.
    if epoch > 20:
        train_dataset.transform = transforms.Compose([resize, toTensor]);
        train_dataset.VerticalFlip = False;
        train_loader = DataLoader(train_dataset, batch_size=batch_size_num,
                                  shuffle=True, num_workers=4);
    train();
    test();
    # BUG FIX: the scheduler was created but never stepped, so the learning
    # rate never decayed at the configured milestones (epochs 30 and 80).
    scheduler.step();
| 6,206 | 39.835526 | 159 | py |
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/database/Pixelmap.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import shutil
import numpy as np
from torch.utils.data import Dataset, DataLoader
import scipy.io as sio
from PIL import Image
import torchvision.transforms.functional as transF
import random;
# from skimage import io, transform
class PixelMap_fold_STmap(Dataset):
    """MSTmap dataset: one sample per subdirectory of ``root_dir``.

    Directory ``root_dir/<idx+1>/`` must contain ``img_rgb.png``,
    ``img_yuv.png`` and the MAT files ``bpm.mat``, ``fps.mat``, ``bvp.mat``.
    Each item is (feature_map, bpm, fps, bvp, idx) where feature_map is the
    channel-wise concatenation of the RGB and YUV maps.
    """

    def __init__(self, root_dir, Training=True, transform=None, VerticalFlip=False, video_length=300):
        self.train = Training          # True for the training split
        self.root_dir = root_dir
        self.transform = transform     # applied identically to both maps
        self.video_length = video_length
        self.VerticalFlip = VerticalFlip  # enable random joint vertical flip

    def __len__(self):
        # Fix: the original counted entries with a manual loop over
        # os.listdir; len() of the listing is equivalent and direct.
        return len(os.listdir(self.root_dir))

    def __getitem__(self, idx):
        # Sample directories are 1-indexed.
        dir_idx = idx + 1
        img_name1 = str(dir_idx) + '/img_rgb.png'
        img_name2 = str(dir_idx) + '/img_yuv.png'
        img_path1 = os.path.join(self.root_dir, img_name1)
        img_path2 = os.path.join(self.root_dir, img_name2)
        feature_map1 = Image.open(img_path1).convert('RGB')
        feature_map2 = Image.open(img_path2).convert('RGB')
        # Joint augmentation: both maps must receive the same flip.
        if self.VerticalFlip:
            if random.random() < 0.5:
                feature_map1 = transF.vflip(feature_map1)
                feature_map2 = transF.vflip(feature_map2)
        if self.transform:
            feature_map1 = self.transform(feature_map1)
            feature_map2 = self.transform(feature_map2)
        # Stack RGB and YUV maps along the channel axis (6 channels total).
        feature_map = torch.cat((feature_map1, feature_map2), dim=0)
        bpm_path = self.root_dir + str(dir_idx) + '/bpm.mat'
        bpm = sio.loadmat(bpm_path)['bpm']
        bpm = bpm.astype('float32')
        fps_path = self.root_dir + str(dir_idx) + '/fps.mat'
        fps = sio.loadmat(fps_path)['fps']
        fps = fps.astype('float32')
        bvp_path = self.root_dir + str(dir_idx) + '/bvp.mat'
        bvp = sio.loadmat(bvp_path)['bvp']
        bvp = bvp.astype('float32')
        bvp = bvp[0]
        return (feature_map, bpm, fps, bvp, idx)
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/loss/loss_cross.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
import os
import shutil
import numpy as np
import scipy.io as sio
from scipy.stats import norm
class Cross_loss(nn.Module):
    """Cross-verification loss for the disentangling network.

    Combines, for two pseudo samples (suffixes f1/f2): an L1 error of the
    pseudo HR predictions against ground truth, and L1 consistency of the
    pseudo HR/noise feature maps against the originals. Also returns the
    individual terms plus the HR-distillation errors against the original
    predictions (not part of the combined loss).
    """
    def __init__(self, lambda_cross_fhr = 0.000005, lambda_cross_fn = 0.000005, lambda_cross_hr = 1):
        super(Cross_loss, self).__init__()
        self.lossfunc_HR = nn.L1Loss()
        self.lossfunc_feat = nn.L1Loss()
        self.lambda_fhr = lambda_cross_fhr
        self.lambda_fn = lambda_cross_fn
        self.lambda_hr = lambda_cross_hr

    def forward(self, feat_hr, feat_n, hr, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, gt):
        # Ground truth and original predictions aligned to each pseudo batch.
        gt1, gt2 = gt[idx1, :], gt[idx2, :]
        loss_hr1 = self.lossfunc_HR(hrf1, gt1)
        loss_hr2 = self.lossfunc_HR(hrf2, gt2)
        # Feature-consistency terms against the original forward pass.
        loss_fhr1 = self.lossfunc_feat(feat_hrf1, feat_hr[idx1, :, :, :])
        loss_fhr2 = self.lossfunc_feat(feat_hrf2, feat_hr[idx2, :, :, :])
        loss_fn1 = self.lossfunc_feat(feat_nf1, feat_n[idx1, :, :, :])
        loss_fn2 = self.lossfunc_feat(feat_nf2, feat_n[idx2, :, :, :])
        # Distillation errors (returned for logging; excluded from `loss`).
        loss_hr_dis1 = self.lossfunc_HR(hrf1, hr[idx1, :])
        loss_hr_dis2 = self.lossfunc_HR(hrf2, hr[idx2, :])
        loss = (
            self.lambda_hr * (loss_hr1 + loss_hr2) / 2
            + self.lambda_fhr * (loss_fhr1 + loss_fhr2) / 2
            + self.lambda_fn * (loss_fn1 + loss_fn2) / 2
        )
        return loss, loss_hr1, loss_hr2, loss_fhr1, loss_fhr2, loss_fn1, loss_fn2, loss_hr_dis1, loss_hr_dis2
| 1,533 | 36.414634 | 151 | py |
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/loss/loss_SNR.py | import math
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class SNR_loss(nn.Module):
    """Frequency-domain heart-rate loss.

    The predicted rPPG wave is projected onto candidate heart-rate
    frequencies (40-150 bpm) via an explicit sin/cos DFT; the resulting
    power spectrum is treated as logits over bpm bins and compared to the
    ground-truth rate with cross-entropy. loss_type == 7 additionally
    rewards spectral power concentrated within +/- delta bins of the
    target (an SNR-style term).

    NOTE(review): buffers are created with .cuda() in __init__, so this
    module requires a CUDA device — confirm this is intended.
    """
    def __init__(self, clip_length = 300, delta = 3, loss_type = 1, use_wave = False):
        super(SNR_loss, self).__init__()
        self.clip_length = clip_length;
        # Number of time samples used for the DFT projection.
        self.time_length = 300;
        # Half-width (in bpm bins) of the window used by the SNR term.
        self.delta = delta;
        # NOTE(review): defined but never read in this class.
        self.delta_distribution = [0.4, 0.25, 0.05];
        # Modelled heart-rate range in beats per minute.
        self.low_bound = 40;
        self.high_bound = 150;
        # Candidate frequencies converted to Hz (bpm / 60).
        self.bpm_range = torch.arange(self.low_bound, self.high_bound, dtype = torch.float).cuda()
        self.bpm_range = self.bpm_range / 60.0;
        self.pi = 3.14159265;
        # 2*pi*n for n = 0..time_length-1: the sin/cos basis argument.
        two_pi_n = Variable(2 * self.pi * torch.arange(0, self.time_length, dtype = torch.float))
        # Hanning window applied to the wave before the DFT projection.
        hanning = Variable(torch.from_numpy(np.hanning(self.time_length)).type(torch.FloatTensor), requires_grad=True).view(1, -1)
        self.two_pi_n = two_pi_n.cuda();
        self.hanning = hanning.cuda();
        self.cross_entropy = nn.CrossEntropyLoss();
        self.nll = nn.NLLLoss();  # NOTE(review): unused in forward
        self.l1 = nn.L1Loss();  # NOTE(review): unused in forward
        self.loss_type = loss_type;
        self.eps = 0.0001;
        self.lambda_l1 = 0.1;
        self.use_wave = use_wave;
    def forward(self, wave, gt, fps, pred = None, flag = None): # all variable operation
        """Compute the loss and the arg-max bpm estimate.

        Args:
            wave: (B, time_length) predicted rPPG signal.
            gt:   ground-truth beat count over the clip (converted below).
            fps:  per-sample capture rate.
            pred: optional predicted beat count (converted, then unused).
            flag: optional 0/1 mask selecting which samples participate.

        Returns:
            (loss, whole_max_idx): whole_max_idx is the bpm bin with the
            largest spectral power, offset to absolute bpm.
        """
        if flag is not None:
            # Keep only samples whose flag equals 1.
            idx = flag.eq(1);
            wave = wave[idx,:];
            gt = gt[idx,:];
            fps = fps[idx,:];
            pred = pred[idx,:];
            if(gt.shape[0] == 0):
                # Nothing selected: zero loss, dummy index.
                loss = 0.0;
                return loss, 0;
        # Beat count -> bpm: gt * fps * 60 / clip_length.
        hr = torch.mul(gt, fps);
        hr = hr*60/self.clip_length;
        # Clamp targets into the modelled bpm range.
        hr[hr.ge(self.high_bound)] = self.high_bound-1;
        hr[hr.le(self.low_bound)] = self.low_bound;
        if pred is not None:
            pred = torch.mul(pred, fps);
            pred = pred * 60 / self.clip_length;
        batch_size = wave.shape[0];
        # Candidate frequencies normalised by each sample's frame rate.
        f_t = self.bpm_range / fps;
        # Window the wave, then compute DFT power at each candidate frequency.
        preds = wave * self.hanning;
        preds = preds.view(batch_size, 1, -1);
        f_t = f_t.view(batch_size, -1, 1);
        tmp = self.two_pi_n.repeat(batch_size, 1);
        tmp = tmp.view(batch_size, 1, -1)
        complex_absolute = torch.sum(preds * torch.sin(f_t*tmp), dim=-1) ** 2 \
                           + torch.sum(preds * torch.cos(f_t*tmp), dim=-1) ** 2
        # Class index of the true bpm within [low_bound, high_bound).
        target = hr - self.low_bound;
        target = target.type(torch.long).view(batch_size);
        whole_max_val, whole_max_idx = complex_absolute.max(1)
        whole_max_idx = whole_max_idx + self.low_bound;
        if self.loss_type == 1:
            # Plain cross-entropy over bpm bins.
            loss = self.cross_entropy(complex_absolute, target);
        elif self.loss_type == 7:
            # Normalise the spectrum to sum to 1 per sample, then add an SNR
            # term rewarding power concentrated within +/- delta of target.
            norm_t = (torch.ones(batch_size).cuda() / torch.sum(complex_absolute, dim = 1));
            norm_t = norm_t.view(-1,1);
            complex_absolute = complex_absolute * norm_t;
            loss = self.cross_entropy(complex_absolute, target);
            idx_l = target - self.delta;
            idx_l[idx_l.le(0)] = 0;
            idx_r = target + self.delta;
            idx_r[idx_r.ge(self.high_bound - self.low_bound - 1)] = self.high_bound - self.low_bound - 1;
            loss_snr = 0.0;
            for i in range(0, batch_size):
                # 1 - (power mass inside the target window).
                loss_snr = loss_snr + 1 - torch.sum(complex_absolute[i, idx_l[i]:idx_r[i]]);
            loss_snr = loss_snr / batch_size;
            loss = loss + loss_snr;
        return loss, whole_max_idx
| 3,482 | 31.858491 | 130 | py |
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/loss/loss_r.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
import os
import shutil
import numpy as np
import scipy.io as sio
from scipy.stats import norm
class Neg_Pearson(nn.Module):  # Pearson range [-1, 1] so if < 0, abs|loss| ; if >0, 1- loss
    """Negative Pearson-correlation loss, averaged over the batch.

    Each sample contributes 1 - r, where r is the Pearson correlation
    between the predicted signal and its label signal.
    """

    def __init__(self, downsample_mode=0):
        super(Neg_Pearson, self).__init__()
        # downsample_mode == 1: labels are sampled at twice the prediction
        # rate, so every other label point is dropped before correlating.
        self.downsample_mode = downsample_mode
        return

    def forward(self, preds, labels):
        total = 0.0
        n_samples = preds.shape[0]
        for sample in range(n_samples):
            x = preds[sample, :]
            y = labels[sample, :]
            if self.downsample_mode == 1:
                y = y[0::2]
            # Raw sums for the closed-form Pearson correlation.
            sum_x = torch.sum(x)
            sum_y = torch.sum(y)
            sum_xy = torch.sum(x * y)
            sum_x2 = torch.sum(x * x)
            sum_y2 = torch.sum(y * y)
            length = preds.shape[1]
            numerator = length * sum_xy - sum_x * sum_y
            denominator = torch.sqrt(
                (length * sum_x2 - sum_x * sum_x) * (length * sum_y2 - sum_y * sum_y))
            total += 1 - numerator / denominator
        if n_samples != 0:
            total = total / n_samples
        return total
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/model/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # Bias is omitted because every use is followed by a BatchNorm layer.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual (identity or downsample) shortcut."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # The first conv carries the stride; the second keeps resolution.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the input itself unless a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 style)."""

    expansion = 4  # final 1x1 conv widens channels by this factor

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce channels.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Spatial 3x3 conv; carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Expand channels back by `expansion`.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the input itself unless a downsample module is given.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone with optional intermediate feature outputs.

    When ``num_output == 34`` forward() returns (fc output, layer2 map,
    layer3 map); for any other value it returns only the fc output.
    """
    def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
        # block: residual block class (e.g. BasicBlock/Bottleneck);
        # layers: number of blocks per stage;
        # ave_size: kernel size of the final average pool.
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): named "maxpool" but is an AvgPool2d — differs from
        # the torchvision reference implementation; presumably intentional,
        # confirm.
        self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(ave_size, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.num_output = num_output
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one stage; a 1x1-conv + BN downsample aligns the shortcut
        # whenever resolution or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        conv1 = self.maxpool(x)
        conv2 = self.layer1(conv1)
        conv3 = self.layer2(conv2)  # B*128*28*28
        conv4 = self.layer3(conv3)  # B*256*14*14
        conv5 = self.layer4(conv4)  # B*512*7*7
        x = self.avgpool(conv5)
        feat = x.view(x.size(0), -1)
        x = self.fc(feat)
        if self.num_output == 34:
            # Also expose layer2/layer3 maps for multi-task heads.
            return x, conv3, conv4
        else:
            return x;
class ResNet_layer4(nn.Module):
    """Stage-4-only ResNet head: applies layer4 to layer3-style features,
    average-pools and classifies."""
    def __init__(self, block, layers, num_classes=1000, ave_size=7):
        # Consumes a 256-channel feature map (layer3 output with BasicBlock).
        self.inplanes = 256
        super(ResNet_layer4, self).__init__()
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(ave_size, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Identical stage builder to the full ResNet.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        conv5 = self.layer4(x)  # B*512*7*7
        x = self.avgpool(conv5)
        feat = x.view(x.size(0), -1)
        x = self.fc(feat)
        return x
class ResNet_layer34(nn.Module):
    """Stage-3+4 ResNet head: runs layer3 and layer4 on layer2-style
    features; forward returns (fc output, pooled feature vector)."""
    def __init__(self, block, layers, num_classes=1000, ave_size=7):
        # Consumes a 128-channel feature map (layer2 output with BasicBlock).
        self.inplanes = 128
        super(ResNet_layer34, self).__init__()
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(ave_size, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Identical stage builder to the full ResNet.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        conv4 = self.layer3(x)  # B*256*14*14
        conv5 = self.layer4(conv4)  # B*512*7*7
        x = self.avgpool(conv5)
        feat = x.view(x.size(0), -1)
        x = self.fc(feat)
        return x, feat
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # ImageNet weights from the torchvision model zoo.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # ImageNet weights from the torchvision model zoo.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
    return net
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # Fix: the docstring documented a `pretrained` flag the function did not
    # accept. Add it (default False) for consistency with the sibling
    # resnet18/34/101/152 factories; existing calls are unaffected.
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        # ImageNet weights from the torchvision model zoo.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        # ImageNet weights from the torchvision model zoo.
        net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
class ResNet_part(nn.Module):
    """Truncated ResNet (stem + layer1 + layer2) used as a feature encoder.

    forward() returns the layer2 feature map (128 * block.expansion
    channels); num_classes/ave_size/num_output are accepted only for
    signature compatibility with ResNet and are unused.
    """
    def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
        self.inplanes = 64
        super(ResNet_part, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): named "maxpool" but is an AvgPool2d, matching the
        # full ResNet class above.
        self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Identical stage builder to the full ResNet.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        conv1 = self.maxpool(x)
        conv2 = self.layer1(conv1)
        conv3 = self.layer2(conv2)
        return conv3
def resnet18_part(**kwargs):
    """Truncated ResNet-18 (stem + layer1 + layer2) feature extractor."""
    return ResNet_part(BasicBlock, [2, 2, 2, 2], **kwargs)
class ResNet_part1(nn.Module):
    """Minimal truncated ResNet (stem + layer1 only).

    forward() returns the layer1 feature map (64 * block.expansion
    channels); num_classes/ave_size/num_output are accepted only for
    signature compatibility and are unused.
    """
    def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
        self.inplanes = 64
        super(ResNet_part1, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # NOTE(review): named "maxpool" but is an AvgPool2d, matching the
        # full ResNet class above.
        self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Identical stage builder to the full ResNet.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        conv1 = self.maxpool(x)
        conv2 = self.layer1(conv1)
        return conv2
def resnet18_part1(**kwargs):
    """Minimal ResNet-18 front end (stem + layer1 only)."""
    return ResNet_part1(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34_part(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # NOTE(review): despite the docstring, `pretrained` loads a local
    # checkpoint relative to the working directory, not torchvision
    # ImageNet weights — confirm the path is valid at runtime.
    model = ResNet_part(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        ckp_path = '../model/pretrain/step_390000.model'
        checkpoint = torch.load(ckp_path)
        pretrained_dict = checkpoint['net_state_dict'];
        model_dict = model.state_dict()
        # Keep only checkpoint keys that exist in this truncated network.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    return model
class ResNet_part_cov3(nn.Module):
    """Truncated ResNet (stem + layer1 + layer2) feature encoder.

    Unlike ResNet_part, the stem here uses a true MaxPool2d.
    forward() returns the layer2 feature map (128 * block.expansion
    channels); num_classes/ave_size/num_output are unused.
    """
    def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
        self.inplanes = 64
        super(ResNet_part_cov3, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # True max-pool stem (ResNet_part uses AvgPool2d here).
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # print(self.inplanes)
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Identical stage builder to the full ResNet.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        conv1 = self.maxpool(x)
        conv2 = self.layer1(conv1)
        conv3 = self.layer2(conv2)
        return conv3
def resnet34_part_cov3(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet_part_cov3(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Weights come from a local pre-training checkpoint, not the
        # torchvision zoo; only keys present in this truncated network
        # are loaded.
        ckp_path = '../model/pretrain/step_390000.model'
        state = torch.load(ckp_path)['net_state_dict']
        own_state = net.state_dict()
        own_state.update({k: v for k, v in state.items() if k in own_state})
        net.load_state_dict(own_state)
    return net
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/model/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os, sys
import shutil
import numpy as np
import scipy.io as sio
sys.path.append('..');
from utils.model.resnet import resnet18, resnet_small;
from utils.model.resnet_stconv import resnet18_stconv;
import time
| 353 | 16.7 | 54 | py |
CVD-Physiological-Measurement | CVD-Physiological-Measurement-master/utils/model/model_disentangle.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import shutil
import numpy as np
import scipy.io as sio
sys.path.append('..');
from utils.model.resnet import resnet18, resnet18_part;
import time
class ResidualBlock(nn.Module):
    """Residual Block."""

    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        # conv -> IN -> ReLU -> conv -> IN, added back onto the input.
        self.main = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(dim_out, affine=True),
        )

    def forward(self, x):
        # Identity shortcut: requires dim_in == dim_out.
        return self.main(x) + x
class Generator(nn.Module):
    """Decoder: residual bottleneck blocks, up-sampling stages, and an
    image head whose output channel count depends on ``img_mode``."""

    def __init__(self, conv_dim=64, repeat_num=2, img_mode=3, up_time=3):
        super(Generator, self).__init__()
        trunk = []
        width = conv_dim
        # Bottleneck: channel-preserving residual blocks.
        for _ in range(repeat_num):
            trunk.append(ResidualBlock(dim_in=width, dim_out=width))
        # Up-sampling: each stage doubles resolution and halves channels.
        for _ in range(up_time):
            trunk.extend([
                nn.ConvTranspose2d(width, width // 2, kernel_size=3, stride=2,
                                   padding=1, output_padding=1, bias=False),
                nn.InstanceNorm2d(width // 2, affine=True),
                nn.ReLU(inplace=True),
            ])
            width = width // 2
        self.main = nn.Sequential(*trunk)
        # Output channels per mode: 3 -> 6, 1 -> 3, 4 -> 9, 0 -> 3.
        # Any other mode gets no final conv (Tanh only), as before.
        out_channels = {3: 6, 1: 3, 4: 9, 0: 3}.get(img_mode)
        head = []
        if out_channels is not None:
            head.append(nn.Conv2d(width, out_channels, kernel_size=7, stride=1,
                                  padding=3, bias=False))
        head.append(nn.Tanh())
        self.img_reg = nn.Sequential(*head)

    def forward(self, x):
        return self.img_reg(self.main(x))
class HR_estimator_multi_task_STmap(nn.Module):
    """Multi-task HR estimator over 6-channel spatial-temporal maps.

    A modified project-local ResNet-18 regresses the heart rate; its
    layer3 feature map is upsampled back to the clip length to predict
    the rPPG (ECG-like) wave.
    """
    def __init__(self, video_length = 300):
        super(HR_estimator_multi_task_STmap, self).__init__()
        # num_output=34 makes the project-local resnet18 return
        # (prediction, layer2 map, layer3 map) — see utils/model/resnet.py.
        self.extractor = resnet18(pretrained=False, num_classes=1, num_output=34);
        self.extractor.avgpool = nn.AdaptiveAvgPool2d((1,1))
        # Replace the stem conv to accept 6 input channels.
        self.extractor.conv1 = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # Collapse the layer3 map to a 1x10 strip before upsampling.
        self.feature_pool = nn.AdaptiveAvgPool2d((1, 10));
        # Two transposed-conv stages expand the time axis (x3, then x5).
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=256, out_channels=64, kernel_size=[1, 3], stride=[1, 3],
                               padding=[0, 0]),  # [1, 128, 32]
            nn.BatchNorm2d(64),
            nn.ELU(),
        )
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=[1, 5], stride=[1, 5],
                               padding=[0, 0]),  # [1, 128, 32]
            nn.BatchNorm2d(32),
            nn.ELU(),
        )
        self.video_length = video_length;
        # Stretch/compress to exactly video_length samples, then 1x1 conv
        # down to a single wave channel.
        self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length)))
        self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """Return (hr, ecg, feat_out): HR estimate, (B, video_length) wave,
        and the extractor's layer2 feature map."""
        hr, feat_out, feat = self.extractor(x);
        x = self.feature_pool(feat);
        x = self.upsample1(x);
        x = self.upsample2(x);
        x = self.poolspa(x);
        x = self.ecg_conv(x)
        ecg = x.view(-1, int(self.video_length));
        return hr, ecg, feat_out;
class HR_disentangle(nn.Module):
    """Encoder-decoder that splits an ST map into an HR-related feature and
    a noise feature, then reconstructs the input from their sum."""
    def __init__(self, video_length = 300, decov_num = 1):
        super(HR_disentangle, self).__init__()
        # Physiological branch: predicts HR, rPPG wave and HR features.
        self.extractor = HR_estimator_multi_task_STmap();
        # Noise branch: truncated ResNet-18 over the same 6-channel input.
        self.Noise_encoder = resnet18_part()
        self.Noise_encoder.conv1 = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # Decoder reconstructs the image from the 128-channel feature sum.
        self.decoder = Generator(conv_dim=128, repeat_num=decov_num, img_mode = 3)
        self.video_length = video_length;
        # NOTE(review): poolspa and ecg_conv are defined but never used in
        # forward (the wave comes from the extractor) — confirm intended.
        self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length/2)))
        self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
    def forward(self, img):
        """Return (feat_hr, feat_n, hr, reconstructed_img, ecg)."""
        hr, ecg, feat_hr = self.extractor(img);
        feat_n = self.Noise_encoder(img);
        # The reconstruction decodes the sum of the disentangled features.
        feat = feat_hr + feat_n;
        img = self.decoder(feat);
        return feat_hr, feat_n, hr, img, ecg
class HR_disentangle_cross(nn.Module):
    """Cross-sample training wrapper: swaps HR and noise features between
    two random permutations of the batch, decodes pseudo images, and
    re-encodes them to obtain cross-consistency targets."""
    def __init__(self, video_length = 300):
        super(HR_disentangle_cross, self).__init__()
        self.encoder_decoder = HR_disentangle(decov_num = 1);
        self.video_length = video_length;
        # NOTE(review): poolspa and ecg_conv are never used in forward.
        self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length)))
        self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
    def forward(self, img):
        batch_size = img.size(0);
        feat_hr, feat_n, hr, img_out, ecg = self.encoder_decoder(img);
        # Two independent random index draws (with replacement).
        idx1 = torch.randint(batch_size, (batch_size,))
        idx2 = torch.randint(batch_size, (batch_size,))
        idx1 = idx1.long();
        idx2 = idx2.long();
        feat_hr1 = feat_hr[idx1, :, :, :];
        feat_hr2 = feat_hr[idx2, :, :, :];
        feat_n1 = feat_n[idx1, :, :, :];
        feat_n2 = feat_n[idx2, :, :, :];
        # Swap the noise features between the two draws before decoding.
        featf1 = feat_hr1 + feat_n2;
        featf2 = feat_hr2 + feat_n1;
        imgf1 = self.encoder_decoder.decoder(featf1);
        imgf2 = self.encoder_decoder.decoder(featf2);
        # Re-encode the pseudo images; imgf1 carries idx1's HR content and
        # idx2's noise, hence its noise feature is named feat_nf2.
        feat_hrf1, feat_nf2, hrf1, img_outf1, ecg1 = self.encoder_decoder(imgf1);
        feat_hrf2, feat_nf1, hrf2, img_outf2, ecg2 = self.encoder_decoder(imgf2);
        return feat_hr, feat_n, hr, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2
| 5,936 | 34.76506 | 136 | py |
CREPE | CREPE-master/crepe_prod_eval_cyclip.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ast
import argparse
import logging
import os
from PIL import Image, ImageFile
from dataclasses import dataclass
from time import time
import json
import torch
import torchvision.transforms.functional as TF
from pkgs.openai.clip import load
from torch import nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def collator(batch):
    """Collate (image, input_ids, attention_mask) triples into batch tensors.

    Images are stacked along a new batch dimension; the tokenized caption
    sets (one positive + hard negatives per image) are concatenated along
    dim 0, so the text batch is ``set_size`` times larger than the image
    batch.
    """
    # Fix: removed the dead `texts = []` assignment that was immediately
    # overwritten by the torch.cat below.
    images = torch.stack([x[0] for x in batch], dim=0)
    texts = torch.cat([x[1] for x in batch], dim=0)
    attention_masks = torch.cat([x[2] for x in batch], dim=0)
    return images, texts, attention_masks
### DATASET CONSTRUCTION
class CsvDataset(BaseCsvDataset):
    """Retrieval dataset: each item is an image plus its positive caption
    and hard negatives, processed with the CyCLIP processor."""
    def __init__(self, input_filename, args, processor):
        super().__init__(input_filename, args)
        # Processor supplies process_image / process_text (CyCLIP API).
        self.processor = processor
    def __getitem__(self, idx):
        raw_image = self.get_image_by_id(self.images[idx])
        if self.crop:
            # Crop to the example's region-of-interest (x, y, h, w from base).
            raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
        image = torch.tensor(self.processor.process_image(raw_image))
        # Positive caption first, followed by its hard negatives.
        return_dict = self.processor.process_text([str(self.captions[idx])] + list(self.hard_negs[idx]))
        input_ids = return_dict['input_ids']
        attention_mask = return_dict['attention_mask']
        return image, input_ids, attention_mask
def get_data(args, retrieval_data_path, processor, batch_size=16, num_workers=1):
    """Build a non-shuffled DataLoader over a hard-negatives retrieval CSV.

    Args:
        args: parsed CLI options forwarded to CsvDataset.
        retrieval_data_path: path to the retrieval CSV file.
        processor: CLIP image/text processor used by the dataset.
        batch_size: images per batch. Default 16 preserves the previously
            hard-coded value (evaluate() logs progress assuming 16).
        num_workers: DataLoader worker processes (previously hard-coded 1).

    Returns:
        DataInfo wrapping the loader; the loader additionally carries
        `num_samples` and `num_batches` attributes.
    """
    # Improvement: batch_size/num_workers were hard-coded; they are now
    # keyword parameters with the old values as defaults (backward
    # compatible). Dead locals (input_filename alias, shuffle, sampler)
    # were removed.
    dataset = CsvDataset(retrieval_data_path, args, processor)
    num_samples = len(dataset)
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True,
        sampler=None,
        drop_last=False,
        collate_fn=collator,
    )
    dataloader.num_samples = num_samples
    dataloader.num_batches = len(dataloader)
    return DataInfo(dataloader)
### EVALUATION
def evaluate(model, data, complexity, negative_type):
    """Rank each image's positive caption against its hard negatives.

    Args:
        model: CyCLIP model exposing get_image_features / get_text_features.
        data: DataInfo whose loader yields (images, texts, attention_mask);
            texts holds set_size captions per image, positive first.
        complexity, negative_type: evaluation condition labels (not used
            inside this function).

    Returns:
        dict of one-to-many retrieval metrics.
    """
    metrics = {}
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataloader = data.dataloader
    # num_samples = 0
    # samples_per_val = dataloader.num_samples
    # cumulative_loss = 0.0
    # all_image_features, all_text_features = [], []
    one2many = dataloader.dataset.one2many
    if one2many:
        all_ranks = []
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            images, texts, attention_mask = batch
            images = images.to(device=device, non_blocking=True)
            texts = texts.to(device=device, non_blocking=True)
            attention_mask = attention_mask.to(device=device, non_blocking=True)
            if one2many:
                # L2-normalised embeddings for cosine-similarity ranking.
                image_emb = model.get_image_features(images)
                image_emb /= image_emb.norm(dim = -1, keepdim = True)
                text_emb = model.get_text_features(input_ids = texts, attention_mask = attention_mask)
                text_emb /= text_emb.norm(dim = -1, keepdim = True)
                # Each image owns a contiguous group of set_size texts.
                set_size = text_emb.shape[0] // image_emb.shape[0]
                for j in range(image_emb.shape[0]):
                    curr_image_emb = image_emb[j:j+1, :]
                    curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
                    # Rank of the positive caption among its negatives.
                    rank = get_one2many_rank(curr_image_emb, curr_text_emb)
                    all_ranks.append(rank)
            # NOTE(review): progress count assumes the loader batch size is 16.
            print(f'Processed example {i*16}')
    # NOTE(review): all_ranks is unbound here if one2many is False — this
    # path would raise; presumably the dataset is always one2many.
    metrics = get_one2many_metrics(np.array(all_ranks))
    # Alter output here
    logging.info(
        "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
    )
    return metrics
def main():
    """Run CyCLIP productivity evaluation over complexities 4-12 for each
    hard-negative type, optionally writing per-type metric JSON files."""
    args = setup_args()
    if args.output_dir:
        output_dir = os.path.join(args.output_dir, 'cyclip')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    # Load the model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model, processor = load(name = args.model_name, pretrained = args.pretrained)
    # NOTE(review): checkpoint path 'best.pt' is hard-coded relative to the
    # working directory — confirm this is where the fine-tuned weights live.
    checkpoint = torch.load('best.pt', map_location=device)
    state_dict = checkpoint['state_dict']
    # Strip DataParallel's "module." prefix if the checkpoint has it.
    if(next(iter(state_dict.items()))[0].startswith("module")):
        state_dict = {key[len("module."):]: value for key, value in state_dict.items()}
    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()
    for hard_neg_type in args.hard_neg_types:
        all_metrics = {}
        # Iterate over each complexity
        for i in range(4, 13):
            print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
            start_time = time()
            retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
            data = get_data(args, retrieval_data_path, processor)
            metrics = evaluate(model, data, i, hard_neg_type)
            print(f'Complexity {i} took {time() - start_time} seconds')
            all_metrics[i] = metrics
        if args.output_dir:
            output = os.path.join(output_dir, f'productivity_cyclip_{args.model_name}_{hard_neg_type}_metrics.json')
            print("saving results to:", output)
            with open(output, 'w') as f:
                json.dump(all_metrics, f)
if __name__ == "__main__":
    main()
| 5,815 | 32.045455 | 135 | py |
CREPE | CREPE-master/crepe_prod_eval_albef.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from PIL import Image
from time import time
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import numpy as np
import json
# ALBEF:
# from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
import ruamel.yaml as yaml
from models.model_retrieval import ALBEF
from models.vit import interpolate_pos_embed
# from transformers import BertTokenizer
from models.tokenization_bert import BertTokenizer
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
max_text_length = 512
TEXT_DEFAULT_TOKENIZER = "bert-base-uncased"
text_tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
def collator(batch):
    """Collate (image, token ids, attention mask) triples into batch tensors.

    Images gain a new batch dimension; the per-image caption sets are
    concatenated along dim 0.
    """
    images = torch.stack([item[0] for item in batch], dim=0)
    texts = torch.cat([item[1] for item in batch], dim=0)
    masks = torch.cat([item[2] for item in batch], dim=0)
    return images, texts, masks
### DATASET CONSTRUCTION
def default_text_transform(texts):
    """Tokenize a list of strings with the module-level BERT tokenizer.

    Each text is padded/truncated to ``max_text_length``.

    Returns:
        (input_ids, attention_mask), both tensors of shape
        (len(texts), max_text_length).
    """
    # Fix: removed the unused `start_time = time()` dead assignment.
    # Expect a list of texts
    tokenized_texts = []
    attention_masks = []
    for text in texts:
        tokenized = text_tokenizer(text, padding="max_length",
                   max_length=max_text_length, truncation=True, return_tensors='pt')
        tokenized_texts.append(tokenized['input_ids'])
        attention_masks.append(tokenized['attention_mask'])
    tokenized_texts = torch.cat(tokenized_texts, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    return tokenized_texts, attention_masks
class CsvDataset(BaseCsvDataset):
    """Retrieval dataset producing (image, token ids, attention mask) with
    ALBEF's COCO-retrieval image preprocessing."""
    def __init__(self, input_filename, args, config):
        super().__init__(input_filename, args)
        # albef transform:
        normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
        test_transform = transforms.Compose([
            transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),
            transforms.ToTensor(),
            normalize,
            ])
        # NOTE(review): stored as self.image_transform, but __getitem__
        # applies self.transforms — presumably an attribute set by
        # BaseCsvDataset. If the base class does not define it, the ALBEF
        # transform built here is never used; confirm which attribute the
        # base expects.
        self.image_transform = test_transform
        self.text_transform = default_text_transform
    def __getitem__(self, idx):
        raw_image = self.get_image_by_id(self.images[idx])
        if self.crop:
            # Crop to the example's region-of-interest from the base class.
            raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
        image = self.transforms(raw_image)
        # Positive caption first, followed by its hard negatives.
        texts, attn_mask = self.text_transform([str(self.captions[idx])] + list(self.hard_negs[idx]))
        return image, texts, attn_mask
def get_data(args, retrieval_data_path, config):
    """Wrap the retrieval CSV in a batched, order-preserving DataLoader
    and return it as a DataInfo."""
    dataset = CsvDataset(retrieval_data_path, args, config=config)
    loader = DataLoader(
        dataset,
        batch_size=16,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
        sampler=None,
        drop_last=False,
        collate_fn=collator,
    )
    # Expose sample/batch counts on the loader itself.
    loader.num_samples = len(dataset)
    loader.num_batches = len(loader)
    return DataInfo(loader)
### EVALUATION
def evaluate(model, data, complexity, negative_type):
    """Run one2many image-to-text retrieval evaluation for ALBEF.

    Args:
        model: ALBEF model exposing visual_encoder/vision_proj and
            text_encoder/text_proj heads.
        data: DataInfo wrapping the evaluation dataloader.
        complexity: complexity level under evaluation (unused here).
        negative_type: hard-negative type under evaluation (unused here).

    Returns:
        Dict of retrieval metrics (mean/median rank, R@k, ...).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataloader = data.dataloader
    one2many = dataloader.dataset.one2many
    # BUG FIX: `assert(cond, msg)` asserts a non-empty tuple, which is always
    # truthy; use the statement form so the check actually runs.
    assert one2many, "Not one2many?"
    all_ranks = []
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            images, texts, masks = batch
            images = images.to(device=device, non_blocking=True)
            texts = texts.to(device=device, non_blocking=True)
            masks = masks.to(device=device, non_blocking=True)
            # Project and L2-normalize the [CLS] embeddings of both modalities.
            image_feat = model.visual_encoder(images)
            image_embed = model.vision_proj(image_feat[:, 0, :])
            image_embed = F.normalize(image_embed, dim=-1)
            text_out = model.text_encoder(texts, attention_mask=masks, mode='text')
            text_feat = text_out.last_hidden_state
            text_emb = F.normalize(model.text_proj(text_feat[:, 0, :]))
            # Each image is paired with a contiguous set of captions
            # (ground truth first, then its hard negatives).
            set_size = text_emb.shape[0] // image_embed.shape[0]
            for j in range(image_embed.shape[0]):
                curr_image_emb = image_embed[j:j+1, :]
                curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
                rank = get_one2many_rank(curr_image_emb, curr_text_emb)
                all_ranks.append(rank)
            print(f'Processed example {i*16}')
    metrics = get_one2many_metrics(np.array(all_ranks))
    # Alter output here
    logging.info(
        "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
    )
    return metrics
def main():
    """Evaluate a pretrained ALBEF checkpoint on the CREPE productivity
    hard-negative retrieval sets, one complexity level (4..12) at a time."""
    args = setup_args()
    if args.output_dir:
        output_dir = os.path.join(args.output_dir, 'albef')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    # LOAD ALBEF
    config_str = './configs/Retrieval_coco.yaml'
    config = yaml.load(open(config_str, 'r'), Loader=yaml.Loader)
    tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
    albef = ALBEF(config=config, text_encoder=TEXT_DEFAULT_TOKENIZER, tokenizer=tokenizer)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")
    # MODEL CHECKPOINT
    checkpoint = torch.load('./ALBEF.pth', map_location='cpu')
    state_dict = checkpoint['model']
    # reshape positional embedding to accommodate for image resolution change
    pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],albef.visual_encoder)
    state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
    m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],albef.visual_encoder_m)
    state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
    # Strip the 'bert.' prefix so checkpoint keys line up with the model's
    # text-encoder module names.
    for key in list(state_dict.keys()):
        if 'bert' in key:
            encoder_key = key.replace('bert.','')
            state_dict[encoder_key] = state_dict[key]
            del state_dict[key]
    msg = albef.load_state_dict(state_dict,strict=False)
    albef = albef.to(device)
    albef.eval()
    for hard_neg_type in args.hard_neg_types:
        all_metrics = {}
        # Iterate over each complexity
        for i in range(4, 13):
            print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
            start_time = time()
            retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
            data = get_data(args, retrieval_data_path, config)
            metrics = evaluate(albef, data, i, hard_neg_type)
            print(f'Complexity {i} took {time() - start_time} seconds')
            all_metrics[i] = metrics
        # Persist per-complexity metrics for this hard-negative type.
        if args.output_dir:
            output = os.path.join(output_dir, f'productivity_albef_{hard_neg_type}_metrics.json')
            print("saving results to:", output)
            with open(output, 'w') as f:
                json.dump(all_metrics, f)
if __name__ == "__main__":
main()
| 7,966 | 34.887387 | 135 | py |
CREPE | CREPE-master/crepe_prod_eval_flava.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ast
import logging
import os
from PIL import Image
from dataclasses import dataclass
from time import time
import json
import torch
from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchmultimodal.models.flava.model import flava_model
from transformers import BertTokenizer
import torchvision.transforms.functional as TF
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
max_text_length = 512
TEXT_DEFAULT_TOKENIZER = "bert-base-uncased"
text_tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
def collator(batch):
    """Collate FLAVA dataset items into a batch.

    Each item is (image_dict, tokenized_texts) where image_dict carries the
    transformed image under the "image" key. Images are stacked into a new
    batch dimension; the per-item text sets are concatenated along dim 0.
    """
    # BUG FIX: removed dead `texts = []` — it was immediately overwritten.
    images = torch.stack([x[0]["image"] for x in batch], dim=0)
    texts = torch.cat([x[1] for x in batch], dim=0)
    return images, texts
### DATASET CONSTRUCTION
def default_text_transform(texts):
    """Tokenize a list of texts with the module-level BERT tokenizer.

    Each text is padded/truncated to ``max_text_length``.

    Args:
        texts: list of raw caption strings.

    Returns:
        LongTensor of input ids, shape (len(texts), max_text_length).
    """
    # BUG FIX: removed dead `start_time = time()` — the value was never read.
    tokenized_texts = []
    for text in texts:
        tokenized = text_tokenizer(text, padding="max_length",
                        max_length=max_text_length, truncation=True, return_tensors='pt')
        tokenized_texts.append(torch.LongTensor(tokenized['input_ids']))
    # Stack the per-text (1, L) tensors into a single (N, L) batch.
    tokenized_texts = torch.cat(tokenized_texts, dim=0)
    return tokenized_texts
class CsvDataset(BaseCsvDataset):
    """CSV-backed retrieval dataset using FLAVA's evaluation image transform."""

    def __init__(self, input_filename, args):
        super().__init__(input_filename, args)
        # Deterministic (eval-mode) FLAVA preprocessing for images.
        self.image_transform = FLAVAImageTransform(is_train=False)
        self.text_transform = default_text_transform

    def __getitem__(self, idx):
        raw_image = self.get_image_by_id(self.images[idx])
        if self.crop:
            # Restrict the image to the annotated region.
            raw_image = TF.crop(
                raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx]
            )
        image = self.image_transform(raw_image)
        caption = str(self.captions[idx])
        if self.one2many:
            # Ground-truth caption first, followed by its hard negatives.
            texts = self.text_transform([caption] + list(self.hard_negs[idx]))
        else:
            texts = self.text_transform([caption])[0]
        return image, texts
def get_data(args, retrieval_data_path):
    """Build the evaluation DataLoader for one retrieval CSV file."""
    dataset = CsvDataset(retrieval_data_path, args)
    loader = DataLoader(
        dataset,
        batch_size=8,
        shuffle=False,       # evaluation order must be deterministic
        num_workers=1,
        pin_memory=True,
        sampler=None,
        drop_last=False,
        collate_fn=collator,
    )
    # Attach bookkeeping attributes expected by the evaluation loop.
    loader.num_samples = len(dataset)
    loader.num_batches = len(loader)
    return DataInfo(loader)
### EVALUATION
def evaluate(model, data, complexity, negative_type, output_path=None):
    """Run one2many image-to-text retrieval evaluation for FLAVA.

    Args:
        model: FLAVA model exposing encode_image / encode_text.
        data: DataInfo wrapping the evaluation dataloader.
        complexity: complexity level under evaluation (unused here).
        negative_type: hard-negative type under evaluation (unused here).
        output_path: unused. BUG FIX: given a default so the four-argument
            call in main() no longer raises TypeError.

    Returns:
        Dict of retrieval metrics (mean/median rank, R@k, ...).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataloader = data.dataloader
    one2many = dataloader.dataset.one2many
    # BUG FIX: `assert(cond, msg)` asserts a non-empty tuple, which is always
    # truthy; use the statement form so the check actually runs.
    assert one2many, "Not one2many?"
    all_ranks = []
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            images, texts = batch
            images = images.to(device=device, non_blocking=True)
            texts = texts.to(device=device, non_blocking=True)
            # Project and L2-normalize both modalities' embeddings.
            _, image_emb = model.encode_image(images, projection=True)
            image_emb = nn.functional.normalize(image_emb, dim=-1)
            _, text_emb = model.encode_text(texts, projection=True)
            text_emb = nn.functional.normalize(text_emb)
            # Each image is paired with a contiguous set of captions
            # (ground truth first, then its hard negatives).
            set_size = text_emb.shape[0] // image_emb.shape[0]
            for j in range(image_emb.shape[0]):
                curr_image_emb = image_emb[j:j+1, :]
                curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
                rank = get_one2many_rank(curr_image_emb, curr_text_emb)
                all_ranks.append(rank)
            # print(f'Processed example {i*8}')
    metrics = get_one2many_metrics(np.array(all_ranks))
    # Alter output here
    logging.info(
        "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
    )
    return metrics
def main():
    """Evaluate pretrained FLAVA on the CREPE productivity hard-negative
    retrieval sets, one complexity level (4..12) at a time."""
    args = setup_args()
    if args.output_dir:
        output_dir = os.path.join(args.output_dir, 'flava')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    # Load the model
    flava = flava_model(pretrained=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Using device: {device}")
    flava = flava.to(device)
    flava.eval()
    for hard_neg_type in args.hard_neg_types:
        all_metrics = {}
        # Iterate over each complexity
        for i in range(4, 13):
            print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
            start_time = time()
            retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
            data = get_data(args, retrieval_data_path)
            # NOTE(review): evaluate() is declared with a fifth positional
            # parameter (output_path) that is not supplied here — confirm its
            # signature, otherwise this call raises TypeError.
            metrics = evaluate(flava, data, i, hard_neg_type)
            print(f'Complexity {i} took {time() - start_time} seconds')
            all_metrics[i] = metrics
        # Persist per-complexity metrics for this hard-negative type.
        if args.output_dir:
            output = os.path.join(output_dir, f'productivity_flava_{hard_neg_type}_metrics.json')
            print("saving results to:", output)
            with open(output, 'w') as f:
                json.dump(all_metrics, f)
| 6,056 | 31.918478 | 135 | py |
CREPE | CREPE-master/crepe_prod_eval_clip.py | import logging
import os
from time import time
import json
import torch
import torchvision.transforms.functional as TF
import clip
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def collator(batch):
    """Stack images into a batch and concatenate the per-item text sets."""
    image_batch = torch.stack([sample[0] for sample in batch], dim=0)
    text_batch = torch.cat([sample[1] for sample in batch], dim=0)
    return image_batch, text_batch
### DATASET CONSTRUCTION
class CsvDataset(BaseCsvDataset):
    """CSV-backed retrieval dataset preprocessed for OpenAI CLIP."""

    def __init__(self, input_filename, args, processor, device):
        super().__init__(input_filename, args)
        # CLIP's image preprocessing callable (as returned by clip.load).
        self.processor = processor
        self.device = device

    def __getitem__(self, idx):
        raw_image = self.get_image_by_id(self.images[idx])
        if self.crop:
            # Restrict the image to the annotated region.
            raw_image = TF.crop(
                raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx]
            )
        image = self.processor(raw_image)
        # Ground-truth caption first, followed by its hard negatives.
        candidates = [str(self.captions[idx])] + list(self.hard_negs[idx])
        return image, self.process_text(candidates)

    def process_text(self, texts):
        """Tokenize each text with CLIP's tokenizer and stack into one tensor."""
        tokenized = [clip.tokenize(text, truncate=True) for text in texts]
        return torch.cat(tokenized)
def get_data(args, retrieval_data_path, processor, device):
    """Build the evaluation DataLoader for one retrieval CSV file."""
    dataset = CsvDataset(retrieval_data_path, args, processor, device)
    loader = DataLoader(
        dataset,
        batch_size=16,
        shuffle=False,       # evaluation order must be deterministic
        num_workers=1,
        pin_memory=True,
        sampler=None,
        drop_last=False,
        collate_fn=collator,
    )
    # Attach bookkeeping attributes expected by the evaluation loop.
    loader.num_samples = len(dataset)
    loader.num_batches = len(loader)
    return DataInfo(loader)
### EVALUATION
def evaluate(model, data, complexity, negative_type, device):
    """Run one2many image-to-text retrieval evaluation for OpenAI CLIP.

    Args:
        model: CLIP model exposing encode_image / encode_text.
        data: DataInfo wrapping the evaluation dataloader.
        complexity: complexity level under evaluation (unused here).
        negative_type: hard-negative type under evaluation (unused here).
        device: torch device to run inference on.

    Returns:
        Dict of retrieval metrics (mean/median rank, R@k, ...).
    """
    dataloader = data.dataloader
    one2many = dataloader.dataset.one2many
    if one2many:
        all_ranks = []
    with torch.no_grad():
        for batch_idx, (images, texts) in enumerate(dataloader):
            images = images.to(device)
            texts = texts.to(device)
            if one2many:
                # Embed and L2-normalize both modalities.
                image_emb = model.encode_image(images)
                image_emb /= image_emb.norm(dim=-1, keepdim=True)
                text_emb = model.encode_text(texts)
                text_emb /= text_emb.norm(dim=-1, keepdim=True)
                # Each image is paired with a contiguous set of captions
                # (ground truth first, then its hard negatives).
                set_size = text_emb.shape[0] // image_emb.shape[0]
                for j in range(image_emb.shape[0]):
                    img_vec = image_emb[j:j+1, :]
                    txt_set = text_emb[j*set_size:(j+1)*set_size, :]
                    all_ranks.append(get_one2many_rank(img_vec, txt_set))
                print(f'Processed example {batch_idx*16}')
    metrics = get_one2many_metrics(np.array(all_ranks))
    # Alter output here
    logging.info(
        "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
    )
    return metrics
def main():
    """Evaluate an OpenAI CLIP model on the CREPE productivity hard-negative
    retrieval sets, one complexity level (4..12) at a time."""
    args = setup_args()
    if args.output_dir:
        output_dir = os.path.join(args.output_dir, 'open_ai_clip')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
    # Load the model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model, preprocess = clip.load(name = args.model_name, device=device)
    model = model.to(device)
    model.eval()
    # BUG FIX: the filesystem-safe model name was recomputed on every loop
    # iteration (it is loop-invariant), and any model name outside the old
    # if/elif chain (e.g. "RN50x4") left it undefined, raising NameError at
    # the output step. Compute it once with a safe fallback instead.
    save_names = {
        "RN50": "RN50",
        "RN101": "RN101",
        "ViT-B/32": "vit_b32",
        "ViT-B/16": "vit_b16",
        "ViT-L/14": "vit_l14",
    }
    model_save_name = save_names.get(
        args.model_name,
        args.model_name.replace('/', '_').replace('-', '_').lower())
    for hard_neg_type in args.hard_neg_types:
        all_metrics = {}
        # Iterate over each complexity
        for i in range(4, 13):
            print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
            start_time = time()
            retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
            data = get_data(args, retrieval_data_path, preprocess, device)
            metrics = evaluate(model, data, i, hard_neg_type, device)
            print(f'Complexity {i} took {time() - start_time} seconds')
            all_metrics[i] = metrics
        # Persist per-complexity metrics for this hard-negative type.
        if args.output_dir:
            output = os.path.join(output_dir, f'productivity_clip_{model_save_name}_{hard_neg_type}_metrics.json')
            print("saving results to:", output)
            with open(output, 'w') as f:
                json.dump(all_metrics, f)
| 5,220 | 30.642424 | 135 | py |
CREPE | CREPE-master/crepe_compo_eval_open_clip.py | import os
import json
import logging
import torch
import numpy as np
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from dataclasses import dataclass
from open_clip import tokenize, create_model_and_transforms
from crepe_eval_utils import BaseCsvDataset, get_one2many_metrics, get_one2many_rank, get_metrics
from crepe_params import setup_args
DATA2MODEL = {
'cc12m': {
'RN50-quickgelu': 'rn50-quickgelu-cc12m-f000538c.pt'
},
'yfcc': {
'RN50-quickgelu': 'rn50-quickgelu-yfcc15m-455df137.pt',
'RN101-quickgelu': 'rn101-quickgelu-yfcc15m-3e04b30e.pt'
},
'laion': {
'ViT-B-16':'vit_b_16-laion400m_e32-55e67d44.pt',
'ViT-B-16-plus-240': 'vit_b_16_plus_240-laion400m_e32-699c4b84.pt',
'ViT-B-32-quickgelu': 'vit_b_32-quickgelu-laion400m_e32-46683a32.pt',
'ViT-L-14': 'vit_l_14-laion400m_e32-3d133497.pt',
}
}
COMPO_SPLITS = ['seen_compounds', 'unseen_compounds']
COMPLEXITIES = list(range(4, 13))
@dataclass
class DataInfo:
    """Bundle of an evaluation DataLoader and its (optional) distributed sampler."""
    dataloader: DataLoader
    sampler: DistributedSampler
class CsvDataset(BaseCsvDataset):
    """CSV-backed retrieval dataset tokenized with OpenCLIP's tokenizer."""

    def __init__(self, input_filename, args, transforms):
        super().__init__(input_filename, args, transforms=transforms)

    def __getitem__(self, idx):
        raw_image = self.get_image_by_id(self.images[idx])
        if self.crop:
            # Restrict the image to the annotated region.
            raw_image = TF.crop(
                raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx]
            )
        image = self.transforms(raw_image)
        caption = str(self.captions[idx])
        if self.one2many:
            # Ground-truth caption first, followed by its hard negatives.
            texts = tokenize([caption] + list(self.hard_negs[idx]))
        else:
            texts = tokenize([caption])[0]
        return image, texts
def get_csv_dataset(args, preprocess_fn, is_train):
    """Build a DataInfo over ``args.val_data`` with the given image transform."""
    input_filename = args.val_data
    assert input_filename
    dataset = CsvDataset(input_filename, args, preprocess_fn)
    # No distributed sampler here, so shuffling is purely train-only.
    sampler = None
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=is_train and sampler is None,
        num_workers=1,
        pin_memory=True,
        sampler=sampler,
        drop_last=is_train,
    )
    # Attach bookkeeping attributes expected by the evaluation loop.
    loader.num_samples = len(dataset)
    loader.num_batches = len(loader)
    return DataInfo(loader, sampler)
def get_data(args, preprocess_fns):
    """Return {'val': DataInfo} built with the evaluation preprocessing."""
    _, preprocess_val = preprocess_fns
    return {"val": get_csv_dataset(args, preprocess_val, is_train=False)}
def evaluate(model, data, args):
    """Evaluate an OpenCLIP model on data['val'].

    Two modes, selected by the dataset's ``one2many`` flag:
    - one2many: per image, rank the ground-truth caption against its hard
      negatives (batch_size is expected to be 1; the text set is squeezed).
    - one-to-one: accumulate all features and compute pairwise retrieval
      metrics at the end.

    Returns a dict of retrieval metrics.
    """
    metrics = {}
    device = torch.device(args.device)
    model.eval()
    autocast = torch.cuda.amp.autocast
    dataloader = data['val'].dataloader
    # FIXME this does not scale past small eval datasets
    # all_image_features @ all_text_features will blow up memory and compute very quickly
    all_image_features, all_text_features = [], []
    one2many = dataloader.dataset.one2many
    if one2many:
        all_ranks = []
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            images, texts = batch
            images = images.to(device=device, non_blocking=True)
            texts = texts.to(device=device, non_blocking=True)
            if one2many:
                image_features = model.encode_image(images)
                image_features = F.normalize(image_features, dim=-1)
                # Drop the singleton batch dim so texts is (num_candidates, seq_len).
                texts = torch.squeeze(texts, dim=0)
                text_features = model.encode_text(texts)
                text_features = F.normalize(text_features, dim=-1)
                rank = get_one2many_rank(image_features, text_features)
                all_ranks.append(rank)
            else:
                with autocast():
                    image_features, text_features, logit_scale = model(images, texts)
                    # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly
                    # however, system RAM is easily exceeded and compute time becomes problematic
                    all_image_features.append(image_features.cpu())
                    all_text_features.append(text_features.cpu())
    if one2many:
        val_metrics = get_one2many_metrics(np.array(all_ranks))
        metrics.update(
            {**val_metrics}
        )
    else:
        val_metrics = get_metrics(
            image_features=torch.cat(all_image_features),
            text_features=torch.cat(all_text_features)
        )
        metrics.update(
            {**val_metrics}
        )
    logging.info("\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()]))
    return metrics
def gather_params(args, hard_neg_type, split):
    """Point ``args`` at the retrieval CSV for one (neg-type, split) combo.

    Mutates and returns ``args`` with ``val_data``, ``hard_neg_key`` and the
    fixed evaluation settings (one2many, crop, batch_size=1).

    Raises:
        NotImplementedError: for unknown compo_type / hard_neg_type combos.
    """
    compo = args.compo_type
    if compo == 'systematicity':
        if hard_neg_type not in ('atom', 'comp', 'combined'):
            raise NotImplementedError
        hard_neg_key = f'valid_hard_negs_{hard_neg_type}'
        csv_path = os.path.join(
            args.input_dir, f'syst_vg_hard_negs_{split}_in_{args.train_dataset}.csv')
    elif compo == 'productivity':
        if hard_neg_type not in ('atom', 'negate', 'swap'):
            raise NotImplementedError
        hard_neg_key = 'hard_negs'
        csv_path = os.path.join(
            args.input_dir, hard_neg_type,
            f'prod_vg_hard_negs_{hard_neg_type}_complexity_{split}.csv')
    else:
        raise NotImplementedError
    args.val_data = csv_path
    args.one2many = True
    args.crop = True
    args.hard_neg_key = hard_neg_key
    args.batch_size = 1
    return args
def main():
    """Evaluate the OpenCLIP checkpoints for one training dataset on the
    CREPE systematicity or productivity hard-negative retrieval sets."""
    args = setup_args()
    models = DATA2MODEL[args.train_dataset].keys()
    # Splits: compound seen/unseen for systematicity, complexities 4..12 for productivity.
    if args.compo_type == 'systematicity':
        splits = COMPO_SPLITS
    elif args.compo_type == 'productivity':
        splits = COMPLEXITIES
    if args.output_dir:
        if not os.path.exists(args.output_dir):
            os.mkdir(args.output_dir)
    if torch.cuda.is_available():
        device = 'cuda:0'
        torch.cuda.set_device(device)
    else:
        device = 'cpu'
    args.device = device
    device = torch.device(device)
    for model_name in models:
        # Each checkpoint file is expected under args.model_dir.
        pretrained = os.path.join(args.model_dir, DATA2MODEL[args.train_dataset][model_name])
        model, preprocess_train, preprocess_val = create_model_and_transforms(
            model_name,
            pretrained,
            precision='amp',
            device=device
        )
        for hard_neg_type in args.hard_neg_types:
            all_metrics = {}
            for split in splits:
                # params = gather_params(args, model, split)
                print('\n' + '*' * 45 +  f' Evaluating {model_name} {args.compo_type} on HN-{hard_neg_type.upper()} test set split {split} ' + '*' * 45  + '\n')
                args = gather_params(args, hard_neg_type, split)
                # initialize datasets
                data = get_data(args, (preprocess_train, preprocess_val))
                assert len(data), 'At least one dataset must be specified.'
                metrics = evaluate(model, data, args)
                all_metrics[split] = metrics
            # Persist per-split metrics for this model / hard-negative type.
            if args.output_dir:
                output = os.path.join(args.output_dir, f'{args.compo_type}_{args.train_dataset}_{model_name}_{hard_neg_type}_metrics.json')
                print("saving results to:", output)
                with open(output, 'w') as f:
                    json.dump(all_metrics, f)
| 7,732 | 34.15 | 160 | py |
CREPE | CREPE-master/crepe_eval_utils.py | import ast
import logging
import os
from PIL import Image
from dataclasses import dataclass
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
### DATASET CONSTRUCTION
class BaseCsvDataset(Dataset):
    """Shared CSV-backed retrieval dataset for the CREPE evaluations.

    Reads image ids, captions and (optionally) crop boxes / hard-negative
    caption lists from ``input_filename``. Subclasses must implement
    ``__getitem__`` with their model-specific preprocessing.
    """

    def __init__(self, input_filename, args, transforms=None):
        logging.debug(f'Loading csv data from {input_filename}.')
        df = pd.read_csv(input_filename)
        # print(f"Total number of examples: {len(df)}.")
        self.crop = args.crop
        if self.crop:
            assert 'x' in df.columns and 'y' in df.columns and 'width' in df.columns and 'height' in df.columns, "missing x, y, width, or height."
            self.xs = df['x'].tolist()
            self.ys = df['y'].tolist()
            self.heights = df['height'].tolist()
            self.widths = df['width'].tolist()
        self.one2many = args.one2many
        if self.one2many:
            # Each row stores its hard negatives as a stringified Python list.
            self.hard_negs = [ast.literal_eval(ls_str) for ls_str in df[args.hard_neg_key]]
        self.images = df[args.csv_img_key].tolist()
        self.captions = df[args.csv_caption_key].tolist()
        self.transforms = transforms

    def __len__(self):
        return len(self.captions)

    def get_image_by_id(self, image_id):
        # NOTE(review): hard-coded cluster paths; adjust for your environment.
        vg_image_paths = ['/nlp/scr/irena/data/visual_genome/img/VG_100K', '/nlp/scr/irena/data/visual_genome/img/VG_100K_2']
        for p in vg_image_paths:
            path = os.path.join(p, f"{image_id}.jpg")
            if os.path.exists(path):
                return Image.open(path).convert("RGB")
        raise FileNotFoundError(f'The image with id {image_id} is not found.')

    def __getitem__(self, idx):
        # BUG FIX: the old body used print + `assert(False)`, which is silently
        # stripped under `python -O`; raise explicitly instead.
        raise NotImplementedError('Subclasses must implement __getitem__.')
@dataclass
class DataInfo:
    """Lightweight wrapper exposing the evaluation DataLoader."""
    dataloader: DataLoader
# EVALUATION UTILITIES
def get_one2many_rank(image_features, text_features):
    """Rank of the ground-truth caption (candidate 0) for one image.

    The ground-truth caption is always placed first in the candidate set
    (see CsvDataset.__getitem__), so its rank is the position of index 0
    once similarity scores are sorted in descending order.
    """
    similarity = (image_features @ text_features.t()).detach().cpu()
    order = torch.argsort(similarity, descending=True)
    # Zero-based rank of candidate 0, returned as a numpy array.
    return torch.where(order == 0)[1].detach().cpu().numpy()
def get_one2many_metrics(preds, name='image_to_text'):
    """Summarize zero-based retrieval ranks into mean/median rank and R@k."""
    metrics = {
        f"{name}_mean_rank": preds.mean() + 1,  # ranks are 0-based
        f"{name}_rank_std": preds.std(),
        f"{name}_median_rank": np.floor(np.median(preds)) + 1,
    }
    for k in (1, 3, 5, 10):
        hits = preds < k
        metrics[f"{name}_R@{k}"] = np.mean(hits)
        metrics[f"{name}_R@{k}_std"] = np.std(hits)
    return metrics
def get_metrics(image_features, text_features):
    """One-to-one retrieval metrics in both directions (row i matches column i)."""
    sim_i2t = (image_features @ text_features.t()).detach().cpu()
    directions = {"image_to_text": sim_i2t, "text_to_image": sim_i2t.t()}
    # Ground truth is the diagonal: item i's match is candidate i.
    targets = torch.arange(len(text_features)).view(-1, 1)
    metrics = {}
    for name, scores in directions.items():
        order = torch.argsort(scores, descending=True)
        ranks = torch.where(order == targets)[1].detach().cpu().numpy()
        metrics[f"{name}_mean_rank"] = ranks.mean() + 1
        metrics[f"{name}_median_rank"] = np.floor(np.median(ranks)) + 1
        for k in (1, 3, 5, 10):
            metrics[f"{name}_R@{k}"] = np.mean(ranks < k)
    return metrics
| 3,542 | 35.525773 | 146 | py |
CREPE | CREPE-master/open_clip/openai.py | """ OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import os
import warnings
from typing import Union, List
import torch
from .model import build_model_from_openai_state_dict
from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
    """Return the names of the OpenAI-pretrained CLIP models this package can load."""
    return list_pretrained_tag_models('openai')
def load_openai_model(
        name: str,
        device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
        jit=True,
):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model (default) or more hackable non-JIT model.
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    # Resolve `name` to a local checkpoint path, downloading known tags.
    if get_pretrained_url(name, 'openai'):
        model_path = download_pretrained(get_pretrained_url(name, 'openai'))
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    if not jit:
        # Rebuild a plain (non-JIT) model from the state dict; fall back to
        # the nested "state_dict" layout (keys prefixed with "module.").
        try:
            model = build_model_from_openai_state_dict(state_dict or model.state_dict()).to(device)
        except KeyError:
            sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
            model = build_model_from_openai_state_dict(sd).to(device)
        if str(device) == "cpu":
            model.float()
        return model
    # patch the device names
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
    def patch_device(module):
        # Rewrite hard-coded "cuda" device constants in the traced graphs.
        try:
            graphs = [module.graph] if hasattr(module, "graph") else []
        except RuntimeError:
            graphs = []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            # Rewrite half-precision casts in the traced graphs to float32.
            try:
                graphs = [module.graph] if hasattr(module, "graph") else []
            except RuntimeError:
                graphs = []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    # ensure image_size attr available at consistent location for both jit and non-jit
    model.visual.image_size = model.input_resolution.item()
    return model
| 4,503 | 34.464567 | 117 | py |
CREPE | CREPE-master/open_clip/transform.py | from torchvision.transforms import Normalize, Compose, RandomResizedCrop, ToTensor, Resize, \
CenterCrop
from PIL import Image
def _convert_to_rgb(image):
    # Ensure a 3-channel RGB PIL image (handles e.g. grayscale/RGBA inputs).
    return image.convert('RGB')
def image_transform(
        image_size: int,
        is_train: bool,
        mean=(0.48145466, 0.4578275, 0.40821073),
        std=(0.26862954, 0.26130258, 0.27577711)
):
    """Build the CLIP image preprocessing pipeline.

    Training uses a random resized crop; evaluation uses a deterministic
    resize + center crop. Both paths convert to RGB, tensorize and
    normalize with the given mean/std.
    """
    normalize = Normalize(mean=mean, std=std)
    if is_train:
        steps = [
            RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=Image.BICUBIC),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ]
    else:
        steps = [
            Resize(image_size, interpolation=Image.BICUBIC),
            CenterCrop(image_size),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ]
    return Compose(steps)
| 850 | 26.451613 | 93 | py |
CREPE | CREPE-master/open_clip/loss.py | import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
        image_features,
        text_features,
        local_loss=False,
        gather_with_grad=False,
        rank=0,
        world_size=1,
        use_horovod=False
):
    """Gather image/text features from every distributed worker.

    Returns (all_image_features, all_text_features) concatenated across all
    ``world_size`` workers. When ``gather_with_grad`` is False and
    ``local_loss`` is False, this rank's slice of the gathered tensors is
    replaced with the original grad-carrying tensors so gradients still flow.
    Uses Horovod allgather when ``use_horovod``, torch.distributed otherwise.
    """
    if use_horovod:
        assert hvd is not None, 'Please install horovod'
        if gather_with_grad:
            all_image_features = hvd.allgather(image_features)
            all_text_features = hvd.allgather(text_features)
        else:
            with torch.no_grad():
                all_image_features = hvd.allgather(image_features)
                all_text_features = hvd.allgather(text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
                gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
                all_image_features = torch.cat(gathered_image_features, dim=0)
                all_text_features = torch.cat(gathered_text_features, dim=0)
    else:
        # We gather tensors from all gpus
        if gather_with_grad:
            all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
            all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
        else:
            # dist.all_gather writes into preallocated buffers (no gradient).
            gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
            gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
            dist.all_gather(gathered_image_features, image_features)
            dist.all_gather(gathered_text_features, text_features)
            if not local_loss:
                # ensure grads for local rank when all_* features don't have a gradient
                gathered_image_features[rank] = image_features
                gathered_text_features[rank] = text_features
            all_image_features = torch.cat(gathered_image_features, dim=0)
            all_text_features = torch.cat(gathered_text_features, dim=0)
    return all_image_features, all_text_features
class ClipLoss(nn.Module):
    """Symmetric contrastive (InfoNCE) loss for CLIP training.

    Averages the image->text and text->image cross-entropy over in-batch
    logits. With ``world_size > 1``, features are pulled from all workers
    via gather_features() so each rank contrasts against the global batch.
    """

    def __init__(
            self,
            local_loss=False,
            gather_with_grad=False,
            cache_labels=False,
            rank=0,
            world_size=1,
            use_horovod=False,
    ):
        super().__init__()
        self.local_loss = local_loss
        self.gather_with_grad = gather_with_grad
        self.cache_labels = cache_labels
        self.rank = rank
        self.world_size = world_size
        self.use_horovod = use_horovod
        # cache state
        self.prev_num_logits = 0
        self.labels = {}

    def forward(self, image_features, text_features, logit_scale):
        device = image_features.device
        if self.world_size > 1:
            all_image_features, all_text_features = gather_features(
                image_features, text_features,
                self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
            if self.local_loss:
                # Only this rank's features on the query side, gathered on the key side.
                logits_per_image = logit_scale * image_features @ all_text_features.T
                logits_per_text = logit_scale * text_features @ all_image_features.T
            else:
                logits_per_image = logit_scale * all_image_features @ all_text_features.T
                logits_per_text = logits_per_image.T
        else:
            logits_per_image = logit_scale * image_features @ text_features.T
            logits_per_text = logit_scale * text_features @ image_features.T
        # calculated ground-truth and cache if enabled
        num_logits = logits_per_image.shape[0]
        if self.prev_num_logits != num_logits or device not in self.labels:
            labels = torch.arange(num_logits, device=device, dtype=torch.long)
            if self.world_size > 1 and self.local_loss:
                # With local loss, this rank's positives sit at an offset
                # within the gathered key batch.
                labels = labels + num_logits * self.rank
            if self.cache_labels:
                self.labels[device] = labels
                self.prev_num_logits = num_logits
        else:
            labels = self.labels[device]
        total_loss = (
            F.cross_entropy(logits_per_image, labels) +
            F.cross_entropy(logits_per_text, labels)
        ) / 2
        return total_loss
| 4,658 | 39.513043 | 101 | py |
CREPE | CREPE-master/open_clip/utils.py | from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
def freeze_batch_norm_2d(module, module_match=None, name=''):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.
    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty or None)
        name (str): Full module name (prefix)
    Returns:
        torch.nn.Module: Resulting module
    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    # BUG FIX: avoid the mutable-default-argument anti-pattern; None behaves
    # exactly like the old `{}` default.
    if module_match is None:
        module_match = {}
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
        # Copy the learned affine params and running statistics into a frozen
        # replacement module.
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        # Recurse into children, re-attaching any that were converted.
        for child_name, child in module.named_children():
            full_child_name = '.'.join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res
CREPE | CREPE-master/open_clip/model.py | """ CLIP Model
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
from collections import OrderedDict
from dataclasses import dataclass
from typing import Tuple, Union, Callable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
from .timm_model import TimmModel
from .utils import freeze_batch_norm_2d
class Bottleneck(nn.Module):
    """ResNet bottleneck block, CLIP variant.
    All convolutions have stride 1; spatial downsampling (stride > 1) is done
    with an AvgPool2d after conv2 (anti-aliasing) and a pooled shortcut.
    """
    # Output channels = planes * expansion.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))
    def forward(self, x: torch.Tensor):
        """Standard residual forward: three conv stages plus (projected) identity."""
        identity = x
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu3(out)
        return out
class AttentionPool2d(nn.Module):
    """Global attention pooling over a 2D feature map (CLIP ResNet head).
    Flattens NCHW features to a token sequence, prepends the spatial mean as
    an extra token, adds learned positional embeddings, and runs one
    multi-head attention pass; the output at the mean token is returned.
    """
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # +1 position for the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
        # Functional MHA with separate q/k/v projection weights.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # Return only the pooled (mean-token) output: shape N x output_dim.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, image_size=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.image_size = image_size
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.relu3 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(2)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
        self.init_parameters()
    def _make_layer(self, planes, blocks, stride=1):
        # First block downsamples (if stride > 1); the rest keep resolution.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def init_parameters(self):
        """Init attention-pool projections and zero the last BN of each block."""
        if self.attnpool is not None:
            std = self.attnpool.c_proj.in_features ** -0.5
            nn.init.normal_(self.attnpool.q_proj.weight, std=std)
            nn.init.normal_(self.attnpool.k_proj.weight, std=std)
            nn.init.normal_(self.attnpool.v_proj.weight, std=std)
            nn.init.normal_(self.attnpool.c_proj.weight, std=std)
        for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
            for name, param in resnet_block.named_parameters():
                # Zero-init the final BN gamma so each block starts as identity.
                if name.endswith("bn3.weight"):
                    nn.init.zeros_(param)
    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters (and optionally BN running stats)."""
        assert unlocked_groups == 0, 'partial locking not currently supported for this model'
        for param in self.parameters():
            param.requires_grad = False
        if freeze_bn_stats:
            freeze_batch_norm_2d(self)
    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        # FIXME support for non-transformer
        pass
    def stem(self, x):
        # Three conv-bn-relu stages followed by 2x average pooling.
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.avgpool(x)
        return x
    def forward(self, x):
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Attention pooling replaces global average pooling.
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""
    def forward(self, x: torch.Tensor):
        # NOTE(review): normalization runs in the input's own dtype and the
        # result is cast back to it; there is no explicit fp32 upcast here —
        # confirm this matches the intended fp16 handling.
        orig_type = x.dtype
        x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        return x.to(orig_type)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x).
    NOTE: slower than nn.GELU or nn.SiLU and uses more GPU memory.
    """
    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(x.mul(1.702))
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: x += attn(ln_1(x)); x += mlp(ln_2(x))."""
    def __init__(self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, mlp_width)),
            ("gelu", act_layer()),
            ("c_proj", nn.Linear(mlp_width, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
    def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        # Self-attention; attention weights are discarded.
        return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of ResidualAttentionBlocks; expects LND-ordered (seq, batch, dim) input."""
    def __init__(self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU):
        super().__init__()
        self.width = width
        self.layers = layers
        # When True (and not under torch.jit scripting), each block runs under
        # activation checkpointing to trade compute for memory.
        self.grad_checkpointing = False
        self.resblocks = nn.ModuleList([
            ResidualAttentionBlock(width, heads, mlp_ratio, act_layer=act_layer)
            for _ in range(layers)
        ])
    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
        for r in self.resblocks:
            if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(r, x, attn_mask)
            else:
                x = r(x, attn_mask=attn_mask)
        return x
class VisualTransformer(nn.Module):
    """ViT image tower: patchify conv, class token + positional embedding,
    transformer, then a linear projection of the class token to output_dim."""
    def __init__(
            self, image_size: int, patch_size: int, width: int, layers: int, heads: int, mlp_ratio: float,
            output_dim: int, act_layer: Callable = nn.GELU):
        super().__init__()
        self.image_size = image_size
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((image_size // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, mlp_ratio, act_layer=act_layer)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """Freeze all parameters (LiT-style locking)."""
        assert unlocked_groups == 0, 'partial locking not currently supported for this model'
        for param in self.parameters():
            param.requires_grad = False
    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.transformer.grad_checkpointing = enable
    def forward(self, x: torch.Tensor):
        x = self.conv1(x) # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
        # Prepend the class token to every sequence in the batch.
        x = torch.cat(
            [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
             x], dim=1) # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2) # LND -> NLD
        # Only the class token feeds the output projection.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
@dataclass
class CLIPVisionCfg:
    """Vision tower hyperparameters.
    A tuple `layers` selects ModifiedResNet (per-stage block counts); an int
    selects VisualTransformer. A non-empty `timm_model_name` overrides both.
    """
    layers: Union[Tuple[int, int, int, int], int] = 12
    width: int = 768
    head_width: int = 64
    mlp_ratio: float = 4.0
    patch_size: int = 16
    image_size: Union[Tuple[int, int], int] = 224
    timm_model_name: str = None  # a valid model name overrides layers, width, patch_size
    timm_model_pretrained: bool = False  # use (imagenet) pretrained weights for named model
    timm_pool: str = 'avg'  # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
    timm_proj: str = 'linear'  # linear projection for timm model output ('linear', 'mlp', '')
@dataclass
class CLIPTextCfg:
    """Text tower hyperparameters for the CLIP text transformer."""
    context_length: int = 77
    vocab_size: int = 49408
    width: int = 512
    heads: int = 8
    layers: int = 12
class CLIP(nn.Module):
    """Contrastive Language-Image Pretraining model.
    Pairs a visual tower (timm model, ModifiedResNet, or VisualTransformer,
    chosen from `vision_cfg`) with a causal text transformer. `forward`
    returns L2-normalized image and text embeddings plus exp(logit_scale).
    """
    def __init__(
            self,
            embed_dim: int,
            vision_cfg: CLIPVisionCfg,
            text_cfg: CLIPTextCfg,
            quick_gelu: bool = False,
    ):
        super().__init__()
        # Accept plain dicts (e.g. parsed JSON configs) as well as dataclasses.
        if isinstance(vision_cfg, dict):
            vision_cfg = CLIPVisionCfg(**vision_cfg)
        if isinstance(text_cfg, dict):
            text_cfg = CLIPTextCfg(**text_cfg)
        self.context_length = text_cfg.context_length
        # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
        # memory efficient in recent PyTorch releases (>= 1.10).
        # NOTE: timm models always use native GELU regardless of quick_gelu flag.
        act_layer = QuickGELU if quick_gelu else nn.GELU
        if vision_cfg.timm_model_name:
            self.visual = TimmModel(
                vision_cfg.timm_model_name,
                pretrained=vision_cfg.timm_model_pretrained,
                pool=vision_cfg.timm_pool,
                proj=vision_cfg.timm_proj,
                embed_dim=embed_dim,
                image_size=vision_cfg.image_size
            )
            act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
        elif isinstance(vision_cfg.layers, (tuple, list)):
            # Tuple/list of layer counts selects the modified ResNet tower.
            vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
            self.visual = ModifiedResNet(
                layers=vision_cfg.layers,
                output_dim=embed_dim,
                heads=vision_heads,
                image_size=vision_cfg.image_size,
                width=vision_cfg.width
            )
        else:
            vision_heads = vision_cfg.width // vision_cfg.head_width
            self.visual = VisualTransformer(
                image_size=vision_cfg.image_size,
                patch_size=vision_cfg.patch_size,
                width=vision_cfg.width,
                layers=vision_cfg.layers,
                heads=vision_heads,
                mlp_ratio=vision_cfg.mlp_ratio,
                output_dim=embed_dim,
                act_layer=act_layer,
            )
        self.transformer = Transformer(
            width=text_cfg.width,
            layers=text_cfg.layers,
            heads=text_cfg.heads,
            act_layer=act_layer,
        )
        self.vocab_size = text_cfg.vocab_size
        self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, text_cfg.width))
        self.ln_final = LayerNorm(text_cfg.width)
        self.text_projection = nn.Parameter(torch.empty(text_cfg.width, embed_dim))
        # Learnable temperature, initialized to 1/0.07 (in log space).
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        # Non-persistent so the causal mask is not stored in checkpoints.
        self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
        self.init_parameters()
    def init_parameters(self):
        """Initialize embeddings, logit scale, vision tower, and transformer weights."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)
        nn.init.constant_(self.logit_scale, np.log(1 / 0.07))
        if hasattr(self.visual, 'init_parameters'):
            self.visual.init_parameters()
        # Depth-scaled init for residual projections (GPT-2 style).
        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1) # zero out the lower diagonal
        return mask
    def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
        # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
        self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        self.visual.set_grad_checkpointing(enable)
        self.transformer.grad_checkpointing = enable
    def encode_image(self, image):
        return self.visual(image)
    def encode_text(self, text):
        """Embed token-id tensor `text` ([batch, n_ctx]) into the joint space."""
        x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding
        x = x.permute(1, 0, 2) # NLD -> LND
        x = self.transformer(x, attn_mask=self.attn_mask)
        x = x.permute(1, 0, 2) # LND -> NLD
        x = self.ln_final(x)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x
    def forward(self, image, text):
        """Return (image_features, text_features, logit_scale.exp());
        if either input is None, return only the other modality's features."""
        if image is None:
            return self.encode_text(text)
        elif text is None:
            return self.encode_image(image)
        image_features = self.encode_image(image)
        image_features = F.normalize(image_features, dim=-1)
        text_features = self.encode_text(text)
        text_features = F.normalize(text_features, dim=-1)
        return image_features, text_features, self.logit_scale.exp()
def convert_weights_to_fp16(model: nn.Module):
    """Convert applicable model parameters to fp16, in place."""
    def _to_half(module):
        # Plain conv / linear layers: cast weight and optional bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()
        # MultiheadAttention stores projections as raw parameters/attributes.
        if isinstance(module, nn.MultiheadAttention):
            attr_names = [
                "in_proj_weight", "q_proj_weight", "k_proj_weight", "v_proj_weight",
                "in_proj_bias", "bias_k", "bias_v",
            ]
            for attr_name in attr_names:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()
        # CLIP-specific projection parameters stored directly on modules.
        for attr_name in ("text_projection", "proj"):
            proj = getattr(module, attr_name, None)
            if proj is not None:
                proj.data = proj.data.half()
    model.apply(_to_half)
def build_model_from_openai_state_dict(state_dict: dict):
    """Instantiate a CLIP model from an OpenAI-released state dict.
    All architecture hyperparameters (tower type, widths, depths, patch size,
    image size, vocab, context length) are inferred from tensor shapes and
    key names in the state dict; weights are then loaded in fp16 and the
    model is returned in eval mode.
    """
    # Presence of the ViT projection key distinguishes ViT from ResNet towers.
    vit = "visual.proj" in state_dict
    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len(
            [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Positional embedding has grid**2 + 1 entries (extra class token).
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_size = vision_patch_size * grid_size
    else:
        # ResNet: count distinct block indices per stage.
        counts: list = [
            len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # ResNet downsamples by a total factor of 32.
        image_size = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
    vision_cfg = CLIPVisionCfg(
        layers=vision_layers,
        width=vision_width,
        patch_size=vision_patch_size,
        image_size=image_size,
    )
    text_cfg = CLIPTextCfg(
        context_length=context_length,
        vocab_size=vocab_size,
        width=transformer_width,
        heads=transformer_heads,
        layers=transformer_layers
    )
    model = CLIP(
        embed_dim,
        vision_cfg=vision_cfg,
        text_cfg=text_cfg,
        quick_gelu=True, # OpenAI models were trained with QuickGELU
    )
    # Drop bookkeeping keys that are not parameters/buffers.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        state_dict.pop(key, None)
    convert_weights_to_fp16(model)
    model.load_state_dict(state_dict)
    return model.eval()
def trace_model(model, batch_size=256, device=torch.device('cpu')):
    """JIT-trace `forward`, `encode_text`, and `encode_image` with dummy
    inputs of the model's native image size and context length; returns the
    traced module with `visual.image_size` restored."""
    model.eval()
    image_size = model.visual.image_size
    example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
    example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
    model = torch.jit.trace_module(
        model,
        inputs=dict(
            forward=(example_images, example_text),
            encode_text=(example_text,),
            encode_image=(example_images,)
        ))
    # Tracing loses this attribute; callers (e.g. transforms) rely on it.
    model.visual.image_size = image_size
    return model
| 21,811 | 37.95 | 120 | py |
CREPE | CREPE-master/open_clip/factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLIP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
    """Rebuild the module-level _MODEL_CONFIGS registry from JSON files found
    in _MODEL_CONFIG_PATHS (each path may be a .json file or a directory)."""
    global _MODEL_CONFIGS
    config_ext = ('.json',)
    config_files = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f'*{ext}'))
    for cf in config_files:
        with open(cf, 'r') as f:
            model_cfg = json.load(f)
            # Only keep configs that define a complete CLIP architecture.
            if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
                _MODEL_CONFIGS[cf.stem] = model_cfg
    # Natural-sort keys so numeric suffixes order sensibly (ViT-B-16 < ViT-B-32).
    _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location='cpu'):
    """Load a model state dict from *checkpoint_path*.
    Accepts either a raw state dict or a training checkpoint containing a
    'state_dict' entry; strips a leading 'module.' prefix left behind by
    (Distributed)DataParallel wrappers.
    """
    checkpoint = torch.load(checkpoint_path, map_location=map_location)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    first_key = next(iter(state_dict.items()))[0]
    if first_key.startswith('module'):
        state_dict = {key[7:]: value for key, value in state_dict.items()}
    return state_dict
def create_model(
        model_name: str,
        pretrained: str = '',
        precision: str = 'fp32',
        device: torch.device = torch.device('cpu'),
        jit: bool = False,
        force_quick_gelu: bool = False,
        pretrained_image: bool = False,
):
    """Build a CLIP model by name, optionally loading pretrained weights.
    `pretrained` may be 'openai' (download the OpenAI release), a tag with a
    known download URL, or a local checkpoint path. Precision 'fp16' requires
    a non-CPU device; `jit=True` returns a scripted/JIT model.
    Raises RuntimeError when the model config or pretrained weights are missing.
    """
    model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
    if pretrained.lower() == 'openai':
        logging.info(f'Loading pretrained {model_name} from OpenAI.')
        model = load_openai_model(model_name, device=device, jit=jit)
        # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
        if precision == "amp" or precision == "fp32":
            model = model.float()
    else:
        if model_name in _MODEL_CONFIGS:
            logging.info(f'Loading {model_name} model config.')
            model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
        else:
            logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
            raise RuntimeError(f'Model config for {model_name} not found.')
        if force_quick_gelu:
            # override for use of QuickGELU on non-OpenAI transformer models
            model_cfg["quick_gelu"] = True
        if pretrained_image:
            if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
                # pretrained weight loading for timm models set via vision_cfg
                model_cfg['vision_cfg']['timm_model_pretrained'] = True
            else:
                assert False, 'pretrained image towers currently only supported for timm models'
        model = CLIP(**model_cfg)
        if pretrained:
            checkpoint_path = ''
            # Prefer a known download URL; fall back to a local file path.
            url = get_pretrained_url(model_name, pretrained)
            if url:
                checkpoint_path = download_pretrained(url)
            elif os.path.exists(pretrained):
                checkpoint_path = pretrained
            if checkpoint_path:
                logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
                model.load_state_dict(load_state_dict(checkpoint_path))
            else:
                logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
                raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
        model.to(device=device)
        if precision == "fp16":
            # fp16 conversion is only supported off-CPU.
            assert device.type != 'cpu'
            convert_weights_to_fp16(model)
        if jit:
            model = torch.jit.script(model)
    return model
def create_model_and_transforms(
        model_name: str,
        pretrained: str = '',
        precision: str = 'fp32',
        device: torch.device = torch.device('cpu'),
        jit: bool = False,
        force_quick_gelu: bool = False,
        pretrained_image: bool = False,
):
    """Convenience wrapper around create_model that also builds the matching
    train/val image preprocessing transforms for the model's image size.
    Returns (model, preprocess_train, preprocess_val)."""
    model = create_model(
        model_name, pretrained, precision, device, jit,
        force_quick_gelu=force_quick_gelu,
        pretrained_image=pretrained_image)
    preprocess_train = image_transform(model.visual.image_size, is_train=True)
    preprocess_val = image_transform(model.visual.image_size, is_train=False)
    return model, preprocess_train, preprocess_val
def list_models():
    """ enumerate available model architectures based on config files """
    return [model_name for model_name in _MODEL_CONFIGS]
def add_model_config(path):
    """ add model config path or file and update registry """
    if not isinstance(path, Path):
        path = Path(path)
    # Register the new search path, then re-scan so its configs become visible.
    _MODEL_CONFIG_PATHS.append(path)
    _rescan_model_configs()
| 5,455 | 34.660131 | 106 | py |
CREPE | CREPE-master/open_clip/tokenizer.py | """ CLIP tokenizer
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
@lru_cache()
def default_bpe():
    # Path to the bundled BPE vocab file, resolved relative to this module
    # (cached so the path is computed once).
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """
    Returns a dict mapping every utf-8 byte (0-255) to a printable unicode
    character, reversibly.
    The reversible bpe codes work on unicode strings, so you need a large
    number of unicode characters in your vocab to avoid UNKs; this table also
    avoids mapping to whitespace/control characters the bpe code barfs on.
    Printable latin-1 bytes map to themselves; the remaining bytes are shifted
    up past 255 (chr(256), chr(257), ...).
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = printable[:]
    char_codes = printable[:]
    shift = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            byte_values.append(byte)
            char_codes.append(2 ** 8 + shift)
            shift += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.
    Word is represented as a tuple of symbols (symbols being variable-length
    strings).
    """
    left = word[0]
    pairs = set()
    for right in word[1:]:
        pairs.add((left, right))
        left = right
    return pairs
def basic_clean(text):
    """Fix mojibake via ftfy and undo (possibly double) HTML escaping, then trim."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse runs of whitespace to single spaces and strip the ends."""
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed.strip()
class SimpleTokenizer(object):
    """Byte-level BPE tokenizer used by CLIP (lower-cased input).
    Vocabulary = 256 byte symbols + their '</w>' end-of-word variants +
    merged subwords from the gzipped merges file + special tokens.
    """
    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        # Skip the header line and the tail reserved for non-merge entries.
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        if not special_tokens:
            special_tokens = ['<start_of_text>', '<end_of_text>']
        else:
            special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
        vocab.extend(special_tokens)
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank = merged earlier.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass BPE (cache maps them to themselves).
        self.cache = {t:t for t in special_tokens}
        special = "|".join(special_tokens)
        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
        self.vocab_size = len(self.encoder)
        self.all_special_ids = [self.encoder[t] for t in special_tokens]
    def bpe(self, token):
        """Apply BPE merges to one token; returns its space-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        # Mark the last character as word-final before merging.
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)
        if not pairs:
            return token+'</w>'
        while True:
            # Greedily merge the lowest-ranked adjacent pair until none apply.
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def encode(self, text):
        """Encode cleaned, lower-cased *text* into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw utf-8 bytes into the reversible unicode alphabet first.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def decode(self, tokens):
        """Decode a list of token ids back into a string ('</w>' becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
_tokenizer = SimpleTokenizer()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)
    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = _tokenizer.encoder["<start_of_text>"]
    eot_token = _tokenizer.encoder["<end_of_text>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            # Truncate, but keep the end-of-text marker in the final slot:
            # CLIP's encode_text locates the EOT token via argmax over the
            # sequence, so silently dropping it would make the text encoder
            # read features from an arbitrary position for long inputs.
            tokens = tokens[:context_length]
            tokens[-1] = eot_token
        result[i, :len(tokens)] = torch.tensor(tokens)
    return result
| 6,637 | 33.936842 | 121 | py |
CREPE | CREPE-master/open_clip/timm_model.py | """ timm model adapter
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
from collections import OrderedDict
import torch.nn as nn
try:
import timm
from timm.models.layers import Mlp, to_2tuple
from timm.models.layers.attention_pool2d import RotAttentionPool2d
from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
except ImportError as e:
timm = None
from .utils import freeze_batch_norm_2d
class TimmModel(nn.Module):
    """ timm model adapter
    Wraps a timm backbone as a CLIP vision tower: the classifier is reset and
    a pooling + projection head maps features to `embed_dim`.
    # FIXME this adapter is a work in progress, may change in ways that break weight compat
    """
    def __init__(
            self,
            model_name,
            embed_dim,
            image_size=224,
            pool='avg',
            proj='linear',
            drop=0.,
            pretrained=False):
        super().__init__()
        if timm is None:
            raise RuntimeError("Please `pip install timm` to use timm models.")
        self.image_size = to_2tuple(image_size)
        self.trunk = timm.create_model(model_name, pretrained=pretrained)
        feat_size = self.trunk.default_cfg.get('pool_size', None)
        # 2D feature maps (pool_size present) can use attention pooling.
        feature_ndim = 1 if not feat_size else 2
        if pool in ('abs_attn', 'rot_attn'):
            assert feature_ndim == 2
            # if attn pooling used, remove both classifier and default pool
            self.trunk.reset_classifier(0, global_pool='')
        else:
            # reset global pool if pool config set, otherwise leave as network default
            reset_kwargs = dict(global_pool=pool) if pool else {}
            self.trunk.reset_classifier(0, **reset_kwargs)
        prev_chs = self.trunk.num_features
        head_layers = OrderedDict()
        if pool == 'abs_attn':
            head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
            prev_chs = embed_dim
        elif pool == 'rot_attn':
            head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
            prev_chs = embed_dim
        else:
            assert proj, 'projection layer needed if non-attention pooling is used.'
        # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
        if proj == 'linear':
            head_layers['drop'] = nn.Dropout(drop)
            head_layers['proj'] = nn.Linear(prev_chs, embed_dim)
        elif proj == 'mlp':
            head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
        self.head = nn.Sequential(head_layers)
    def lock(self, unlocked_groups=0, freeze_bn_stats=False):
        """ lock modules
        Args:
            unlocked_groups (int): leave last n layer groups unlocked (default: 0)
        """
        if not unlocked_groups:
            # lock full model
            for param in self.trunk.parameters():
                param.requires_grad = False
            if freeze_bn_stats:
                freeze_batch_norm_2d(self.trunk)
        else:
            # NOTE: partial freeze requires latest timm (master) branch and is subject to change
            try:
                # FIXME import here until API stable and in an official release
                from timm.models.helpers import group_parameters, group_modules
            except ImportError:
                raise RuntimeError(
                    'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
            # Freeze all layer groups up to max_layer_id, leaving the last
            # `unlocked_groups` groups trainable.
            matcher = self.trunk.group_matcher()
            gparams = group_parameters(self.trunk, matcher)
            max_layer_id = max(gparams.keys())
            max_layer_id = max_layer_id - unlocked_groups
            for group_idx in range(max_layer_id + 1):
                group = gparams[group_idx]
                for param in group:
                    self.trunk.get_parameter(param).requires_grad = False
            if freeze_bn_stats:
                gmodules = group_modules(self.trunk, matcher, reverse=True)
                gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
                freeze_batch_norm_2d(self.trunk, gmodules)
    def forward(self, x):
        x = self.trunk(x)
        x = self.head(x)
        return x
| 4,300 | 39.196262 | 119 | py |
DNN_Rover | DNN_Rover-master/rover/Data.py | import time
import numpy as np
import h5py
import progressbar
import datetime
import tflearn
from tflearn.layers.core import input_data
import torchvision.models as models
from NetworkSwitch import *
import torch
import torch.nn as nn
from scipy.misc import imresize
from skimage.transform import resize
class Data():
    """Collects (image, steering-angle) pairs from a rover driving session and
    runs action inference with a previously trained network (TFLearn or PyTorch)."""
    def __init__(self, driver_name, rover_name, save_data, framework,
                 filename, network_name, input_shape, normalization, norm_vals,
                 num_out, image_type):
        # Rolling buffers for this session's collected training data.
        self.angles = []
        self.images = []
        self.start = time.time()  # session start time; used to name the saved dataset
        self.names = driver_name + '_' + rover_name
        self.save_data = save_data  # 'y'/'Y'/'yes'/'Yes' enables persisting data in save()
        self.framework = framework  # 'tf'/'TF' (TFLearn) or 'pt'/'PT' (PyTorch)
        self.filename = filename  # checkpoint path loaded by load_network()
        self.network_name =network_name
        self.input_shape = input_shape  # TFLearn input_data shape
        self.normalization = normalization  # None, 'instance_norm' or 'channel_norm'
        self.norm_vals = norm_vals  # per-channel offsets used by 'channel_norm'
        self.num_out = num_out  # number of network outputs (discrete actions)
        self.image_type = image_type  # e.g. 'color', 'grayscale' or 'framestack'
    def load_network(self):
        """Instantiate the model named by ``self.network_name`` and load weights
        from ``self.filename`` for the configured framework."""
        if self.framework in ['tf', 'TF']:
            # Residual architectures need training mode initialized before build.
            if self.network_name in ['ResNet34',
                                     'ResNet26',
                                     'ResNeXt34',
                                     'ResNeXt26']:
                tflearn.config.init_training_mode()
            # modelswitch maps the network name to its builder function.
            self.network_name = modelswitch[self.network_name]
            self.network = input_data(shape=self.input_shape)
            self.network = self.network_name(self.network,
                                             self.num_out,
                                             drop_prob=1.0)
            self.model = tflearn.DNN(self.network)
            self.model.load(self.filename)
        elif self.framework in ['PT', 'pt']:
            # torchvision model by name; replace final fc for num_out classes.
            self.network_name = models.__dict__[self.network_name]
            self.model=self.network_name()
            self.model.fc = nn.Linear(512, self.num_out)
            self.model.cuda()
            self.model.load_state_dict(torch.load(self.filename))
            self.model.eval()
        return
    def predict(self, s):
        """Return the argmax action index predicted for raw camera frame ``s``."""
        if self.framework in ['tf', 'TF']:
            # Crop the top 110 rows (sky/horizon) and add a batch axis.
            s = s[None, 110:, ...]
            if self.image_type in ['grayscale', 'framestack']:
                s = np.mean(s, 3, keepdims=True)
            if self.image_type in ['framestack']:
                # NOTE(review): self.framestack and self.stack are not set in
                # __init__ — presumably initialized elsewhere before the first
                # predict() call; verify against the caller.
                current = s
                self.framestack = np.concatenate((current,
                                                 self.framestack[:, :, :, 1:]), 3)
                s = self.framestack[:, :, :, self.stack]
            out = self.model.predict(s)
        elif self.framework in ['pt', 'PT']:
            # Resize to 224x224, HWC -> CHW, add batch axis, run on GPU.
            out = resize(s, (224, 224)).transpose((2, 0, 1))[None,...]
            out = torch.from_numpy(out).float().cuda()
            out = self.model(out).detach().cpu().numpy()[0, :]
        return np.argmax(out)
    def normalize(self, x):
        """Apply the configured normalization. NOTE: 'channel_norm' modifies
        the caller's array in place."""
        if self.normalization is not None:
            if self.normalization == 'instance_norm':
                x = (x - np.mean(x)) / (np.std(x) + 1e-6)
            elif self.normalization == 'channel_norm':
                # In-place per-channel mean subtraction.
                for j in range(x.shape[-1]):
                    x[..., j] -= self.norm_vals[j]
        return x
    def add_data(self, image, action):
        """Append one (image, action) sample to the session buffers."""
        self.angles.append(action)
        self.images.append(image)
        print('Collecting Data')
        return
    def save(self):
        """Write collected images/angles to an HDF5 file when saving is enabled.
        The filename encodes session length and driver/rover names."""
        if self.save_data in ['y', 'Y', 'yes', 'Yes']:
            print('Saving the Training Data you collected.')
            self.images = np.array(self.images, dtype='uint8')
            self.angles = np.array(self.angles, dtype='float16')
            elapsedTime = int(time.time() - self.start)
            dset_name = str(elapsedTime) + "seconds_" + self.names + ".h5"
            h5f = h5py.File(dset_name, 'w')
            h5f.create_dataset('X', data=self.images)
            h5f.create_dataset('Y', data=self.angles)
            h5f.close()
        return
| 3,962 | 35.027273 | 80 | py |
Rep-Learning | Rep-Learning-main/nips_supp/mom_iota.py | from scipy.io import loadmat
import numpy as np
import scipy
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import shelve

# Problem dimension and the fractions defining the two support patterns.
dim = 10
frac_false = .3
frac_true = .2
n_false = int(dim * frac_false)
n_true = int(dim * frac_true)

# Sweep the off-support weight iota over [0, 1] in steps of 0.01 and record
# the resulting alignment error for each value.
iota_grid = 0.01 * np.arange(101)
align_err = np.zeros((101,))
for k, w in enumerate(iota_grid):
    # Weight vectors: ones on the support entries, w everywhere else.
    b_false = np.concatenate((np.ones((n_false,)), w * np.ones((dim - n_false,))))
    b_true = np.concatenate((np.ones((n_true,)), w * np.ones((dim - n_true,))))
    cross = np.sum(b_true * b_false)
    align_err[k] = np.sqrt(np.sum(b_false)) * cross + np.sqrt(np.sum(b_true))

# Plot the normalized error curve against iota and export it as EPS.
plt.plot(iota_grid, align_err / np.max(align_err), 'b', linewidth=2)
plt.xlabel(r'$\iota$', fontsize=25)
plt.ylabel(r'$||\hat\mathbf{M} - \mathbf{M}||$', fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.tight_layout()
plt.grid(True)
plt.savefig('align_err_YS2.eps', format='eps')
plt.show()
| 1,116 | 23.822222 | 66 | py |
Rep-Learning | Rep-Learning-main/nips_supp/SVM_MNIST.py | import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from keras.datasets import mnist
import cvxpy as cp
# Load MNIST and standardize every image to zero mean / unit variance.
(train_X, train_y), (test_X, test_y) = mnist.load_data()
train_X=train_X.astype(float)
test_X=test_X.astype(float)
for i in range(len(train_X)):
    train_X[i]-=np.mean(train_X[i])
    train_X[i]/=np.std(train_X[i])
for i in range(len(test_X)):
    test_X[i]-=np.mean(test_X[i])
    test_X[i]/=np.std(test_X[i])
# Flatten the 28x28 images to 784-dimensional vectors.
trX=train_X.reshape([60000,784])
tsX=test_X.reshape([10000,784])
i1=0
i2=2
# One binary SVM per unordered digit pair: MAX*(MAX-1)/2 = 45 tasks.
MAX=10
NUM=int(MAX*(MAX-1)/2)
ctr=0
BT=np.zeros((NUM,784))  # raw SVM weight vector per task
BTC=np.zeros((NUM,784))  # weight vector whitened by the task covariance sqrt
COV=np.zeros((NUM,784,784))  # per-task test-set second-moment matrix
batch_sz = 800
for i1 in np.arange(MAX):
    for i2 in np.arange(i1+1,MAX):
        print(ctr)
        # Boolean masks selecting the two digit classes of this task.
        ly1=train_y==i1
        ly2=train_y==i2
        lt1=test_y==i1
        lt2=test_y==i2
        tr1=trX[ly1]+0.
        tr2=trX[ly2]+0.
        ts1=tsX[lt1]+0.
        ts2=tsX[lt2]+0.
        # Labels: +1 for digit i1, -1 for digit i2.
        y=-np.ones((np.sum(ly1)+np.sum(ly2>0)))
        y[:np.sum(ly1)]=1
        yt=-np.ones((np.sum(lt1)+np.sum(lt2>0)))
        yt[:np.sum(lt1)]=1
        ts12=np.concatenate([ts1,ts2])
        tr12=np.concatenate([tr1,tr2])
        # NOTE(review): np.random.choice samples WITH replacement by default,
        # so the batch may contain duplicate rows — confirm this is intended.
        batch = np.random.choice(tr12.shape[0],batch_sz)
        print("batch")
        print(batch[:10])
        tr_b = tr12[batch,:]
        y_b = y[batch]
        # Hard-margin linear SVM: minimize ||w||^2 s.t. y_i * <x_i, w> >= 1.
        w = cp.Variable(784)
        objective = cp.Minimize(cp.sum_squares(w))
        constraints = [1 <= cp.multiply(y_b, cp.matmul(tr_b,w))]
        #objective = cp.Minimize(cp.sum_squares(w) + 0.1 * cp.sum(cp.pos(1 - cp.multiply(y_b, cp.matmul(tr_b,w)))))
        #constraints = []
        prob = cp.Problem(objective, constraints)
        #result = prob.solve()
        result = prob.solve(solver=cp.CVXOPT,abstol = 1e-4)
        #result = prob.solve(solver=cp.CBC,maximumSeconds = 10)
        bt = w.value
        #print(cp.matmul(tr_b,w.value).shape)
        #print(y_b.shape)
        #print(cp.multiply(y_b, cp.matmul(tr_b,w.value)).value)
        #ytest = y*cp.matmul(tr12,w)
        #print(ytest.shape)
        #bt = np.mean(ts1, axis=0) - np.mean(ts2, axis=0)
        #bt=npl.pinv(tr12).dot(y)
        # Test-set second-moment matrix for this pair of digits.
        COV12=ts12.T.dot(ts12)/len(ts12)
        COV[ctr]=COV12
        # Symmetric matrix square root of COV12 via its SVD.
        V,EIG,_=npl.svd(COV12)
        SQ=V.dot(np.diag(np.sqrt(EIG)).dot(V.T))
        BT[ctr]=bt
        btc=SQ.dot(bt)
        BTC[ctr]=btc
        ctr+=1
np.save('BT',BT)
np.save('COV',COV)
# Normalize each weight vector to unit length before computing alignments.
for i in range(NUM):
    BT[i,:]/=npl.norm(BT[i,:])
    BTC[i,:]/=npl.norm(BTC[i,:])
# Alignment computed with the WHITENED weight vectors (BTC).
COV_BT=BTC.T.dot(BTC)/NUM
COV_F=np.sum(COV,0)/NUM
EIG_BT=npl.eig(COV_BT)[0]
EIG_F=npl.eig(COV_F)[0]
V,EIG_BT,_=npl.svd(COV_BT)
SQ=V.dot(np.diag(np.sqrt(EIG_BT)).dot(V.T))
PROD=SQ.dot(COV_F).dot(SQ.T)
ID=np.identity(784)
PRODID=SQ.dot(ID).dot(SQ.T)
print('Identity alignment',np.sum(np.real(npl.eig(PRODID)[0]))/npl.norm(EIG_BT)/np.sqrt(784))
print('Canonical alignment',np.sum(np.real(npl.eig(PROD)[0]))/npl.norm(EIG_F)/npl.norm(EIG_BT))
# Same alignment computation repeated with the RAW weight vectors (BT).
COV_BT=BT.T.dot(BT)/NUM
COV_F=np.sum(COV,0)/NUM
EIG_BT=npl.eig(COV_BT)[0]
EIG_F=npl.eig(COV_F)[0]
V,EIG_BT,_=npl.svd(COV_BT)
SQ=V.dot(np.diag(np.sqrt(EIG_BT)).dot(V.T))
PROD=SQ.dot(COV_F).dot(SQ.T)
ID=np.identity(784)
PRODID=SQ.dot(ID).dot(SQ.T)
print('Identity alignment',np.sum(np.real(npl.eig(PRODID)[0]))/npl.norm(EIG_BT)/np.sqrt(784))
print('Beta alignment',np.sum(np.real(npl.eig(PROD)[0]))/npl.norm(EIG_F)/npl.norm(EIG_BT))
| 3,339 | 28.298246 | 115 | py |
FORK | FORK-master/TD3-FORK/TD3_FORK.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
    """Deterministic policy network: maps a state batch to actions in
    [-max_action, max_action] via a tanh-squashed 2-hidden-layer MLP."""
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two 256-unit hidden layers; names l1..l3 are part of the checkpoint format.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action
    def forward(self, state):
        """Compute bounded actions for a batch of states."""
        hidden = F.relu(self.l2(F.relu(self.l1(state))))
        return torch.tanh(self.l3(hidden)) * self.max_action
class Critic(nn.Module):
    """Twin Q-value networks; the minimum of Q1/Q2 is used to curb
    overestimation (clipped double-Q learning)."""
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)
    def forward(self, state, action):
        """Return both Q estimates for a (state, action) batch."""
        pair = torch.cat([state, action], 1)
        q_first = self.l3(F.relu(self.l2(F.relu(self.l1(pair)))))
        q_second = self.l6(F.relu(self.l5(F.relu(self.l4(pair)))))
        return q_first, q_second
    def Q1(self, state, action):
        """Return only the first Q estimate (used by the actor update)."""
        pair = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(pair)))))
class Sys_R(nn.Module):
    """Learned reward model: predicts r(s, s', a) from the concatenation of
    state, next state and action."""
    def __init__(self,state_dim, action_dim, fc1_units, fc2_units):
        super(Sys_R, self).__init__()
        self.l1 = nn.Linear(2 * state_dim + action_dim, fc1_units)
        self.l2 = nn.Linear(fc1_units,fc2_units)
        self.l3 = nn.Linear(fc2_units, 1)
    def forward(self, state,next_state, action):
        """Return the predicted scalar reward for each transition in the batch."""
        combined = torch.cat([state,next_state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(combined)))))
class SysModel(nn.Module):
    """Learned forward-dynamics model: predicts the next state from
    (state, action)."""
    def __init__(self, state_size, action_size, fc1_units, fc2_units):
        super(SysModel, self).__init__()
        self.l1 = nn.Linear(state_size + action_size, fc1_units)
        self.l2 = nn.Linear(fc1_units, fc2_units)
        self.l3 = nn.Linear(fc2_units, state_size)
    def forward(self, state, action):
        """Build a system model to predict the next state at a given state."""
        joined = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(joined)))))
class TD3_FORK(object):
    """TD3 with a Forward-looking actor (FORK).

    Extends TD3 with a learned dynamics model (SysModel) and reward model
    (Sys_R). When the dynamics model's loss is below ``sys_threshold``,
    imagined one- or two-step rollouts augment the actor loss; the exact
    form depends on the ``policy`` variant name.

    Fixes vs. the original implementation:
    - ``load`` used ``relf.`` instead of ``self.`` for the sysmodel/sysr
      optimizers (NameError on any load).
    - ``load`` appended ``.pth`` to filenames that ``save`` writes without
      the suffix, so a save/load round trip could never succeed.
    - ``update_sys`` is now initialized in ``__init__`` instead of relying
      on the training script to set it externally.
    """
    def __init__(
        self,
        env,
        policy,
        state_dim,
        action_dim,
        max_action,
        sys1_units = 400,
        sys2_units = 300,
        r1_units = 256,
        r2_units = 256,
        discount=0.99,
        tau=0.005,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2,
        sys_weight = 0.5,
        sys_weight2 = 0.4,
        sys_threshold = 0.020,
    ):
        self.env = env
        self.policy = policy  # variant name, e.g. 'TD3_FORK', 'TD3_FORK_Q', ...
        # Actor/critic networks with Polyak-averaged target copies.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        # Learned forward-dynamics model (Xavier-initialized).
        self.sysmodel = SysModel(state_dim, action_dim, sys1_units, sys2_units).to(device)
        self.sysmodel_optimizer = torch.optim.Adam(self.sysmodel.parameters(), lr=3e-4)
        self.sysmodel.apply(self.init_weights)
        # Learned reward model.
        self.sysr = Sys_R(state_dim, action_dim, r1_units, r2_units).to(device)
        self.sysr_optimizer = torch.optim.Adam(self.sysr.parameters(), lr=3e-4)
        self.obs_upper_bound = float(self.env.observation_space.high[0])  # state space upper bound
        self.obs_lower_bound = float(self.env.observation_space.low[0])  # state space lower bound
        self.reward_lower_bound = 0
        self.reward_upper_bound = 0
        # Unbounded observation space: start bounds at 0; the training loop
        # widens them online from observed states.
        if self.obs_upper_bound == float('inf'):
            self.obs_upper_bound, self.obs_lower_bound = 0, 0
        self.sysmodel_loss = 0
        self.sysr_loss = 0
        self.max_action = max_action
        self.discount = discount
        self.tau = tau
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq
        self.sys_weight = sys_weight
        self.sys_weight2 = sys_weight2
        self.sys_threshold = sys_threshold
        self.total_it = 0
        # Counts actor updates that actually used the FORK term (previously
        # only initialized by the external training script).
        self.update_sys = 0
    def init_weights(self, m):
        """Xavier-initialize Linear layers; small positive bias."""
        if type(m) == nn.Linear:
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.001)
    def select_action(self, state):
        """Greedy (deterministic) action for a single unbatched state."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()
    def train(self, replay_buffer, batch_size=100,train_steps=1):
        """Run ``train_steps`` TD3-FORK updates on minibatches from ``replay_buffer``."""
        for _ in range(train_steps):
            self.total_it += 1
            # Sample replay buffer
            state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
            with torch.no_grad():
                # Target policy smoothing: clipped noise on the target action.
                noise = (
                    torch.randn_like(action) * self.policy_noise
                ).clamp(-self.noise_clip, self.noise_clip)
                next_action = (
                    self.actor_target(next_state) + noise
                ).clamp(-self.max_action, self.max_action)
                # Clipped double-Q target.
                target_Q1, target_Q2 = self.critic_target(next_state, next_action)
                target_Q = torch.min(target_Q1, target_Q2)
                target_Q = reward + not_done * self.discount * target_Q
            # Current Q estimates and TD critic loss.
            current_Q1, current_Q2 = self.critic(state, action)
            critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            # Train the dynamics model on observed transitions (clamped to the
            # known observation bounds).
            predict_next_state = self.sysmodel(state, action)
            predict_next_state = predict_next_state.clamp(self.obs_lower_bound, self.obs_upper_bound)
            sysmodel_loss = F.smooth_l1_loss(predict_next_state, next_state.detach())
            self.sysmodel_optimizer.zero_grad()
            sysmodel_loss.backward()
            self.sysmodel_optimizer.step()
            self.sysmodel_loss = sysmodel_loss.item()
            # Train the reward model.
            predict_reward = self.sysr(state, next_state, action)
            sysr_loss = F.mse_loss(predict_reward, reward.detach())
            self.sysr_optimizer.zero_grad()
            sysr_loss.backward()
            self.sysr_optimizer.step()
            self.sysr_loss = sysr_loss.item()
            # FORK term is trusted only when the dynamics model is accurate enough.
            s_flag = 1 if sysmodel_loss.item() < self.sys_threshold else 0
            # Delayed policy updates
            if self.total_it % self.policy_freq == 0:
                # Standard TD3 actor loss.
                actor_loss1 = -self.critic.Q1(state, self.actor(state)).mean()
                if s_flag == 1:
                    # Imagine one step ahead with the learned dynamics model.
                    p_next_state = self.sysmodel(state, self.actor(state))
                    p_next_state = p_next_state.clamp(self.obs_lower_bound, self.obs_upper_bound)
                    actions2 = self.actor(p_next_state.detach())
                    if self.policy in ['TD3_FORK_Q','TD3_FORK_Q_F','TD3_FORK_DQ','TD3_FORK_DQ_F']:
                        actor_loss2 = self.critic.Q1(p_next_state.detach(), actions2)
                        if self.policy in ['TD3_FORK_DQ','TD3_FORK_DQ_F']:
                            # Second imagined step for the double-Q variants.
                            p_next_state2 = self.sysmodel(p_next_state, self.actor(p_next_state.detach()))
                            p_next_state2 = p_next_state2.clamp(self.obs_lower_bound, self.obs_upper_bound)
                            actions3 = self.actor(p_next_state2.detach())
                            actor_loss22 = self.critic.Q1(p_next_state2.detach(), actions3)
                            actor_loss3 = - actor_loss2.mean() - self.sys_weight2 * actor_loss22.mean()
                        else:
                            actor_loss3 = - actor_loss2.mean()
                    elif self.policy in ['TD3_FORK_S','TD3_FORK_S_F','TD3_FORK','TD3_FORK_F']:
                        # Variants that also use the learned reward model.
                        p_next_r = self.sysr(state, p_next_state.detach(), self.actor(state))
                        if self.policy in ['TD3_FORK_S','TD3_FORK_S_F']:
                            # One-step: r + gamma * Q.
                            actor_loss2 = self.critic.Q1(p_next_state.detach(), actions2)
                            actor_loss3 = -(p_next_r + self.discount * actor_loss2).mean()
                        else:
                            # Two-step: r + gamma * r' + gamma^2 * Q.
                            p_next_state2 = self.sysmodel(p_next_state, self.actor(p_next_state.detach()))
                            p_next_state2 = p_next_state2.clamp(self.obs_lower_bound, self.obs_upper_bound)
                            p_next_r2 = self.sysr(p_next_state.detach(), p_next_state2.detach(), self.actor(p_next_state.detach()))
                            actions3 = self.actor(p_next_state2.detach())
                            actor_loss2 = self.critic.Q1(p_next_state2.detach(), actions3)
                            actor_loss3 = -(p_next_r + self.discount * p_next_r2 + self.discount ** 2 * actor_loss2).mean()
                    actor_loss = (actor_loss1 + self.sys_weight * actor_loss3)
                    self.update_sys += 1
                else:
                    actor_loss = actor_loss1
                # Optimize the actor; clear critic/sysmodel grads accumulated
                # through the actor-loss graph before stepping only the actor.
                self.critic_optimizer.zero_grad()
                self.sysmodel_optimizer.zero_grad()
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                self.actor_optimizer.step()
                # Polyak-update the frozen target networks.
                for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                    target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
                for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                    target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
    def save(self, filename):
        """Persist every network and optimizer under ``filename`` + suffix."""
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
        torch.save(self.sysmodel.state_dict(), filename + "_sysmodel")
        torch.save(self.sysmodel_optimizer.state_dict(), filename + "_sysmodel_optimizer")
        torch.save(self.sysr.state_dict(), filename + "_reward_model")
        torch.save(self.sysr_optimizer.state_dict(), filename + "_reward_model_optimizer")
    def load(self, filename):
        """Restore everything written by ``save`` (same suffixes, no '.pth')
        and rebuild the target networks from the loaded weights."""
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)
        self.sysmodel.load_state_dict(torch.load(filename + "_sysmodel"))
        self.sysmodel_optimizer.load_state_dict(torch.load(filename + "_sysmodel_optimizer"))
        self.sysr.load_state_dict(torch.load(filename + "_reward_model"))
        self.sysr_optimizer.load_state_dict(torch.load(filename + "_reward_model_optimizer"))
| 10,157 | 31.14557 | 108 | py |
FORK | FORK-master/TD3-FORK/utils.py | import numpy as np
import torch
import math
class ReplayBuffer(object):
    """Fixed-size circular experience replay buffer for off-policy RL.

    Transitions live in pre-allocated numpy arrays; ``sample`` returns
    uniform random minibatches as torch tensors on the available device.
    """
    def __init__(self, state_dim, action_dim, max_size=int(1e6)):
        self.max_size = max_size
        self.ptr = 0   # next write index (wraps around)
        self.size = 0  # number of valid entries currently stored
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))
        self.welford_state_n = 1  # sample counter for online normalization
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def normalize_state(self, states, update=True):
        """
        Use Welford's algorithm to normalize a state, and optionally update the statistics
        for normalizing states using the new state, online.
        """
        states = torch.Tensor(states)
        normalized = states.data.clone()
        for row, state in enumerate(states):
            if self.welford_state_n == 1:
                self.welford_state_mean = torch.zeros(state.size(-1))
                self.welford_state_mean_diff = torch.ones(state.size(-1))
            if update:
                if len(state.size()) == 1:  # a single state vector
                    # BUGFIX: the old mean must be snapshotted with clone() —
                    # previously `state_old` aliased the running-mean tensor,
                    # so the in-place += made both factors use the post-update
                    # mean and M2 accumulated delta_new**2 instead of the
                    # correct Welford term delta_old * delta_new.
                    state_old = self.welford_state_mean.clone()
                    self.welford_state_mean += (state - state_old) / self.welford_state_n
                    self.welford_state_mean_diff += (state - state_old) * (state - self.welford_state_mean)
                    self.welford_state_n += 1
                else:
                    raise RuntimeError  # this really should not happen
            normalized[row] = (state - self.welford_state_mean) / np.sqrt(self.welford_state_mean_diff / self.welford_state_n)
        return normalized
    def add(self, state, action, next_state, reward, done):
        """Insert one transition, overwriting the oldest entry when full."""
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)
    def sample(self, batch_size):
        """Return a uniform random minibatch as torch tensors:
        (state, action, next_state, reward, not_done)."""
        ind = np.random.randint(0, int(self.size), size=batch_size)
        return (
            torch.FloatTensor(self.state[ind]).to(self.device),
            torch.FloatTensor(self.action[ind]).to(self.device),
            torch.FloatTensor(self.next_state[ind]).to(self.device),
            torch.FloatTensor(self.reward[ind]).to(self.device),
            torch.FloatTensor(self.not_done[ind]).to(self.device)
        )
def create_log_gaussian(mean, log_std, t):
    """Log-density of a diagonal Gaussian N(mean, exp(log_std)**2) at ``t``.

    Returns log p(t) reduced over the last dimension:
    -0.5 * sum(z**2) - sum(log_std) - 0.5 * d * log(2*pi), z = (t - mean)/std.
    """
    # BUGFIX: the Gaussian quadratic term is -0.5 * z**2; the previous code
    # computed -(0.5 * z)**2 = -0.25 * z**2, underweighting the exponent.
    quadratic = -0.5 * (((t - mean) / (log_std.exp())).pow(2))
    l = mean.shape
    log_z = log_std
    z = l[-1] * math.log(2 * math.pi)  # normalization constant for d = last dim
    log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
    return log_p
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(inputs))) along ``dim``.

    When ``dim`` is None the tensor is flattened first, i.e. the reduction
    runs over all elements.
    """
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    # Subtract the max before exponentiating to avoid overflow.
    peak, _ = torch.max(inputs, dim=dim, keepdim=True)
    result = peak + (inputs - peak).exp().sum(dim=dim, keepdim=True).log()
    if not keepdim:
        result = result.squeeze(dim)
    return result
def soft_update(target, source, tau):
    """Polyak update: target <- (1 - tau) * target + tau * source, in place."""
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        blended = tgt_param.data * (1.0 - tau) + src_param.data * tau
        tgt_param.data.copy_(blended)
def hard_update(target, source):
    """Copy every parameter of ``source`` into ``target`` in place."""
    for dst, src in zip(target.parameters(), source.parameters()):
        dst.data.copy_(src.data)
| 3,165 | 32.680851 | 113 | py |
FORK | FORK-master/TD3-FORK/TD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
    """Deterministic policy for TD3: state batch -> actions scaled to
    [-max_action, max_action]."""
    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Layer names l1..l3 are part of the saved checkpoint format.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action
    def forward(self, state):
        """Compute the bounded action for each state in the batch."""
        x = F.relu(self.l1(state))
        x = F.relu(self.l2(x))
        return torch.tanh(self.l3(x)) * self.max_action
class Critic(nn.Module):
    """Twin Q-networks for TD3; min(Q1, Q2) forms the clipped double-Q target."""
    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)
    def forward(self, state, action):
        """Return both Q estimates for a (state, action) batch."""
        joined = torch.cat([state, action], 1)
        first = self.l3(F.relu(self.l2(F.relu(self.l1(joined)))))
        second = self.l6(F.relu(self.l5(F.relu(self.l4(joined)))))
        return first, second
    def Q1(self, state, action):
        """Return only the first Q estimate (used for the actor update)."""
        joined = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(joined)))))
class TD3(object):
    """Twin Delayed DDPG (Fujimoto et al., 2018): clipped double-Q learning,
    delayed policy updates and target policy smoothing on top of DDPG."""
    def __init__(
        self,
        state_dim,
        action_dim,
        max_action,
        discount=0.99,
        tau=0.005,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2
    ):
        # Actor/critic networks and their Polyak-averaged target copies.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        self.max_action = max_action
        self.discount = discount  # gamma
        self.tau = tau  # Polyak averaging coefficient
        self.policy_noise = policy_noise  # std of target-smoothing noise
        self.noise_clip = noise_clip  # clip range for that noise
        self.policy_freq = policy_freq  # critic updates per actor update
        self.total_it = 0
    def select_action(self, state):
        # Deterministic greedy action for a single (unbatched) state.
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()
    def train(self, replay_buffer, batch_size=100):
        """Run one TD3 training iteration from a sampled minibatch."""
        self.total_it += 1
        # Sample replay buffer
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
        with torch.no_grad():
            # Select action according to policy and add clipped noise
            # (target policy smoothing).
            noise = (
                torch.randn_like(action) * self.policy_noise
            ).clamp(-self.noise_clip, self.noise_clip)
            next_action = (
                self.actor_target(next_state) + noise
            ).clamp(-self.max_action, self.max_action)
            # Compute the target Q value: min of the twin targets (clipped double-Q).
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q
        # Get current Q estimates
        current_Q1, current_Q2 = self.critic(state, action)
        # Compute critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Delayed policy updates
        if self.total_it % self.policy_freq == 0:
            # Compute actor loss (deterministic policy gradient through Q1).
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Update the frozen target models (Polyak averaging).
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
    def save(self, filename):
        """Persist critic/actor networks and their optimizer states."""
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
    def load(self, filename):
        """Restore networks/optimizers saved by ``save`` and rebuild targets."""
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)
| 4,752 | 26.473988 | 93 | py |
FORK | FORK-master/TD3-FORK/main_td3_fork.py | import numpy as np
import torch
import gym
import argparse
import os
import copy
import utils
import TD3
import pandas as pd
import json,os
import TD3_FORK
def eval_policy(policy, env_name,eval_episodes=10):
    """Run ``policy`` greedily on a fresh env instance for several episodes
    and return the average undiscounted episode return."""
    eval_env = gym.make(env_name)
    total_return = 0.
    for _ in range(eval_episodes):
        obs, finished = eval_env.reset(), False
        while not finished:
            chosen = policy.select_action(np.array(obs))
            obs, step_reward, finished, _ = eval_env.step(chosen)
            total_return += step_reward
    avg_reward = total_return / eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    # --- command-line configuration ---
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="TD3")  # Policy name (TD3,or TD3_FORK,TD3_FORK_Q,TD3_FORK_DQ,TD3_FORK_S)
    parser.add_argument("--env", default="HalfCheetah-v2")  # OpenAI gym environment name
    parser.add_argument("--seed", default=0, type=int)  # Sets Gym, PyTorch and Numpy seeds
    parser.add_argument("--start_timesteps", default=1e4, type=int)  # Time steps initial random policy is used
    parser.add_argument("--eval_freq", default=5e3, type=int)  # How often (time steps) we evaluate
    parser.add_argument("--max_timesteps", default=1e6, type=int)  # Max time steps to run environment
    parser.add_argument("--expl_noise", default=0.1)  # Std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=100, type=int)  # Batch size for both actor and critic
    parser.add_argument("--max_reward", default=100, type=int)  # max_reward for dynamic weight
    parser.add_argument("--discount", default=0.99)  # Discount factor
    parser.add_argument("--tau", default=0.005)  # Target network update rate
    parser.add_argument("--policy_noise", default=0.2,type=float)  # Noise added to target policy during critic update
    parser.add_argument("--noise_clip", default=0.5,type=float)  # Range to clip target policy noise
    parser.add_argument("--policy_freq", default=2, type=int)  # Frequency of delayed policy updates
    parser.add_argument("--sys_neurons1", default=400, type=int)  # units of the first layer in system model
    parser.add_argument("--sys_neurons2", default=300, type=int)  # units of the second layer in system model
    parser.add_argument("--r_neurons1", default=256, type=int)  # units of the first layer in reward model
    parser.add_argument("--r_neurons2", default=256, type=int)  # units of the second layer in reward model
    parser.add_argument("--save_model", default="False")  # Save model and optimizer parameters
    parser.add_argument("--load_model", default="")  # Model load file name, "" doesn't load, "default" uses file_name
    parser.add_argument("--training_mode", default="Online")  # training_mode Offline or Online
    parser.add_argument("--sys_weight", default=0.5,type=float)  # weight for FORK
    parser.add_argument("--sys_weight2", default=0.4,type=float)  # weight for FORK-DQ
    parser.add_argument("--base_weight", default=0.6,type=float)  # base weight if using dynamic_weight
    parser.add_argument("--sys_threshold", default=0.020,type=float)  # threshold for FORK
    parser.add_argument("--sys_dynamic_weight", default="False")  # whether use dynamic weight or not
    args = parser.parse_args()
    # Fixed-weight variants get an "_F" suffix appended to the policy name.
    if args.sys_dynamic_weight == 'False':
        args.policy = args.policy + '_F'
    file_name = f"{args.policy}_{args.env}_{args.seed}_{args.training_mode}"
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}, Weight: {args.sys_weight},Training_mode: {args.training_mode}, Dynamic_weight: {args.sys_dynamic_weight}")
    print("---------------------------------------")
    if args.save_model == "True" and not os.path.exists("./models"):
        os.makedirs("./models")
    env = gym.make(args.env)
    # Set seeds
    env.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    state_dim = env.observation_space.shape[0]
    state_max = env.observation_space.shape
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    # Shared constructor keyword arguments for both agent classes.
    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": args.discount,
        "tau": args.tau,
    }
    # Initialize policy
    if args.policy == "TD3":
        # Target policy smoothing is scaled wrt the action scale
        kwargs["policy_noise"] = args.policy_noise * max_action
        kwargs["noise_clip"] = args.noise_clip * max_action
        kwargs["policy_freq"] = args.policy_freq
        policy = TD3.TD3(**kwargs)
        variant = dict(
            algorithm='TD3',
            env=args.env,
        )
    elif args.policy in ["TD3_FORK","TD3_FORK_F","TD3_FORK_DQ","TD3_FORK_DQ_F","TD3_FORK_Q","TD3_FORK_Q_F","TD3_FORK_S","TD3_FORK_S_F"]:
        # Target policy smoothing is scaled wrt the action scale
        kwargs["env"] = env
        kwargs["policy"] = args.policy
        kwargs["policy_noise"] = args.policy_noise * max_action
        kwargs["noise_clip"] = args.noise_clip * max_action
        kwargs["policy_freq"] = args.policy_freq
        kwargs["sys_weight"] = args.sys_weight
        kwargs["sys_weight2"] = args.sys_weight2
        kwargs["sys_threshold"] = args.sys_threshold
        kwargs["sys1_units"] = args.sys_neurons1
        kwargs["sys2_units"] = args.sys_neurons2
        kwargs["r1_units"] = args.r_neurons1
        kwargs["r2_units"] = args.r_neurons2
        policy = TD3_FORK.TD3_FORK(**kwargs)
        variant = dict(
            algorithm=args.policy,
            env=args.env,
            sys_weight=args.sys_weight,
            sys_threshold=args.sys_threshold,
            max_reward=args.max_reward,
            sys1_units=args.sys_neurons1,
            sys2_units=args.sys_neurons2,
            r1_units=args.r_neurons1,
            r2_units=args.r_neurons2
        )
    else:
        raise Exception("invaled policy!!!")
    # Per-run output directory and experiment metadata (rlkit-style variant.json).
    if not os.path.exists(f"./data/{args.env}/{args.policy}/seed{args.seed}"):
        os.makedirs(f'./data/{args.env}/{args.policy}/seed{args.seed}')
    with open(f'./data/{args.env}/{args.policy}/seed{int(args.seed)}/variant.json', 'w') as outfile:
        json.dump(variant,outfile)
    if args.load_model != "":
        policy_file = file_name if args.load_model == "default" else args.load_model
        policy.load(f"./models/{policy_file}")
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
    # Evaluate untrained policy
    evaluations = [eval_policy(policy, args.env)]
    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0
    policy.update_sys = 0  # monitoring how many updated times of FORK
    ep_reward_list = []
    base_weight = args.base_weight
    # --- main environment interaction loop ---
    for t in range(int(args.max_timesteps)):
        episode_timesteps += 1
        # Select action randomly or according to policy
        if t < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = (
                policy.select_action(np.array(state))
                + np.random.normal(0, max_action * args.expl_noise, size=action_dim)
            ).clip(-max_action, max_action)
        # Perform action
        next_state, reward, done, _ = env.step(action)
        # Treat time-limit terminations as non-terminal for bootstrapping.
        done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)
        state = next_state
        # Store observation and reward bounds (used by FORK's clamping).
        policy.obs_upper_bound = np.amax(state) if policy.obs_upper_bound < np.amax(state) else policy.obs_upper_bound
        policy.obs_lower_bound = np.amin(state) if policy.obs_lower_bound > np.amin(state) else policy.obs_lower_bound
        policy.reward_lower_bound = (reward) if policy.reward_lower_bound > reward else policy.reward_lower_bound
        policy.reward_upper_bound = (reward) if policy.reward_upper_bound < reward else policy.reward_upper_bound
        episode_reward += reward
        # Train agent after collecting sufficient data (one step per env step).
        if args.training_mode == 'Online':
            if t >= args.start_timesteps:
                policy.train(replay_buffer, args.batch_size,train_steps = 1)
        if done:
            ep_reward_list.append(episode_reward)
            # Dynamic FORK weight: anneal toward 0 as performance approaches max_reward.
            if args.sys_dynamic_weight == "True":
                policy.sys_weight = np.round((1 - np.clip(np.mean(ep_reward_list[-100:])/args.max_reward, 0, 1)),4) * base_weight
            if args.policy in ["TD3_FORK","TD3_FORK_F","TD3_FORK_DQ","TD3_FORK_DQ_F","TD3_FORK_Q","TD3_FORK_Q_F","TD3_FORK_S","TD3_FORK_S_F"]:
                print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f} Sysmodel_Loss: {policy.sysmodel_loss} Reward_loss: {policy.sysr_loss} Sys updated times: {policy.update_sys} Sys_weight: {policy.sys_weight}")
                policy.update_sys = 0
            else:
                print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
            # Offline mode: train once per episode for episode_timesteps steps.
            if args.training_mode == 'Offline':
                if t >= args.start_timesteps:
                    policy.train(replay_buffer, args.batch_size,train_steps = episode_timesteps)
            # Reset environment
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1
        # Evaluate episode and checkpoint/export progress.
        if (t + 1) % args.eval_freq == 0:
            evaluations.append(eval_policy(policy, args.env))
            if args.save_model == "True":
                policy.save(f"./models/{file_name}")
            data = np.array(evaluations)
            df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
            df['Timesteps'] = df['index'] * args.eval_freq
            df['env'] = args.env
            df['algorithm_name'] = args.policy
            df.to_csv(f'./data/{args.env}/{args.policy}/seed{args.seed}/progress.csv', index = False)
| 9,548 | 44.255924 | 258 | py |
FORK | FORK-master/SAC-FORK/SAC.py | import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy
class SAC(object):
    """Soft Actor-Critic agent (Haarnoja et al., 2018).

    Twin Q critics with a Polyak-averaged target network, plus either a
    Gaussian (stochastic) or Deterministic actor selected by
    ``args.policy_type``.  The entropy temperature ``alpha`` may be tuned
    automatically when ``args.automatic_entropy_tuning`` is set.
    """

    def __init__(self, num_inputs, action_space, args):
        # Discount, Polyak rate and entropy temperature.
        self.gamma = args.gamma
        self.tau = args.tau
        self.alpha = args.alpha

        self.policy_type = args.policy_type
        self.target_update_interval = args.target_update_interval
        self.automatic_entropy_tuning = args.automatic_entropy_tuning

        self.device = torch.device("cuda" if args.cuda else "cpu")

        # Twin Q-networks plus a target copy, hard-synced at start.
        self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device=self.device)
        self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
        self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
        hard_update(self.critic_target, self.critic)

        # Running observation/reward bounds; updated by the training script.
        self.obs_upper_bound, self.obs_lower_bound = 0, 0
        self.reward_lower_bound, self.reward_upper_bound = 0, 0

        if self.policy_type == "Gaussian":
            # Target Entropy = -dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper
            if self.automatic_entropy_tuning is True:
                self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
                self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
                self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
            self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
        else:
            # Deterministic actor: no entropy term at all.
            self.alpha = 0
            self.automatic_entropy_tuning = False
            self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)

    def select_action(self, state, evaluate=False):
        """Return one action: a stochastic sample while training, the tanh mean when evaluating."""
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
        if evaluate is False:
            action, _, _ = self.policy.sample(state)
        else:
            _, _, action = self.policy.sample(state)
        return action.detach().cpu().numpy()[0]

    def update_parameters(self, memory, batch_size, updates):
        """One SAC gradient step on critics, actor and (optionally) alpha.

        Returns (qf1_loss, qf2_loss, policy_loss, alpha_loss, alpha) as floats.
        """
        # Sample a batch from memory
        state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)

        state_batch = torch.FloatTensor(state_batch).to(self.device)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        action_batch = torch.FloatTensor(action_batch).to(self.device)
        reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
        mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)

        # Soft Bellman backup: r + gamma * (min(Q1', Q2') - alpha * log pi).
        with torch.no_grad():
            next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
            qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
            min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
            next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)

        qf1, qf2 = self.critic(state_batch, action_batch)  # Two Q-functions to mitigate positive bias in the policy improvement step
        qf1_loss = F.mse_loss(qf1, next_q_value)
        qf2_loss = F.mse_loss(qf2, next_q_value)
        qf_loss = qf1_loss + qf2_loss

        self.critic_optim.zero_grad()
        qf_loss.backward()
        self.critic_optim.step()

        # Actor update: maximize min-Q minus entropy penalty.
        pi, log_pi, _ = self.policy.sample(state_batch)
        qf1_pi, qf2_pi = self.critic(state_batch, pi)
        min_qf_pi = torch.min(qf1_pi, qf2_pi)
        policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean()

        self.policy_optim.zero_grad()
        policy_loss.backward()
        self.policy_optim.step()

        if self.automatic_entropy_tuning:
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()

            self.alpha_optim.zero_grad()
            alpha_loss.backward()
            self.alpha_optim.step()

            self.alpha = self.log_alpha.exp()
            alpha_tlogs = self.alpha.clone()  # For TensorboardX logs
        else:
            alpha_loss = torch.tensor(0.).to(self.device)
            alpha_tlogs = torch.tensor(self.alpha)  # For TensorboardX logs

        # Polyak-average the target critic every `target_update_interval` updates.
        if updates % self.target_update_interval == 0:
            soft_update(self.critic_target, self.critic, self.tau)

        return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs.item()

    # Save model parameters
    def save(self, filename):
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optim.state_dict(), filename + "_critic_optimizer")
        torch.save(self.policy.state_dict(), filename + "_actor")
        torch.save(self.policy_optim.state_dict(), filename + "_actor_optimizer")

    def load(self, filename):
        """Restore networks and optimizers written by :meth:`save`.

        Fixed vs the original: the suffixes now match the ones ``save``
        actually writes (the old code appended ``.pth`` that ``save`` never
        produced, so loading always failed), and the target critic is
        re-synced with ``hard_update`` instead of ``copy.deepcopy`` —
        ``copy`` was never imported, so that line raised NameError.
        """
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optim.load_state_dict(torch.load(filename + "_critic_optimizer"))
        hard_update(self.critic_target, self.critic)
        self.policy.load_state_dict(torch.load(filename + "_actor"))
        self.policy_optim.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,765 | 45.128 | 133 | py |
FORK | FORK-master/SAC-FORK/utils.py | import numpy as np
import torch
import math
class ReplayBuffer(object):
    """Fixed-size ring buffer of (state, action, next_state, reward, not_done)
    transitions stored as pre-allocated numpy arrays."""

    def __init__(self, state_dim, action_dim, max_size=int(1e6)):
        self.max_size = max_size
        self.ptr = 0    # next write position; wraps around at max_size
        self.size = 0   # number of valid entries currently stored

        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))
        # Sample count for the online (Welford) normalizer below; the running
        # mean/diff tensors are created lazily on the first normalize_state call.
        self.welford_state_n = 1

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def normalize_state(self, states, update=True):
        """
        Use Welford's algorithm to normalize a state, and optionally update the statistics
        for normalizing states using the new state, online.
        """
        states = torch.Tensor(states)
        states2 = states.data.clone()
        ii = 0
        for state in states:
            # Lazy init of the running statistics on the very first sample.
            if self.welford_state_n == 1:
                self.welford_state_mean = torch.zeros(state.size(-1))
                self.welford_state_mean_diff = torch.ones(state.size(-1))
            if update:
                if len(state.size()) == 1:  # if we get a single state vector
                    state_old = self.welford_state_mean
                    self.welford_state_mean += (state - state_old) / self.welford_state_n
                    self.welford_state_mean_diff += (state - state_old) * (state - state_old)
                    self.welford_state_n += 1
                else:
                    raise RuntimeError  # this really should not happen
            # Standardize with the current running mean / variance estimate.
            states2[ii] = (state - self.welford_state_mean) / np.sqrt(self.welford_state_mean_diff / self.welford_state_n)
            ii += 1
        return states2

    def add(self, state, action, next_state, reward, done):
        # Overwrites the oldest entry once the buffer is full (ring semantics).
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        self.not_done[self.ptr] = 1. - done

        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        """Uniformly sample `batch_size` transitions as float tensors on self.device."""
        ind = np.random.randint(0,int(self.size), size=batch_size)

        return (
            torch.FloatTensor(self.state[ind]).to(self.device),
            #torch.FloatTensor(self.normalize_state(self.state[ind])).to(self.device),
            torch.FloatTensor(self.action[ind]).to(self.device),
            torch.FloatTensor(self.next_state[ind]).to(self.device),
            torch.FloatTensor(self.reward[ind]).to(self.device),
            torch.FloatTensor(self.not_done[ind]).to(self.device)
        )
def create_log_gaussian(mean, log_std, t):
    """Per-sample log-density-style score of `t` under a diagonal Gaussian.

    NOTE(review): the 0.5 factor sits *inside* the squared term, which differs
    from the textbook Gaussian log-density; the original behavior is
    preserved here unchanged.
    """
    std = log_std.exp()
    quad_term = -(0.5 * (t - mean) / std).pow(2)
    dim = mean.shape[-1]
    const_term = 0.5 * (dim * math.log(2 * math.pi))
    return quad_term.sum(dim=-1) - log_std.sum(dim=-1) - const_term
def logsumexp(inputs, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(inputs))) along `dim`.

    When `dim` is None the tensor is flattened first (reduction over dim 0).
    The running maximum is subtracted before exponentiating to avoid overflow.
    """
    if dim is None:
        inputs = inputs.view(-1)
        dim = 0
    peak, _ = torch.max(inputs, dim=dim, keepdim=True)
    result = peak + (inputs - peak).exp().sum(dim=dim, keepdim=True).log()
    return result if keepdim else result.squeeze(dim)
def soft_update(target, source, tau):
    """Polyak averaging: target <- (1 - tau) * target + tau * source, in place."""
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        blended = tgt_param.data * (1.0 - tau) + src_param.data * tau
        tgt_param.data.copy_(blended)
def hard_update(target, source):
    """Overwrite every parameter of `target` with the matching one from `source`."""
    for tgt_param, src_param in zip(target.parameters(), source.parameters()):
        tgt_param.data.copy_(src_param.data)
| 3,165 | 32.680851 | 113 | py |
FORK | FORK-master/SAC-FORK/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
# Initialize Policy weights
def weights_init_(m):
    """Initializer for Module.apply: Xavier-uniform weights and zero biases
    on every nn.Linear; other module types are left untouched."""
    if not isinstance(m, nn.Linear):
        return
    torch.nn.init.xavier_uniform_(m.weight, gain=1)
    torch.nn.init.constant_(m.bias, 0)
class ValueNetwork(nn.Module):
    """State-value estimator V(s): a 3-layer fully connected network."""

    def __init__(self, num_inputs, hidden_dim):
        super(ValueNetwork, self).__init__()

        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)

        # Xavier-initialize every Linear submodule.
        self.apply(weights_init_)

    def forward(self, state):
        """Map a batch of states to scalar values of shape (batch, 1)."""
        hidden = F.relu(self.linear1(state))
        hidden = F.relu(self.linear2(hidden))
        return self.linear3(hidden)
class QNetwork(nn.Module):
    """Twin Q-networks: two independent 3-layer MLPs over (state, action) pairs."""

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetwork, self).__init__()

        # Q1 architecture
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)

        # Q2 architecture
        self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)

        self.apply(weights_init_)

    def forward(self, state, action):
        """Return the pair (Q1, Q2), each of shape (batch, 1)."""
        pair = torch.cat([state, action], 1)
        q1 = self.linear3(F.relu(self.linear2(F.relu(self.linear1(pair)))))
        q2 = self.linear6(F.relu(self.linear5(F.relu(self.linear4(pair)))))
        return q1, q2
class GaussianPolicy(nn.Module):
    """Squashed-Gaussian actor.

    ``forward`` produces the pre-squash mean and clamped log-std;
    ``sample`` draws a reparameterized action, squashes it with tanh,
    rescales into the environment's action box, and returns the
    squash-corrected log-probability.
    """

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(GaussianPolicy, self).__init__()

        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)

        self.mean_linear = nn.Linear(hidden_dim, num_actions)
        self.log_std_linear = nn.Linear(hidden_dim, num_actions)

        self.apply(weights_init_)

        # action rescaling: map tanh output in [-1, 1] onto the action box.
        if action_space is None:
            self.action_scale = torch.tensor(1.)
            self.action_bias = torch.tensor(0.)
        else:
            self.action_scale = torch.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = torch.FloatTensor(
                (action_space.high + action_space.low) / 2.)

    def forward(self, state):
        """Return (mean, log_std) of the pre-tanh Gaussian; log_std is clamped
        to [LOG_SIG_MIN, LOG_SIG_MAX] for numerical stability."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
        return mean, log_std

    def sample(self, state):
        """Return (action, log_prob, deterministic_mean_action) for a batch of states."""
        mean, log_std = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        x_t = normal.rsample()  # for reparameterization trick (mean + std * N(0,1))
        y_t = torch.tanh(x_t)
        action = y_t * self.action_scale + self.action_bias
        log_prob = normal.log_prob(x_t)
        # Enforcing Action Bound: tanh change-of-variables correction;
        # epsilon guards against log(0) when y_t saturates at +/-1.
        log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        mean = torch.tanh(mean) * self.action_scale + self.action_bias
        return action, log_prob, mean

    def to(self, device):
        # action_scale/action_bias are plain tensors (not registered buffers),
        # so they must be moved explicitly alongside the module parameters.
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(GaussianPolicy, self).to(device)
class DeterministicPolicy(nn.Module):
    """Deterministic actor: tanh-squashed MLP; ``sample`` adds clamped
    Gaussian exploration noise drawn into a reusable buffer."""

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(DeterministicPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)

        self.mean = nn.Linear(hidden_dim, num_actions)
        self.noise = torch.Tensor(num_actions)  # scratch buffer reused by sample()

        self.apply(weights_init_)

        # action rescaling
        # NOTE(review): with action_space=None these are plain floats, and
        # to(device) below would fail on them -- confirm callers always pass
        # an action_space.
        if action_space is None:
            self.action_scale = 1.
            self.action_bias = 0.
        else:
            self.action_scale = torch.FloatTensor(
                (action_space.high - action_space.low) / 2.)
            self.action_bias = torch.FloatTensor(
                (action_space.high + action_space.low) / 2.)

    def forward(self, state):
        """Return the deterministic, box-rescaled action for a batch of states."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        return mean

    def sample(self, state):
        """Return (noisy_action, zero_log_prob, deterministic_action).

        Noise ~ N(0, 0.1) drawn in place into self.noise, clamped to +/-0.25;
        the log-prob slot is a constant 0 to mirror GaussianPolicy's API.
        """
        mean = self.forward(state)
        noise = self.noise.normal_(0., std=0.1)
        noise = noise.clamp(-0.25, 0.25)
        action = mean + noise
        return action, torch.tensor(0.), mean

    def to(self, device):
        # Move the unregistered tensors along with the module parameters.
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        self.noise = self.noise.to(device)
        return super(DeterministicPolicy, self).to(device)
class Sys_R(nn.Module):
    """Reward model for FORK: a 3-layer MLP predicting r from (s, s', a)."""

    def __init__(self, state_dim, action_dim, fc1_units, fc2_units):
        super(Sys_R, self).__init__()

        self.l1 = nn.Linear(2 * state_dim + action_dim, fc1_units)
        self.l2 = nn.Linear(fc1_units, fc2_units)
        self.l3 = nn.Linear(fc2_units, 1)

        self.apply(weights_init_)

    def forward(self, state, next_state, action):
        """Predict the scalar reward for each (state, next_state, action) triple."""
        features = torch.cat([state, next_state, action], 1)
        hidden = F.relu(self.l1(features))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class SysModel(nn.Module):
    """Forward dynamics model for FORK: predicts the next state from (state, action)."""

    def __init__(self, state_size, action_size, fc1_units, fc2_units):
        super(SysModel, self).__init__()

        self.l1 = nn.Linear(state_size + action_size, fc1_units)
        self.l2 = nn.Linear(fc1_units, fc2_units)
        self.l3 = nn.Linear(fc2_units, state_size)

        self.apply(weights_init_)

    def forward(self, state, action):
        """Build a system model to predict the next state at a given state."""
        features = torch.cat([state, action], 1)
        hidden = F.relu(self.l1(features))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
| 6,142 | 31.162304 | 84 | py |
FORK | FORK-master/SAC-FORK/main_sac_fork.py | import argparse
import datetime
import gym
import numpy as np
import itertools
import os
import json
import pandas as pd
import torch
import SAC
import SAC_FORK
from replay_memory import ReplayMemory
def eval_policy(policy, env_name, eval_episodes=10):
    """Run `eval_episodes` deterministic episodes in a fresh env and return the mean return."""
    scoring_env = gym.make(env_name)

    avg_reward = 0.
    for _ in range(eval_episodes):
        obs, finished = scoring_env.reset(), False
        while not finished:
            act = policy.select_action(np.array(obs), evaluate=True)
            obs, step_reward, finished, _ = scoring_env.step(act)
            avg_reward += step_reward
    avg_reward /= eval_episodes

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
# ---------------------------------------------------------------------------
# Command-line configuration for SAC / SAC-FORK training.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env', default="HalfCheetah-v2",
                    help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--policy_type', default="Gaussian",
                    help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--policy', default="SAC",
                    help='Policy name SAC or SAC-FORK')
parser.add_argument('--eval', type=bool, default=True,
                    help='Evaluates a policy a policy every 10 episode (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.005, metavar='G',
                    help='target smoothing coefficient(τ) (default: 0.005)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
                    help='learning rate (default: 0.0003)')
parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
                    help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
                    help='Automaically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
                    help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                    help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1000001, metavar='N',
                    help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_size', type=int, default=256, metavar='N',
                    help='hidden size (default: 256)')
parser.add_argument('--sys_hidden_size', type=int, default=512, metavar='N',
                    help='sys_hidden_size (default: 512)')
parser.add_argument('--sysr_hidden_size', type=int, default=512, metavar='N',
                    help='sysr hidden size (default: 512)')
parser.add_argument('--updates_per_step', type=int, default=1, metavar='N',
                    help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=10000, metavar='N',
                    help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
                    help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 10000000)')
parser.add_argument("--eval_freq", default=5e3, type=int, help="evaluation frequency")
parser.add_argument("--training_mode", default="Online", help="Online Training or Offline Training")
parser.add_argument('--cuda', action="store_true",
                    help='run on CUDA (default: False)')
parser.add_argument("--sys_weight", default=0.6,type=float, help="weight for FORK")
parser.add_argument("--base_weight", default=0.6,type=float, help="base weight if using dynamic weight")
parser.add_argument("--sys_threshold", default=0.020,type=float, help="threshold for FORK")
parser.add_argument("--sys_dynamic_weight", default="False",help="whether use dynamic weight or not")
parser.add_argument("--max_reward", default=100, type=int,help="max reward for dynamic weight")
parser.add_argument("--save_model", default="False",help="Save training models")
parser.add_argument("--load_model", default="" ,help="Loding model or not")
args = parser.parse_args()

# Experiment tag used for checkpoint file names.
file_name = f"{args.policy}_{args.env}_{args.seed}_{args.training_mode}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}, Weight: {args.sys_weight},Training_mode: {args.training_mode}, Dynamic_weight: {args.sys_dynamic_weight}")
print("---------------------------------------")
if args.sys_dynamic_weight == 'True':
    file_name += f"_DW_{args.sys_dynamic_weight}"
if args.save_model == "True" and not os.path.exists("./models"):
    os.makedirs("./models")

# Environment: seed env, action space and both RNG libraries for reproducibility.
env = gym.make(args.env)
env.seed(args.seed)
env.action_space.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)

# Agent: plain SAC or the FORK-augmented variant.
if args.policy == 'SAC':
    agent = SAC.SAC(env.observation_space.shape[0], env.action_space, args)
elif args.policy == 'SAC_FORK':
    agent = SAC_FORK.SAC_FORK(env.observation_space.shape[0], env.action_space, args)

memory = ReplayMemory(args.replay_size, args.seed)

# Training Loop bookkeeping; evaluations starts with the untrained policy's score.
total_numsteps = 0
updates = 0
evaluations = [eval_policy(agent, args.env)]
agent.update_sys = 0
base_weight = args.base_weight
ep_reward_list = []
# Record the run's hyperparameters next to its progress.csv.
if args.policy == "SAC":
    variant = dict(
        algorithm='SAC',
        env=args.env,
    )
elif args.policy == "SAC_FORK":
    variant = dict(
        algorithm=args.policy,
        env=args.env,
        sys_weight=args.sys_weight,
        sys_threshold=args.sys_threshold,
        max_reward=args.max_reward,
        sys_hidden_size=args.sys_hidden_size,
        sysr_hidden_size=args.sysr_hidden_size,
    )
if not os.path.exists(f"./data/{args.env}/{args.policy}/seed{args.seed}"):
    os.makedirs(f'./data/{args.env}/{args.policy}/seed{args.seed}')
with open(f'./data/{args.env}/{args.policy}/seed{int(args.seed)}/variant.json', 'w') as outfile:
    json.dump(variant,outfile)
# Main training loop: one iteration per episode until num_steps is exceeded.
for i_episode in itertools.count(1):
    episode_reward = 0
    episode_steps = 0
    done = False
    state = env.reset()

    while not done:
        if args.start_steps > total_numsteps:
            action = env.action_space.sample()  # Sample random action
        else:
            action = agent.select_action(state)  # Sample action from policy

        if len(memory) > args.batch_size:
            # Number of updates per step in environment
            for i in range(args.updates_per_step):
                # Update parameters of all the networks
                critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
                updates += 1

        next_state, reward, done, _ = env.step(action)  # Step
        episode_steps += 1
        total_numsteps += 1
        episode_reward += reward

        # Periodic evaluation, optional checkpointing, and progress.csv dump.
        if (total_numsteps + 1) % args.eval_freq == 0:
            eval_reward = eval_policy(agent, args.env)
            evaluations.append(eval_reward)
            if args.save_model == "True":
                agent.save(f"./models/{file_name}")
            data = np.array(evaluations)
            df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
            df['Timesteps'] = df['index'] * 5000
            df['env'] = args.env
            df['algorithm_name'] = args.policy
            df.to_csv(f'./data/{args.env}/{args.policy}/seed{args.seed}/progress.csv', index = False)

        # Ignore the "done" signal if it comes from hitting the time horizon.
        # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)
        mask = 1 if episode_steps == env._max_episode_steps else float(not done)

        memory.push(state, action, reward, next_state, mask)  # Append transition to memory

        state = next_state
        # Track the widest observation range seen so far (used by FORK's clamping).
        agent.obs_upper_bound = np.amax(state) if agent.obs_upper_bound < np.amax(state) else agent.obs_upper_bound
        agent.obs_lower_bound = np.amin(state) if agent.obs_lower_bound > np.amin(state) else agent.obs_lower_bound

    ep_reward_list.append(episode_reward)
    # Dynamic FORK weight: anneal toward 0 as the 100-episode mean return
    # approaches max_reward.
    if args.sys_dynamic_weight == "True":
        agent.sys_weight = np.round((1 - np.clip(np.mean(ep_reward_list[-100:])/args.max_reward, 0, 1)),4) * base_weight

    if total_numsteps > args.num_steps:
        break

    if args.policy == "SAC_FORK":
        print(f"Total T: {total_numsteps+1} Episode Num: {i_episode+1} Episode T: {episode_steps} Reward: {episode_reward:.3f} Sysmodel_Loss: {agent.sysmodel_loss} Reward_loss: {agent.sysr_loss} Sys updated times: {agent.update_sys} Sys_weight: {agent.sys_weight}")
    else:
        print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}".format(i_episode, total_numsteps, episode_steps, round(episode_reward, 2)))
    agent.update_sys = 0  # per-episode counter of FORK-augmented policy updates

env.close()
| 9,260 | 44.62069 | 265 | py |
FORK | FORK-master/SAC-FORK/SAC_FORK.py | import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy, Sys_R, SysModel
class SAC_FORK(object):
    """SAC agent augmented with FORK (forward-looking actor-critic).

    In addition to the standard SAC networks it learns a system model
    ``sysmodel`` (predicts s' from (s, a)) and a reward model ``sysr``
    (predicts r from (s, s', a)).  When the system-model loss falls below
    ``sys_threshold``, an imagined two-step rollout through the learned
    models adds an auxiliary term to the policy loss, weighted by
    ``sys_weight``.
    """

    def __init__(self, num_inputs, action_space, args):
        self.gamma = args.gamma
        self.tau = args.tau
        self.alpha = args.alpha

        self.policy_type = args.policy_type
        self.target_update_interval = args.target_update_interval
        self.automatic_entropy_tuning = args.automatic_entropy_tuning

        self.device = torch.device("cuda" if args.cuda else "cpu")

        # Twin critics plus a hard-synced target copy.
        self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device=self.device)
        self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
        self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
        hard_update(self.critic_target, self.critic)

        # Forward dynamics model: predicts the next state from (s, a).
        self.sysmodel = SysModel(num_inputs, action_space.shape[0], args.sys_hidden_size, args.sys_hidden_size).to(self.device)
        self.sysmodel_optimizer = Adam(self.sysmodel.parameters(), lr=args.lr)

        self.obs_upper_bound = 0  # state space upper bound (tracked by the training script)
        self.obs_lower_bound = 0  # state space lower bound

        # Reward model: predicts r from (s, s', a).
        self.sysr = Sys_R(num_inputs, action_space.shape[0], args.sysr_hidden_size, args.sysr_hidden_size).to(self.device)
        self.sysr_optimizer = torch.optim.Adam(self.sysr.parameters(), lr=args.lr)

        self.sys_threshold = args.sys_threshold
        self.sys_weight = args.sys_weight
        self.sysmodel_loss = 0
        self.sysr_loss = 0
        # Counter of policy updates that used the FORK term.  Initialized here
        # so update_parameters() works even before the training script resets
        # it (the original relied on main() assigning agent.update_sys first).
        self.update_sys = 0

        if self.policy_type == "Gaussian":
            # Target Entropy = -dim(A) (e.g. -6 for HalfCheetah-v2) as given in the paper
            if self.automatic_entropy_tuning is True:
                self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
                self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
                self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
            self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
        else:
            self.alpha = 0
            self.automatic_entropy_tuning = False
            self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
            self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)

    def select_action(self, state, evaluate=False):
        """Return one action: stochastic sample while training, tanh mean when evaluating."""
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
        if evaluate is False:
            action, _, _ = self.policy.sample(state)
        else:
            _, _, action = self.policy.sample(state)
        return action.detach().cpu().numpy()[0]

    def update_parameters(self, memory, batch_size, updates):
        """One gradient step on critics, system/reward models and actor.

        Returns (qf1_loss, qf2_loss, policy_loss, alpha_loss, alpha) as floats.
        """
        # Sample a batch from memory
        state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)

        state_batch = torch.FloatTensor(state_batch).to(self.device)
        next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
        action_batch = torch.FloatTensor(action_batch).to(self.device)
        reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
        mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)

        # --- Critic update (standard SAC soft Bellman backup) ---
        with torch.no_grad():
            next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
            qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
            min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
            next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)

        qf1, qf2 = self.critic(state_batch, action_batch)  # Two Q-functions to mitigate positive bias in the policy improvement step
        qf1_loss = F.mse_loss(qf1, next_q_value)
        qf2_loss = F.mse_loss(qf2, next_q_value)
        qf_loss = qf1_loss + qf2_loss

        self.critic_optim.zero_grad()
        qf_loss.backward()
        self.critic_optim.step()

        # --- System (dynamics) model update; predictions are clamped to the
        # observation range seen so far ---
        predict_next_state = self.sysmodel(state_batch, action_batch)
        predict_next_state = predict_next_state.clamp(self.obs_lower_bound, self.obs_upper_bound)
        sysmodel_loss = F.smooth_l1_loss(predict_next_state, next_state_batch.detach())
        self.sysmodel_optimizer.zero_grad()
        sysmodel_loss.backward()
        self.sysmodel_optimizer.step()
        self.sysmodel_loss = sysmodel_loss.item()

        # --- Reward model update ---
        predict_reward = self.sysr(state_batch, next_state_batch, action_batch)
        sysr_loss = F.mse_loss(predict_reward, reward_batch.detach())
        self.sysr_optimizer.zero_grad()
        sysr_loss.backward()
        self.sysr_optimizer.step()
        self.sysr_loss = sysr_loss.item()

        # Only trust the learned models for policy shaping once they are accurate.
        s_flag = 1 if sysmodel_loss.item() < self.sys_threshold else 0

        # --- Actor update ---
        pi, log_pi, _ = self.policy.sample(state_batch)
        qf1_pi, qf2_pi = self.critic(state_batch, pi)
        min_qf_pi = torch.min(qf1_pi, qf2_pi)
        policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean()

        if s_flag == 1 and self.sys_weight != 0:
            # Imagined two-step rollout through the learned dynamics/reward
            # models; only the actions carry gradients back to the policy.
            p_next_state = self.sysmodel(state_batch, pi)
            p_next_state = p_next_state.clamp(self.obs_lower_bound, self.obs_upper_bound)
            p_next_r = self.sysr(state_batch, p_next_state.detach(), pi)

            pi2, log_pi2, _ = self.policy.sample(p_next_state.detach())
            p_next_state2 = self.sysmodel(p_next_state, pi2)
            p_next_state2 = p_next_state2.clamp(self.obs_lower_bound, self.obs_upper_bound)
            p_next_r2 = self.sysr(p_next_state.detach(), p_next_state2.detach(), pi2)

            pi3, log_pi3, _ = self.policy.sample(p_next_state2.detach())
            qf3_pi, qf4_pi = self.critic(p_next_state2.detach(), pi3)
            min_qf_pi2 = torch.min(qf3_pi, qf4_pi)

            #sys_loss = (-p_next_r -self.gamma * p_next_r2 + self.gamma ** 2 * ((self.alpha * log_pi3) - min_qf_pi2)).mean()
            sys_loss = (-p_next_r + self.alpha * log_pi - self.gamma * (p_next_r2 - self.alpha * log_pi2) + self.gamma ** 2 * ((self.alpha * log_pi3) - min_qf_pi2)).mean()
            policy_loss += self.sys_weight * sys_loss
            self.update_sys += 1

        self.policy_optim.zero_grad()
        policy_loss.backward()
        self.policy_optim.step()

        # --- Optional automatic entropy temperature update ---
        if self.automatic_entropy_tuning:
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()

            self.alpha_optim.zero_grad()
            alpha_loss.backward()
            self.alpha_optim.step()

            self.alpha = self.log_alpha.exp()
            alpha_tlogs = self.alpha.clone()  # For TensorboardX logs
        else:
            alpha_loss = torch.tensor(0.).to(self.device)
            alpha_tlogs = torch.tensor(self.alpha)  # For TensorboardX logs

        if updates % self.target_update_interval == 0:
            soft_update(self.critic_target, self.critic, self.tau)

        return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs.item()

    # Save model parameters
    def save(self, filename):
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optim.state_dict(), filename + "_critic_optimizer")
        torch.save(self.policy.state_dict(), filename + "_actor")
        torch.save(self.policy_optim.state_dict(), filename + "_actor_optimizer")
        torch.save(self.sysmodel.state_dict(), filename + "_sysmodel")
        torch.save(self.sysmodel_optimizer.state_dict(), filename + "_sysmodel_optimizer")
        torch.save(self.sysr.state_dict(), filename + "_reward_model")
        torch.save(self.sysr_optimizer.state_dict(), filename + "_reward_model_optimizer")

    def load(self, filename):
        """Restore everything written by :meth:`save`.

        Fixes over the original: the suffixes match ``save`` (the old code
        appended ``.pth`` that ``save`` never wrote), ``self`` replaces the
        ``relf`` typos that raised NameError, and the target critic is
        re-synced via ``hard_update`` instead of ``copy.deepcopy`` (``copy``
        was never imported).
        """
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optim.load_state_dict(torch.load(filename + "_critic_optimizer"))
        hard_update(self.critic_target, self.critic)
        self.policy.load_state_dict(torch.load(filename + "_actor"))
        self.policy_optim.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.sysmodel.load_state_dict(torch.load(filename + "_sysmodel"))
        self.sysmodel_optimizer.load_state_dict(torch.load(filename + "_sysmodel_optimizer"))
        self.sysr.load_state_dict(torch.load(filename + "_reward_model"))
        self.sysr_optimizer.load_state_dict(torch.load(filename + "_reward_model_optimizer"))
| 8,966 | 47.733696 | 172 | py |
Higgs-ML | Higgs-ML-master/cnn.py | from __future__ import print_function
import os, sys
import math
import pandas as pd
import numpy as np
import keras
from keras.models import load_model
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn import metrics
from func.figure import LossHistory, ROC_plot, deltaKg_plot
from func.models import our_model
from func.file import removedir
try:
import tkinter
except:
import Tkinter as tkinter
########################################################## load data ##########################################################
# Recreate clean working directories for intermediate arrays, checkpoints and plots.
dirs=['npydata','model','plot']
for d in dirs:
    if os.path.exists(d):
        removedir(d)
    os.makedirs(d)

img_rows,img_cols =34,66
# Each row: column 0 = event weight, column 1 = label, remaining
# img_rows*img_cols columns = flattened image pixels.
data1= pd.read_table('data/train.txt', header=None, sep=',')
data2= pd.read_table('data/test.txt', header=None, sep=',')
Train_number = len(data1)
test_number = len(data2)
total_number = len(data1)+len(data2)
print ('total_number:', total_number)
print ('test_number:', test_number)
print ('Train_number:', Train_number)

A1 = data1.values
B1 = data2.values
np.random.shuffle(A1)
np.random.shuffle(B1)
A2 = A1[:,2:img_rows*img_cols+2]
B2 = B1[:,2:img_rows*img_cols+2]
#A2_sum=np.sum(A2, axis = 1)
#A2 = A2.T
#A2 /= (A2_sum+10e-8)
#A2 = A2.T
#A2 -= np.mean(A2, axis = 0)
#A2 /= (np.std(A2, axis = 0)+10e-5)
#B2_sum=np.sum(B2, axis = 1)
#B2 = B2.T
#B2 /= (B2_sum+10e-8)
#B2 = B2.T
#B2 -= np.mean(B2, axis = 0)
#B2 /= (np.std(B2, axis = 0)+10e-5)

# Reshape flattened pixels to (N, rows, cols, 1) for the CNN; split off
# weight / label columns.
Train_image = A2.reshape(Train_number,img_rows,img_cols,1)
Train_label = A1[:,1:2]
Train_weight = A1[:,0:1]
test_image = B2.reshape(test_number,img_rows,img_cols,1)
test_label = B1[:,1:2]
test_weight = B1[:,0:1]
#np.save('npydata/Train_image',Train_image)
#np.save('npydata/Train_label',Train_label)
#np.save('npydata/Train_weight',Train_weight)
#np.save('npydata/test_image',test_image)
#np.save('npydata/test_label',test_label)
#np.save('npydata/test_weight',test_weight)

# Hold out 10% of the training set for validation (fixed random_state for reproducibility).
X_train, X_valid, y_train, y_valid = train_test_split(Train_image,Train_label,test_size=0.1,random_state=22)
print ('train shape:', X_train.shape)
print ('valid shape:', X_valid.shape)
x_train = X_train.astype('float32')
x_valid = X_valid.astype('float32')

############################################## train ######################################################################################################
model=our_model(img_rows,img_cols)
history = LossHistory()
# Stop after 5 stagnant epochs; keep the checkpoint with the lowest val_loss.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
saveBestModel = ModelCheckpoint(filepath='model/best.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='min')
model.fit(x_train, y_train, batch_size=128, epochs=100, verbose=1, validation_data=(x_valid, y_valid),callbacks=[early_stopping, saveBestModel, history])
model.save('model/final.h5')

############################################# evaluate ####################################################################################################
# NOTE(review): predict_proba was removed from recent Keras and the `reorder`
# argument was removed from sklearn.metrics.auc — confirm the pinned
# keras/scikit-learn versions before running.
TestPrediction = model.predict_proba(test_image)
fpr, tpr, thresh = metrics.roc_curve(test_label, TestPrediction, pos_label=None, sample_weight=test_weight, drop_intermediate=True)
auc = metrics.auc(fpr, tpr, reorder=True)
print ('AUC :',auc)

# Scan the ROC curve for the working point minimizing the coupling
# uncertainty delta_kg = sqrt(Ng*tpr + NB*fpr) / (2*Ng*tpr); the value 1000
# is a sentinel for the undefined tpr == 0 case.
Ng, NB=4090, 21141
delta_kg=[]
for i in range(len(tpr)):
    if tpr[i]==0:
        delta_kg.append(1000)
    else:
        delta_kg.append(math.sqrt(Ng*tpr[i]+NB*fpr[i])/(2.0*Ng*tpr[i]))
best=min(delta_kg)
min_index=delta_kg.index(best)
print ('best point: (tpr, fpr) = (',tpr[min_index],',',fpr[min_index],')')
print ('minimal delta_kg =',best)

history.loss_plot('epoch')
ROC_plot(tpr, fpr)
deltaKg_plot(tpr, delta_kg)
| 3,700 | 27.689922 | 155 | py |
Higgs-ML | Higgs-ML-master/func/figure.py | from __future__ import print_function
import keras
import numpy as np
import matplotlib.pyplot as plt
import math
class LossHistory(keras.callbacks.Callback):
    """Keras callback that records loss/accuracy per batch and per epoch
    and can render them as a single acc-loss plot.

    NOTE(review): the mutable default arguments (``log={}``, ``logs={}``)
    are a known Python pitfall; Keras always passes ``logs`` explicitly,
    so they are harmless here, but should not be copied elsewhere.
    """
    def on_train_begin(self,log={}):
        # Reset the history at the start of every training run.
        self.losses = {'batch':[], 'epoch':[]}
        self.accuracy = {'batch':[], 'epoch':[]}
        self.val_loss = {'batch':[], 'epoch':[]}
        self.val_acc = {'batch':[], 'epoch':[]}
    def on_batch_end(self, batch, logs={}):
        # val_loss/val_acc are absent from batch-level logs, so .get()
        # appends None for them here; only epoch-level entries are plotted.
        self.losses['batch'].append(logs.get('loss'))
        self.accuracy['batch'].append(logs.get('acc'))
        self.val_loss['batch'].append(logs.get('val_loss'))
        self.val_acc['batch'].append(logs.get('val_acc'))
    def on_epoch_end(self, batch, logs={}):
        # NOTE(review): the first parameter is actually the epoch index,
        # despite being named ``batch`` (positional, so renaming is safe to
        # defer).
        self.losses['epoch'].append(logs.get('loss'))
        self.accuracy['epoch'].append(logs.get('acc'))
        self.val_loss['epoch'].append(logs.get('val_loss'))
        self.val_acc['epoch'].append(logs.get('val_acc'))
        # A commented-out confusion-matrix computation (TP/TN/FP/FN,
        # precision/recall/F1 over self.validation_data) used to live here;
        # recover it from version control if needed.
    def loss_plot(self, loss_type):
        """Plot recorded accuracy/loss curves; ``loss_type`` is 'batch' or
        'epoch'. Validation curves are only available at epoch granularity.
        Saves the figure to plot/loss_acc.png (the directory must exist)."""
        iters = range(len(self.losses[loss_type]))
        plt.figure()
        plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc', lw=2.0)
        plt.plot(iters, self.losses[loss_type], 'g', label='train loss', lw=2.0)
        if loss_type == 'epoch':
            plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc', lw=2.0)
            plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss', lw=2.0)
        plt.grid(True)
        plt.xlabel(loss_type, fontsize=16)
        plt.ylabel('acc-loss', fontsize=16)
        plt.legend(loc="upper right")
        plt.savefig('plot/loss_acc.png')
        #plt.show()
def ROC_plot(tpr, fpr):
    """Plot signal efficiency (tpr) vs background rejection (1 - fpr)
    and save the figure to plot/ROC.png (the directory must exist)."""
    plt.figure(figsize=(8.5,3.7))
    plt.subplot(1,2,1)
    # Background rejection is 1 - false-positive rate.
    plt.plot(tpr,1-fpr)
    plt.xlabel('Signal Efficiency',fontsize=16)
    plt.ylabel('Background Rejection',fontsize=16)
    plt.xlim(0,1)
    plt.ylim(0,1)
    plt.title('ROC',fontsize=16)
    plt.savefig('plot/ROC.png')
    #plt.show()
    return
def deltaKg_plot(tpr, delta_kg):
    """Plot the coupling uncertainty delta_kg as a function of the true
    positive rate and save it to plot/delta_Kg.png (directory must exist).

    Axis limits are hard-coded to the region of interest
    (tpr in [0.4, 0.9], delta_kg in [0.01, 0.02]).
    """
    plt.figure(figsize=(8.5,3.7))
    plt.subplot(1,2,1)
    plt.plot(tpr,delta_kg,lw=2.0)
    plt.xlabel('True Positive Rate',fontsize=16)
    plt.ylabel('delta_kg',fontsize=16)
    plt.xlim(0.4,0.9)
    plt.ylim(0.01,0.02)
    plt.title('Uncertainty',fontsize=16)
    plt.savefig('plot/delta_Kg.png')
    #plt.show()
    return
| 3,107 | 32.782609 | 158 | py |
Higgs-ML | Higgs-ML-master/func/models.py | import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import SGD, Adam, Nadam
def our_model(img_rows, img_cols):
    """Build and compile the VGG-style binary classifier for
    (img_rows, img_cols, 1) single-channel images.

    Architecture: four conv blocks (64, 64x2, 128x3, 128x3 filters), each
    closed by 2x2 max-pooling and dropout, followed by a Dense(128) head
    and a sigmoid output. Compiled with Adam(lr=5e-4) and binary
    cross-entropy.
    """
    model = Sequential()

    def add_conv(filters):
        # Shared Conv(3x3, same padding) + ReLU pair.
        model.add(Conv2D(filters, (3, 3), padding='same', kernel_initializer="uniform"))
        model.add(Activation('relu'))

    def close_block(drop_rate):
        # Every conv block ends with 2x2 max-pooling and dropout.
        model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
        model.add(Dropout(drop_rate))

    # Block 1: single conv on the raw image; valid padding, declares input shape.
    model.add(Conv2D(64, (3, 3), padding='valid', kernel_initializer="uniform",
                     input_shape=(img_rows, img_cols, 1)))
    model.add(Activation('relu'))
    close_block(0.25)

    # Block 2: two 64-filter convs.
    add_conv(64)
    add_conv(64)
    close_block(0.5)

    # Blocks 3 and 4: three 128-filter convs each.
    for _ in range(2):
        for _ in range(3):
            add_conv(128)
        close_block(0.5)

    # Fully-connected classifier head with a single sigmoid unit.
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
| 1,906 | 36.392157 | 107 | py |
hyperas | hyperas-master/setup.py | from setuptools import setup
from setuptools import find_packages
# Package metadata for hyperas (hyperopt + keras convenience wrapper).
# Version here must stay in sync with the download_url tarball tag.
setup(name='hyperas',
      version='0.4.1',
      description='Simple wrapper for hyperopt to do convenient hyperparameter optimization for Keras models',
      url='http://github.com/maxpumperla/hyperas',
      download_url='https://github.com/maxpumperla/hyperas/tarball/0.4.1',
      author='Max Pumperla',
      author_email='max.pumperla@googlemail.com',
      install_requires=['keras', 'hyperopt', 'entrypoints', 'jupyter', 'nbformat', 'nbconvert'],
      license='MIT',
      packages=find_packages(),
      zip_safe=False)
| 600 | 39.066667 | 110 | py |
hyperas | hyperas-master/hyperas/optim.py | import inspect
import os
import re
import sys
import nbformat
import numpy as np
from hyperopt import fmin
from nbconvert import PythonExporter
from .ensemble import VotingModel
from .utils import (
remove_imports, remove_all_comments, extract_imports, temp_string,
write_temp_files, determine_indent, with_line_numbers, unpack_hyperopt_vals,
eval_hyperopt_space, find_signature_end)
sys.path.append(".")
def minimize(model,
             data,
             algo,
             max_evals,
             trials,
             functions=None,
             rseed=1337,
             notebook_name=None,
             verbose=True,
             eval_space=False,
             return_space=False,
             keep_temp=False,
             data_args=None):
    """
    Minimize a keras model for given data and implicit hyperparameters.

    Parameters
    ----------
    model: A function defining a keras model with hyperas templates, which returns a
        valid hyperopt results dictionary, e.g.
        return {'loss': -acc, 'status': STATUS_OK}
    data: A parameter-less function that defines and return all data needed in the above
        model definition.
    algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
    max_evals: Maximum number of optimization runs
    trials: A hyperopt trials object, used to store intermediate results for all
        optimization runs
    functions: Optional list of helper functions whose source is inlined into the
        generated temp_model.py so the model function can call them
    rseed: Integer random seed for experiments
    notebook_name: If running from an ipython notebook, provide filename (not path)
    verbose: Print verbose output
    eval_space: Evaluate the best run in the search space such that 'choice's contain actually meaningful values instead of mere indices
    return_space: Return the hyperopt search space object (e.g. for further processing) as last return value
    keep_temp: Keep temp_model.py file on the filesystem
    data_args: Arguments to be passed to data function

    Returns
    -------
    If `return_space` is False: A pair consisting of the results dictionary of the best run and the corresponding
    keras model.
    If `return_space` is True: The pair of best result and corresponding keras model, and the hyperopt search space
    """
    best_run, space = base_minimizer(model=model,
                                     data=data,
                                     functions=functions,
                                     algo=algo,
                                     max_evals=max_evals,
                                     trials=trials,
                                     rseed=rseed,
                                     full_model_string=None,
                                     notebook_name=notebook_name,
                                     verbose=verbose,
                                     keep_temp=keep_temp,
                                     data_args=data_args)

    # Scan all completed trials for the one whose hyperparameter values match
    # the argmin returned by fmin; that trial carries the fitted keras model.
    best_model = None
    for trial in trials:
        vals = trial.get('misc').get('vals')
        # unpack the values from lists without overwriting the mutable dict within 'trial'
        unpacked_vals = unpack_hyperopt_vals(vals)
        # identify the best_run (comes with unpacked values from the hyperopt function `base.Trials.argmin`)
        if unpacked_vals == best_run and 'model' in trial.get('result').keys():
            best_model = trial.get('result').get('model')

    if eval_space is True:
        # evaluate the search space
        best_run = eval_hyperopt_space(space, best_run)

    if return_space is True:
        # return the space as well
        return best_run, best_model, space
    else:
        # the default case for backwards compatibility with expanded return arguments
        return best_run, best_model
def base_minimizer(model, data, functions, algo, max_evals, trials,
                   rseed=1337, full_model_string=None, notebook_name=None,
                   verbose=True, stack=3, keep_temp=False, data_args=None):
    """Generate temp_model.py from the user's model/data source, import it,
    and run hyperopt's fmin over the extracted search space.

    Returns a pair (fmin argmin result, search space). ``stack`` is the
    inspect.stack() frame offset used to locate the calling script when not
    running from a notebook.
    """
    if full_model_string is not None:
        model_str = full_model_string
    else:
        model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args=data_args)
    temp_file = './temp_model.py'
    write_temp_files(model_str, temp_file)

    # Drop any previously imported temp_model so the fresh file is loaded.
    if 'temp_model' in sys.modules:
        del sys.modules["temp_model"]

    try:
        from temp_model import keras_fmin_fnct, get_space
    except:
        # Intentionally broad: report whatever the generated module raised
        # on import, then re-raise unchanged.
        print("Unexpected error: {}".format(sys.exc_info()[0]))
        raise
    try:
        if not keep_temp:
            os.remove(temp_file)
            # Also remove a stale compiled 'temp_model.pyc', if present.
            os.remove(temp_file + 'c')
    except OSError:
        pass

    try:
        # for backward compatibility.
        # Older hyperopt versions accept `rseed`; newer ones raise TypeError
        # here, in which case we fall through to the `rstate` call below.
        return (
            fmin(keras_fmin_fnct,
                 space=get_space(),
                 algo=algo,
                 max_evals=max_evals,
                 trials=trials,
                 rseed=rseed,
                 return_argmin=True),
            get_space()
        )
    except TypeError:
        pass

    return (
        fmin(keras_fmin_fnct,
             space=get_space(),
             algo=algo,
             max_evals=max_evals,
             trials=trials,
             #rstate=np.random.RandomState(rseed),
             rstate=np.random.default_rng(rseed),
             return_argmin=True),
        get_space()
    )
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals,
                  trials, voting='hard', weights=None, nb_classes=None, functions=None):
    """Run a hyperas optimization and combine the top ``nb_ensemble_models``
    resulting keras models into a single VotingModel ensemble."""
    candidates = best_models(nb_models=nb_ensemble_models,
                             model=model,
                             data=data,
                             algo=algo,
                             max_evals=max_evals,
                             trials=trials,
                             functions=functions)
    return VotingModel(candidates, voting, weights, nb_classes)
def best_models(nb_models, model, data, algo, max_evals, trials, functions=None, keep_temp=False):
    """Run a hyperas optimization and return the ``nb_models`` keras models
    with the LOWEST reported loss.

    hyperopt minimizes the 'loss' entry of each trial result (hyperas models
    conventionally return ``loss = -accuracy``), so the best trials are the
    ones with the smallest loss. The previous implementation sorted the
    scores descending and kept trials with ``loss >= cut_off``, which
    selected the *worst* models and could return more than ``nb_models`` on
    ties; both issues are fixed here.
    """
    base_minimizer(model=model,
                   data=data,
                   functions=functions,
                   algo=algo,
                   max_evals=max_evals,
                   trials=trials,
                   stack=4,
                   keep_temp=keep_temp)
    if len(trials) < nb_models:
        nb_models = len(trials)
    # Rank trials by ascending loss (best first) and keep the top nb_models.
    ranked = sorted(trials, key=lambda trial: trial.get('result').get('loss'))
    model_list = [trial.get('result').get('model') for trial in ranked[:nb_models]]
    return model_list
def get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args):
    """Assemble the full source text of temp_model.py.

    Extracts the calling script's (or notebook's) imports, the data and
    helper-function sources, the {{...}} hyperparameter templates from the
    model source, and stitches them together with a generated get_space()
    via temp_string().
    """
    model_string = inspect.getsource(model)
    model_string = remove_imports(model_string)

    if notebook_name:
        # Convert the notebook to a python source string to mine its imports.
        notebook_path = os.getcwd() + "/{}.ipynb".format(notebook_name)
        with open(notebook_path, 'r') as f:
            notebook = nbformat.reads(f.read(), nbformat.NO_CONVERT)
            exporter = PythonExporter()
            source, _ = exporter.from_notebook_node(notebook)
    else:
        # ``stack`` frames up is the user's script that called into hyperas.
        calling_script_file = os.path.abspath(inspect.stack()[stack][1])
        with open(calling_script_file, 'r') as f:
            source = f.read()

    cleaned_source = remove_all_comments(source)
    imports = extract_imports(cleaned_source, verbose)

    parts = hyperparameter_names(model_string)
    aug_parts = augmented_names(parts)

    hyperopt_params = get_hyperparameters(model_string)
    space = get_hyperopt_space(parts, hyperopt_params, verbose)

    functions_string = retrieve_function_string(functions, verbose)
    data_string = retrieve_data_string(data, verbose, data_args)
    model = hyperopt_keras_model(model_string, parts, aug_parts, verbose)

    temp_str = temp_string(imports, model, data_string, functions_string, space)
    return temp_str
def get_hyperopt_space(parts, hyperopt_params, verbose=True):
    """Render the source of a ``get_space()`` function mapping each
    hyperparameter label to its ``hp.<distribution>`` expression.

    ``parts`` and ``hyperopt_params`` are parallel lists of labels and
    distribution calls (e.g. ``"uniform(0, 1)"``).
    """
    lines = ["def get_space():\n    return {"]
    for name, param in zip(parts, hyperopt_params):
        # Inject the label as the distribution's first argument,
        # e.g. uniform(0, 1) -> uniform('lr', 0, 1).
        labeled = re.sub(r"\(", "('" + name + "', ", param, 1)
        lines.append("        '" + name + "': hp." + labeled + ",")
    space = "\n".join(lines) + "\n    }\n"
    if verbose:
        print('>>> Hyperas search space:\n')
        print(space)
    return space
def retrieve_data_string(data, verbose=True, data_args=None):
    """Return the body of the user's data() function as top-level source.

    Strips the `def` line and the final `return` statement, de-indents the
    remaining body, and (when data() takes arguments) prepends literal
    assignments binding those arguments to the values in ``data_args``.

    Raises ValueError when data() declares arguments but ``data_args`` is
    None.
    """
    data_string = inspect.getsource(data)
    first_line = data_string.split("\n")[0]
    indent_length = len(determine_indent(data_string))
    # Drop the signature line; replace() is safe as the def line is unique.
    data_string = data_string.replace(first_line, "")
    r = re.compile(r'^\s*return.*')
    # The last line matching `return ...` is the function's final return.
    last_line = [s for s in reversed(data_string.split("\n")) if r.match(s)][0]
    data_string = data_string.replace(last_line, "")
    required_arguments = inspect.getfullargspec(data).args
    if required_arguments:
        if data_args is None:
            raise ValueError(
                "Data function takes arguments {} but no values are passed via data_args".format(required_arguments))
        # Bind each declared argument to its repr()'d value so the generated
        # module is self-contained (only repr-roundtrippable values work).
        data_string = "\n".join("    {} = {}".format(x, repr(y)) for x, y in zip(required_arguments, data_args)) + data_string

    # Remove one level of indentation from every remaining body line.
    split_data = data_string.split("\n")
    for i, line in enumerate(split_data):
        split_data[i] = line[indent_length:] + "\n"
    data_string = ''.join(split_data)
    if verbose:
        print(">>> Data")
        print(with_line_numbers(data_string))
    return data_string
def retrieve_function_string(functions, verbose=True):
    """Concatenate the source code of each helper function, separated by a
    blank line, for inlining into the generated temp_model.py.

    Returns '' when ``functions`` is None.
    """
    if functions is None:
        return ''
    combined = ''.join(inspect.getsource(fn) + '\n' for fn in functions)
    if verbose:
        print(">>> Functions")
        print(with_line_numbers(combined))
    return combined
def hyperparameter_names(model_string):
    """Derive a unique label for every ``{{...}}`` template in the model
    source.

    The label is the identifier preceding ``= {{...}}`` or ``({{...}}``;
    templates with no such identifier inherit the previous template's
    label. Repeated labels get ``_1``, ``_2``, ... suffixes.
    """
    templates = re.findall(r"(\{\{[^}]+}\})", model_string)
    raw_names = []
    for template in templates:
        pattern = r"(\w+(?=\s*[\=\(]\s*" + re.escape(template) + r"))"
        candidates = re.findall(pattern, model_string)
        # Fall back to the preceding name when the template is not an
        # assignment target or call argument prefix.
        raw_names.append(candidates[0] if candidates else raw_names[-1])
    seen = {}
    unique = []
    for name in raw_names:
        if name in seen:
            seen[name] += 1
            unique.append(name + "_" + str(seen[name]))
        else:
            seen[name] = 0
            unique.append(name)
    return unique
def get_hyperparameters(model_string):
    """Extract every ``{{...}}`` template from the model source and strip
    the surrounding (and any embedded) curly braces, yielding bare
    distribution calls such as ``uniform(0, 1)``."""
    return [re.sub(r"[\{\}]", '', template)
            for template in re.findall(r"(\{\{[^}]+}\})", model_string)]
def augmented_names(parts):
    """Wrap each hyperparameter label as a lookup into the hyperopt
    ``space`` dict, e.g. 'lr' -> "space['lr']"."""
    return ["space['" + part + "']" for part in parts]
def hyperopt_keras_model(model_string, parts, aug_parts, verbose=True):
    """Rewrite the user's model source into ``keras_fmin_fnct(space)``.

    Replaces the original (possibly multi-line) function signature with the
    fixed hyperopt entry point, then substitutes each ``{{...}}`` template
    with the corresponding ``space['name']`` lookup.

    NOTE: ``aug_parts`` is consumed (mutated via pop) by the substitution.
    """
    # Index of the colon ending the def signature, via project util.
    colon_index = find_signature_end(model_string)
    func_sign_line_end = model_string.count("\n", 0, colon_index) + 1
    func_sign_lines = "\n".join(model_string.split("\n")[:func_sign_line_end])
    model_string = model_string.replace(func_sign_lines, "def keras_fmin_fnct(space):\n")
    # Substitute templates left-to-right; each match pops the next space lookup.
    result = re.sub(r"(\{\{[^}]+}\})", lambda match: aug_parts.pop(0), model_string, count=len(parts))
    if verbose:
        print('>>> Resulting replaced keras model:\n')
        print(with_line_numbers(result))
    return result
| 11,352 | 36.468647 | 136 | py |
hyperas | hyperas-master/hyperas/ensemble.py | import numpy as np
from keras.models import model_from_yaml
class VotingModel(object):

    def __init__(self, model_list, voting='hard',
                 weights=None, nb_classes=None):
        """(Weighted) majority vote model for a given list of Keras models.

        Parameters
        ----------
        model_list: An iterable of Keras models.
        voting: Choose 'hard' for straight-up majority vote of highest model probilities or 'soft'
            for a weighted majority vote. In the latter, a weight vector has to be specified.
        weights: Weight vector (numpy array) used for soft majority vote.
        nb_classes: Number of classes being predicted.

        Raises
        ------
        ValueError: if ``voting`` is neither 'hard' nor 'soft', or if
            ``weights`` and ``model_list`` differ in length.

        Returns
        -------
        A voting model that has a predict method with the same signature of a single keras model.
        """
        self.model_list = model_list
        self.voting = voting
        self.weights = weights
        self.nb_classes = nb_classes

        # FIX: the previous code raised plain strings here, which is a
        # TypeError in Python 3 (exceptions must derive from BaseException).
        if voting not in ['hard', 'soft']:
            raise ValueError('Voting has to be either hard or soft')

        if weights is not None:
            if len(weights) != len(model_list):
                # Format arguments were previously swapped (weights count was
                # reported as the number of models).
                raise ValueError('Number of models {0} and length of weight vector {1} has to match.'
                                 .format(len(model_list), len(weights)))

    def predict(self, X, batch_size=128, verbose=0):
        """Combine the member models' predictions into a single class vote
        per sample (1-D array for 'hard', class indices for 'soft')."""
        predictions = list(map(lambda model: model.predict(X, batch_size, verbose), self.model_list))
        nb_preds = len(X)
        if self.voting == 'hard':
            for i, pred in enumerate(predictions):
                pred = list(map(
                    lambda probas: np.argmax(probas, axis=-1), pred
                ))
                predictions[i] = np.asarray(pred).reshape(nb_preds, 1)
            argmax_list = list(np.concatenate(predictions, axis=1))
            # NOTE(review): max(set(arr)) picks the highest class label among
            # the models' votes, not the most frequent one -- confirm whether
            # a true majority (mode) is intended before changing.
            votes = np.asarray(list(
                map(lambda arr: max(set(arr)), argmax_list)
            ))
        if self.voting == 'soft':
            for i, pred in enumerate(predictions):
                pred = list(map(lambda probas: probas * self.weights[i], pred))
                predictions[i] = np.asarray(pred).reshape(nb_preds, self.nb_classes, 1)
            weighted_preds = np.concatenate(predictions, axis=2)
            weighted_avg = np.mean(weighted_preds, axis=2)
            votes = np.argmax(weighted_avg, axis=1)
        return votes
def voting_model_from_yaml(yaml_list, voting='hard', weights=None):
    """Deserialize each YAML model spec and wrap them in a VotingModel.

    The models are materialized into a list (previously a lazy ``map``
    object, on which VotingModel's ``len(weights) != len(model_list)``
    check raised TypeError and which could only be iterated once).
    """
    model_list = [model_from_yaml(yml) for yml in yaml_list]
    return VotingModel(model_list, voting, weights)
| 2,567 | 39.125 | 101 | py |
hyperas | hyperas-master/examples/mnist_readme.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils
from hyperas import optim
from hyperas.distributions import choice, uniform
def data():
    """
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Flatten 28x28 images to 784-dim vectors and scale pixels to [0, 1].
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    # One-hot encode the digit labels.
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.

    NOTE: the {{...}} templates are not valid Python; hyperas regex-parses
    this function's source, so its body must not be restructured.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if {{choice(['three', 'four'])}} == 'four':
        model.add(Dense(100))

        # We can also choose between complete sets of layers

        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train, y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negate accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    # Optimize over the {{...}} templates in model() and re-evaluate the winner.
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    X_train, Y_train, X_test, Y_test = data()
    print("Evalutation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
| 3,140 | 34.693182 | 83 | py |
hyperas | hyperas-master/examples/lstm.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint
def data():
    """Load the IMDB sentiment dataset, pad sequences to a fixed length,
    and return everything the model function needs (kept separate so
    hyperopt does not reload data on every evaluation run)."""
    maxlen = 100
    max_features = 20000
    print('Loading data...')
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    print("Pad sequences (samples x time)")
    # Pad/truncate every review to exactly maxlen tokens.
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    return X_train, X_test, y_train, y_test, max_features, maxlen
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    """Embedding + LSTM binary sentiment classifier with hyperas templates
    for dropout rate and batch size. NOTE: {{...}} templates are regex-parsed
    by hyperas; do not restructure this body."""
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    # Stop early on stagnating val_loss and checkpoint the best weights.
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    # Negate accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    # Run 10 TPE-guided evaluations over the templated hyperparameters.
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=Trials())
    print(best_run)
| 2,374 | 34.447761 | 80 | py |
hyperas | hyperas-master/examples/mnist_distributed.py | from hyperas import optim
from hyperas.distributions import quniform, uniform
from hyperopt import STATUS_OK, tpe, mongoexp
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.datasets import mnist
import tempfile
def data():
    """Load MNIST, flatten to 784-dim vectors, scale pixels to [0, 1], and
    one-hot encode labels (kept separate so hyperopt does not reload data
    for each evaluation run)."""
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    num_classes = 10
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...

    Single hidden-layer MLP whose layer size and dropout rate are hyperas
    templates. Returns a hyperopt result dict that additionally carries the
    chosen params and a serialized copy of the model so distributed
    (MongoDB-backed) workers can ship it back through the trials database.
    NOTE: {{...}} templates are regex-parsed by hyperas; do not
    restructure this body.
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    # quniform yields floats; Dense needs an int unit count.
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
if __name__ == "__main__":
    # Distributed trials stored in MongoDB; workers poll the 'jobs' queue.
    trials = mongoexp.MongoTrials('mongo://username:pass@mongodb.host:27017/jobs/jobs', exp_key='mnist_test')
    best_run, best_model = optim.minimize(model=create_model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=10,
                                          trials=trials,
                                          keep_temp=True)  # this last bit is important
    print("Best performing model chosen hyper-parameters:")
    print(best_run)
| 2,561 | 35.084507 | 109 | py |
hyperas | hyperas-master/examples/simple.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Flatten 28x28 images to 784-dim vectors and scale pixels to [0, 1].
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    # One-hot encode the digit labels.
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.

    NOTE: the {{...}} templates are regex-parsed by hyperas; do not
    restructure this body.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negate accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    X_train, Y_train, X_test, Y_test = data()
    # Optimize the templated hyperparameters with TPE, then re-evaluate.
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print("Evalutation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
| 2,737 | 33.225 | 87 | py |
hyperas | hyperas-master/examples/complex.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.datasets import mnist
from keras.utils import np_utils
def data():
    '''
    Data providing function:

    This function is separated from model() so that hyperopt
    won't reload data for each evaluation run.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Flatten 28x28 images to 784-dim vectors and scale pixels to [0, 1].
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    # One-hot encode the digit labels.
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.

    NOTE: the {{...}} templates are regex-parsed by hyperas; do not
    restructure this body.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if {{choice(['three', 'four'])}} == 'four':
        model.add(Dense(100))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negate accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    trials = Trials()
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=trials)
    # Dump every trial record before re-evaluating the winning model.
    for trial in trials:
        print(trial)
    X_train, Y_train, X_test, Y_test = data()
    print("Evalutation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
| 3,080 | 35.678571 | 83 | py |
hyperas | hyperas-master/examples/mnist_ensemble.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, rand
from hyperas import optim
from hyperas.distributions import choice, uniform
from sklearn.metrics import accuracy_score
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
def data():
    """Load MNIST, flatten to 784-dim vectors, scale pixels to [0, 1], and
    one-hot encode labels. Note the return order here is
    (X_train, X_test, Y_train, Y_test), matching model()'s signature."""
    nb_classes = 10
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, X_test, Y_train, Y_test
def model(X_train, X_test, Y_train, Y_test):
    """Two-hidden-layer MLP with templated dropout rates and second-layer
    width; returns the hyperopt result dict including the fitted model so
    best_ensemble() can collect it. NOTE: {{...}} templates are regex-parsed
    by hyperas; do not restructure this body."""
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    # Negate accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    X_train, X_test, Y_train, Y_test = data()

    '''
    Generate ensemble model from optimization run:

    First, run hyperas optimization on specified setup, i.e. 10 trials with TPE,
    then return the best 5 models and create a majority voting model from it.
    '''
    ensemble_model = optim.best_ensemble(nb_ensemble_models=5,
                                         model=model, data=data,
                                         algo=rand.suggest, max_evals=10,
                                         trials=Trials(),
                                         voting='hard')
    # Compare the ensemble's hard votes against the integer test labels.
    preds = ensemble_model.predict(X_test)
    y_test = np_utils.categorical_probas_to_classes(Y_test)
    print(accuracy_score(preds, y_test))
| 2,430 | 33.239437 | 87 | py |
hyperas | hyperas-master/examples/use_intermediate_functions.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
import matplotlib.pyplot as plt
def visualization_mnist(x_data,n=10):
    """Render the first ``n`` rows of ``x_data`` as 28x28 grayscale digits."""
    plt.figure(figsize=(20, 4))
    for idx in range(n):
        axis = plt.subplot(1, n, idx + 1)
        plt.imshow(x_data[idx].reshape(28, 28))
        plt.gray()
        # Hide the tick marks so only the digit images remain visible.
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)
    plt.show()
def data():
    '''
    Data providing function, separated from model() so that hyperopt does not
    reload MNIST for every evaluation run. Also pops up a quick visualization
    of the raw test digits before normalization.
    '''
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    # Visualize before scaling, while pixel values are still 0-255.
    visualization_mnist(X_test)
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function. The double-curly-brace markers are hyperas
    template placeholders replaced with sampled hyperparameters before the
    source is executed. The return value must be a dictionary with two
    customary keys:
        - loss: a numeric evaluation metric to be minimized
        - status: use STATUS_OK (see hyperopt documentation otherwise)
    Optionally, 'model' carries the fitted model for later reuse.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negated accuracy: hyperopt minimizes the loss.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    X_train, Y_train, X_test, Y_test = data()
    # Helper functions listed here have their source injected into the
    # script hyperas generates, so data() can call them.
    functions=[visualization_mnist]
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          functions=functions,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print("Evalutation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
| 3,226 | 32.968421 | 87 | py |
hyperas | hyperas-master/examples/hyperas_in_intermediate_fns.py | import numpy
import random
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
def euclidean_distance(vects):
    """Batchwise Euclidean distance between the two embedding tensors in ``vects``."""
    left, right = vects
    squared_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    # Clamp at machine epsilon so the gradient of sqrt stays finite at zero.
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras ``output_shape`` helper: the distance layer emits (batch, 1)."""
    first_shape = shapes[0]
    return (first_shape[0], 1)
def create_pairs(x, digit_indices):
    """Build alternating positive/negative sample pairs for siamese training.

    For every digit class, consecutive same-class samples form positive pairs
    (label 1) and each anchor is also paired with a sample from a randomly
    chosen other class (label 0).
    """
    num_classes = 10
    pairs, labels = [], []
    # One less than the rarest class count, so index i + 1 stays valid.
    n = min(len(digit_indices[d]) for d in range(num_classes)) - 1
    for d in range(num_classes):
        same = digit_indices[d]
        for i in range(n):
            # Positive pair: two consecutive samples of the same digit.
            pairs.append([x[same[i]], x[same[i + 1]]])
            # Negative pair: the same anchor against a random other digit.
            offset = random.randrange(1, num_classes)
            other = digit_indices[(d + offset) % num_classes]
            pairs.append([x[same[i]], x[other[i]]])
            labels.extend([1, 0])
    return numpy.array(pairs), numpy.array(labels)
def create_base_network(input_shape,dense_filter1,dense_filter2,dense_filter3,dropout1,dropout2):
    """Build the shared (siamese) embedding tower.

    Arguments:
        input_shape: shape of a single sample (excluding the batch dimension).
        dense_filter1/2/3: widths of the three hidden Dense layers.
        dropout1/2: dropout rates applied after the first two Dense layers.

    Returns:
        A Keras Model mapping an input tensor to its embedding.
    """
    # Renamed from "input" to avoid shadowing the builtin input().
    net_input = Input(shape=input_shape)
    x = Flatten()(net_input)
    x = Dense(dense_filter1, activation='relu')(x)
    x = Dropout(dropout1)(x)
    x = Dense(dense_filter2, activation='relu')(x)
    x = Dropout(dropout2)(x)
    x = Dense(dense_filter3, activation='relu')(x)
    return Model(net_input, x)
def compute_accuracy(y_true, y_pred):
    """Accuracy where a predicted distance below 0.5 means 'same pair' (label 1)."""
    predicted_same = numpy.ravel(y_pred) < 0.5
    matches = predicted_same == y_true
    return numpy.mean(matches)
def accuracy(y_true, y_pred):
    """Keras metric: fraction of pairs whose thresholded distance matches the label."""
    predicted_label = K.cast(y_pred < 0.5, y_true.dtype)
    return K.mean(K.equal(y_true, predicted_label))
def process_data():
    """Load MNIST, normalize to [0, 1], and build siamese train/test pairs.

    Returns:
        (tr_pairs, tr_y, te_pairs, te_y, input_shape) — paired samples and
        labels for train and test, plus the per-sample input shape.
    """
    num_classes = 10
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    input_shape = x_train.shape[1:]
    # create training+test positive and negative pairs
    digit_indices = [numpy.where(y_train == i)[0] for i in range(num_classes)]
    tr_pairs, tr_y = create_pairs(x_train, digit_indices)
    digit_indices = [numpy.where(y_test == i)[0] for i in range(num_classes)]
    te_pairs, te_y = create_pairs(x_test, digit_indices)
    return tr_pairs, tr_y, te_pairs, te_y,input_shape
def data():
    """hyperas data hook delegating to process_data().

    NOTE(review): hyperas splices this function's body (minus signature and
    return) into its generated script, so the tuple-unpacking assignment must
    stay — collapsing it to a bare delegate call would leave nothing to splice.
    """
    tr_pairs, tr_y, te_pairs, te_y,input_shape = process_data()
    return tr_pairs, tr_y, te_pairs, te_y,input_shape
def contrastive_loss(y_true, y_pred):
    """Contrastive loss with unit margin (Hadsell et al., 2006).

    Positive pairs (label 1) are penalized by squared distance; negative pairs
    by the squared shortfall below the margin.
    """
    margin = 1
    positive_term = y_true * K.square(y_pred)
    negative_term = (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(positive_term + negative_term)
def create_model(tr_pairs, tr_y, te_pairs, te_y,input_shape):
    """Siamese-network objective for hyperas.

    The double-curly-brace markers are hyperas template placeholders replaced
    with sampled hyperparameter values before this source is executed.

    Returns:
        dict with hyperopt's 'loss' (negated test accuracy), 'status', and the
        fitted Keras model under 'model'.
    """
    epochs = 20
    dropout1 = {{uniform(0,1)}}
    dropout2 = {{uniform(0,1)}}
    dense_filter1 = {{choice([64,128,256])}}
    dense_filter2 = {{choice([64,128,256])}}
    dense_filter3 = {{choice([64,128,256])}}
    # network definition
    base_network = create_base_network(input_shape,dense_filter1,dense_filter2,dense_filter3,dropout1,dropout2)
    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)
    # The same tower processes both inputs, so the two branches share weights.
    processed_a = base_network(input_a)
    processed_b = base_network(input_b)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([processed_a, processed_b])
    model = Model([input_a, input_b], distance)
    rms = RMSprop()
    model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
    model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
              batch_size=128,
              epochs=epochs,
              verbose=1,
              validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
    y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
    tr_acc = compute_accuracy(tr_y, y_pred)
    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    te_acc = compute_accuracy(te_y, y_pred)
    print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
    print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
    return {'loss': -te_acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    tr_pairs, tr_y, te_pairs, te_y,input_shape = data()
    # Every helper in functions= has its source injected into the generated
    # script so the spliced data()/create_model() bodies can call them.
    best_run, best_model = optim.minimize(model=create_model, data=data,
                                          functions = [process_data,create_base_network,euclidean_distance,contrastive_loss,eucl_dist_output_shape,create_pairs,accuracy,compute_accuracy],
                                          algo=tpe.suggest,max_evals=100,trials=Trials())
    print("best model",best_model)
    print("best run",best_run)
    print("Evalutation of best performing model:")
    loss,te_acc = best_model.evaluate([te_pairs[:, 0], te_pairs[:, 1]], te_y)
    print("best prediction accuracy on test data %0.2f%%" % (100 * te_acc))
| 4,820 | 35.801527 | 149 | py |
hyperas | hyperas-master/examples/cnn_lstm.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, rand
from hyperas import optim
from hyperas.distributions import uniform, choice
import numpy as np
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.layers.convolutional import Convolution1D, MaxPooling1D
def data():
    """Load the IMDB sentiment dataset, padded/truncated to 100 tokens.

    Returns:
        (X_train, X_test, y_train, y_test, maxlen, max_features) — sequences
        plus the constants the model needs for its embedding layer.
    """
    np.random.seed(1337)  # for reproducibility
    max_features = 20000
    maxlen = 100
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    return X_train, X_test, y_train, y_test, maxlen, max_features
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    """CNN+LSTM sentiment classifier for hyperas.

    The double-curly-brace markers are hyperas template placeholders filled
    with sampled hyperparameter values before the source is executed.
    """
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1
    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    # Negated accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    # Random search over 5 trials; prints the best hyperparameter assignment.
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=rand.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print(best_run)
| 2,541 | 35.84058 | 84 | py |
hyperas | hyperas-master/examples/cifar_generator_cnn.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras.utils import np_utils
def data():
    """Load CIFAR-10, one-hot the labels, and set up realtime augmentation.

    Returns:
        (datagen, X_train, Y_train, X_test, Y_test) — a fitted
        ImageDataGenerator plus normalized images and one-hot labels.
    """
    nb_classes = 10
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)
    return datagen, X_train, Y_train, X_test, Y_test
def model(datagen, X_train, Y_train, X_test, Y_test):
    """CIFAR-10 CNN for hyperas, trained on augmented batches.

    The double-curly-brace markers are hyperas template placeholders.
    NOTE(review): nb_classes is not defined in this function — it is only in
    scope because hyperas splices data()'s body into the generated script;
    this function is not runnable on its own.
    """
    batch_size = 32
    nb_epoch = 200
    # input image dimensions
    img_rows, img_cols = 32, 32
    # the CIFAR10 images are RGB
    img_channels = 3
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=X_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train,
                                     batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
    # TPE search over 5 trials, then evaluate the best model on held-out data.
    datagen, X_train, Y_train, X_test, Y_test = data()
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print("Evalutation of best performing model:")
    print(best_model.evaluate(X_test, Y_test))
| 4,262 | 36.394737 | 94 | py |
hyperas | hyperas-master/tests/test_functional_api.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
def data():
    """Load MNIST as flat 784-dim float vectors in [0, 1] with one-hot labels.

    Returns:
        (X_train, Y_train, X_test, Y_test).
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
    """Functional-API MLP for hyperas; double-curly markers are template
    placeholders substituted with sampled hyperparameters before execution."""
    inputs = Input(shape=(784,))
    x = Dense({{choice([20, 30, 40])}}, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negated accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def model_multi_line_arguments(X_train, Y_train,
                               X_test, Y_test):
    """Same as model() but with the signature split across lines — exercises
    hyperas' parsing of multi-line argument lists."""
    inputs = Input(shape=(784,))
    x = Dense({{choice([20, 30, 40])}}, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_functional_api():
    """Smoke-test optim.minimize on functional-API models, including one with
    a multi-line signature."""
    data()  # warm the MNIST cache before the searches run
    for candidate in (model, model_multi_line_arguments):
        best_run, best_model = optim.minimize(model=candidate,
                                              data=data,
                                              algo=tpe.suggest,
                                              max_evals=1,
                                              trials=Trials(),
                                              verbose=False)
| 3,058 | 35.416667 | 90 | py |
hyperas | hyperas-master/tests/test_lr_plateau.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.datasets import mnist
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
def data():
    """Load MNIST as flat 784-dim float vectors in [0, 1] with one-hot labels.

    Returns:
        (x_train, y_train, x_test, y_test).
    """
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test):
    """MLP trained with EarlyStopping and ReduceLROnPlateau callbacks.

    The double-curly-brace markers are hyperas template placeholders.
    Returns hyperopt's result dict with MAE as the loss to minimize.
    """
    model = Sequential()
    model.add(Dense(44, input_shape=(784,)))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(44))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(10))
    model.compile(loss='mae', metrics=['mse'], optimizer="adam")
    es = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=10)
    rlr = ReduceLROnPlateau(factor=0.1, patience=10)
    _ = model.fit(x_train, y_train, epochs=1, verbose=0, callbacks=[es, rlr],
                  batch_size=24, validation_data=(x_test, y_test))
    mae, mse = model.evaluate(x_test, y_test, verbose=0)
    print('MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
def test_advanced_callbacks():
    """Smoke-test: optim.minimize must cope with models using LR-schedule callbacks."""
    data()  # fetch MNIST up front so the search starts from a warm cache
    optim.minimize(
        model=create_model,
        data=data,
        algo=tpe.suggest,
        max_evals=1,
        trials=Trials(),
        verbose=False,
    )
| 2,006 | 34.839286 | 77 | py |
hyperas | hyperas-master/tests/test_e2e.py | from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
from hyperopt import rand
def data():
    """Load MNIST as flat 784-dim float vectors in [0, 1] with one-hot labels.

    Returns:
        (X_train, Y_train, X_test, Y_test).
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
    """Small MLP for the end-to-end hyperas test; double-curly markers are
    template placeholders substituted before execution."""
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    # Negated accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_simple():
    """Smoke-test a single quiet evaluation of optim.minimize on the MLP model."""
    data()  # warm the MNIST cache before the search runs
    optim.minimize(
        model=model,
        data=data,
        algo=tpe.suggest,
        max_evals=1,
        trials=Trials(),
        verbose=False,
    )
def ensemble_data():
    """MNIST data hook for the ensemble test.

    Returns:
        (X_train, X_test, Y_train, Y_test) — note: both X's first, unlike data().
    """
    nb_classes = 10
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, X_test, Y_train, Y_test
def ensemble_model(X_train, X_test, Y_train, Y_test):
    """Candidate MLP for optim.best_ensemble; double-curly markers are hyperas
    template placeholders substituted before execution."""
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    nb_epoch = 10
    batch_size = 128
    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    # Negated accuracy: hyperopt minimizes 'loss'.
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_ensemble():
    """Smoke-test optim.best_ensemble with hard voting.

    Bug fix: this test previously passed the plain model/data pair, leaving
    ensemble_data/ensemble_model defined above as dead code, and unpacked
    data()'s return in the wrong order. It now exercises the ensemble
    fixtures with the matching unpack order.
    """
    X_train, X_test, Y_train, Y_test = ensemble_data()
    optim.best_ensemble(nb_ensemble_models=2,
                        model=ensemble_model,
                        data=ensemble_data,
                        algo=rand.suggest,
                        max_evals=1,
                        trials=Trials(),
                        voting='hard')
| 3,714 | 31.587719 | 87 | py |
hyperas | hyperas-master/tests/test_optim.py | from keras.datasets import mnist
from keras.utils import np_utils
from hyperas.optim import retrieve_data_string
def test_data():
    """Fixture whose source text is inspected by retrieve_data_string below.

    The check in the sibling test asserts on specific body lines, so keep
    the statements exactly as written here.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes_return = 10
    Y_train = np_utils.to_categorical(y_train, nb_classes_return)
    Y_test = np_utils.to_categorical(y_test, nb_classes_return)
    return X_train, Y_train, X_test, Y_test
def test_data_function():
    """retrieve_data_string must keep the body but drop the signature and return."""
    extracted = retrieve_data_string(test_data, verbose=False)
    stripped_fragments = (
        'return X_train, Y_train, X_test, Y_test',
        'def data():',
    )
    kept_fragments = (
        'nb_classes_return = 10',
        '(X_train, y_train), (X_test, y_test) = mnist.load_data()',
        'Y_test = np_utils.to_categorical(y_test, nb_classes_return)',
    )
    for fragment in stripped_fragments:
        assert fragment not in extracted
    for fragment in kept_fragments:
        assert fragment in extracted
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_data_function()
| 1,049 | 31.8125 | 82 | py |
autoagora-agents | autoagora-agents-master/tests/autoagora_agents/test_algorithm.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from autoagora_agents import algorithm
from ..fixture import *
def test_predetermined(predeterminedconfig):
    """The predetermined agent walks its value schedule and resets cleanly."""
    agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
    observation = np.zeros(1)
    action = np.zeros(1)
    for step in range(10):
        action = agent(
            observation=observation, action=action, reward=1, done=False
        )
        # Schedule: zeros for steps 0-2, ones for 3-5, twos from step 6 on.
        if step >= 6:
            expected = 2 * np.ones(1)
        elif step >= 3:
            expected = np.ones(1)
        else:
            expected = np.zeros(1)
        assert np.array_equiv(expected, action)
    assert agent.niterations == 10
    agent.reset()
    assert agent.niterations == 0
def test_predetermined_nonzero_first_timestamp(predeterminedconfig):
    """A schedule whose first timestamp is not 0 must be rejected."""
    predeterminedconfig["timestamps"] = [5, 10, 15]
    with pytest.raises(ValueError):
        algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
def test_predetermined_different_length_lists(predeterminedconfig):
    """Mismatched timestamps/vals lengths must raise at construction time."""
    predeterminedconfig["timestamps"] = [0, 10]
    with pytest.raises(ValueError):
        algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
def test_advantage_reward_std_nan(predeterminedconfig):
    """A single reward (std is NaN) is passed through unstandardised."""
    # Any agent works here — advantage is a static method on the base class.
    agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
    rewards = torch.as_tensor([1.0])
    advantage = agent.advantage(rewards)
    assert advantage == rewards.unsqueeze(dim=1)
def test_advantage_reward_std_zero(predeterminedconfig):
    """Constant rewards (std is 0) are passed through unstandardised."""
    # Any agent works here — advantage is a static method on the base class.
    agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
    rewards = torch.as_tensor([1.0, 1.0])
    advantage = agent.advantage(rewards)
    assert all(advantage == rewards.unsqueeze(dim=1))
def test_advantage_reward_std_nonzero(predeterminedconfig):
    """Varied rewards are standardised to roughly zero mean and unit std."""
    # Any agent works here — advantage is a static method on the base class.
    agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
    for _trial in range(100):
        rewards = torch.randint(-100, 100, (10,), dtype=torch.float32)
        advantage = agent.advantage(rewards)
        # Standardising a sample is essentially mapping it onto N(0, 1).
        assert torch.allclose(advantage.mean(), torch.zeros(1), atol=1e-2)
        assert torch.allclose(advantage.std(), torch.ones(1), atol=1e-2)
def test_bandit_call(vpgbanditconfig):
    """Each call appends to the replay buffer, which saturates at its capacity."""
    agent = algorithm.algorithmgroupfactory(**vpgbanditconfig)[0]
    observation = np.zeros(1)
    action = np.zeros(1)
    # The third call exceeds the deque capacity, so the length stays at 2.
    for expected_length in (1, 2, 2):
        action = agent(
            observation=observation, action=action, reward=1, done=False
        )
        assert len(agent.buffer) == expected_length  # type: ignore
| 3,131 | 36.285714 | 116 | py |
autoagora-agents | autoagora-agents-master/tests/autoagora_agents/test_distribution.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from autoagora_agents import distribution
from ..fixture import *
def test_gaussiandistribution_reset(gaussianconfig):
    """reset() restores the initial mean after the parameter is perturbed."""
    dist = distribution.distributionfactory(**gaussianconfig)
    initial_mean = dist.mean  # type: ignore
    dist._mean = torch.tensor([2.0])  # type: ignore
    assert not torch.allclose(initial_mean, dist.mean)  # type: ignore
    dist.reset()
    assert torch.allclose(initial_mean, dist.mean)  # type: ignore
def test_gaussiandistribution_clamping(gaussianconfig):
    """Out-of-range parameters are clamped to the configured upper bounds."""
    dist = distribution.distributionfactory(**gaussianconfig)
    mean_upper_bound = torch.tensor([2.0])
    stddev_upper_bound = torch.tensor([1.0])
    dist._mean = torch.tensor([5.0])  # type: ignore
    assert torch.allclose(dist.mean, mean_upper_bound)  # type: ignore
    dist._logstddev = torch.tensor([5.0])  # type: ignore
    assert torch.allclose(dist.stddev, stddev_upper_bound)  # type: ignore
def test_gaussiandistribution_sample(gaussianconfig):
    """Empirical moments of 1000 draws match the configured N(1, 0.5)."""
    dist = distribution.distributionfactory(**gaussianconfig)
    draws = torch.tensor([dist.sample() for _ in range(1000)])
    assert torch.allclose(torch.mean(draws), torch.tensor(1.0), atol=1e-1)
    assert torch.allclose(torch.std(draws), torch.tensor(0.5), atol=1e-1)
def test_degeneratedistribution_reset(degenerateconfig):
    """reset() restores the initial value after the parameter is perturbed.

    Bug fix: the test previously overwrote ``_mean``, but the degenerate
    distribution's parameter is ``_value`` (the attribute its clamping
    behaviour is exercised through), so the perturbation never affected
    ``mean`` and the assertion was vacuous. It now perturbs ``_value`` and
    also checks the perturbation actually took effect before resetting.
    """
    dist = distribution.distributionfactory(**degenerateconfig)
    v = dist.mean  # type: ignore
    dist._value = torch.tensor([2.0])  # type: ignore
    assert not torch.allclose(v, dist.mean)  # type: ignore
    dist.reset()
    assert torch.allclose(v, dist.mean)  # type: ignore
def test_degeneratedistribution_clamping(degenerateconfig):
    """Values above the configured bound are clamped down to it."""
    dist = distribution.distributionfactory(**degenerateconfig)
    clamped_upper_bound = torch.tensor([2.0])
    dist._value = torch.tensor([5.0])  # type: ignore
    assert torch.allclose(dist.mean, clamped_upper_bound)  # type: ignore
def test_degeneratedistribution_sample(degenerateconfig):
    """A degenerate distribution always returns its single support point."""
    dist = distribution.distributionfactory(**degenerateconfig)
    draws = [dist.sample() for _ in range(10)]
    # Ten draws of the point mass at 1.0 must sum to exactly 10.
    assert torch.sum(torch.tensor(draws)) == 10
def test_degeneratedistribution_entropy(degenerateconfig):
    """A point mass carries no uncertainty, so its entropy is zero."""
    dist = distribution.distributionfactory(**degenerateconfig)
    entropy = dist.entropy()
    assert torch.sum(entropy) == 0
def test_scaledgaussiandistribution_reset(scaledgaussianconfig):
    """reset() restores the initial mean after the parameter is perturbed."""
    dist = distribution.distributionfactory(**scaledgaussianconfig)
    initial_mean = dist.mean  # type: ignore
    dist._mean = torch.tensor([2.0])  # type: ignore
    assert not torch.allclose(initial_mean, dist.mean)  # type: ignore
    dist.reset()
    assert torch.allclose(initial_mean, dist.mean)  # type: ignore
def test_scaledgaussiandistribution_clamping(scaledgaussianconfig):
    """Parameters below their lower bounds are clamped up to them."""
    dist = distribution.distributionfactory(**scaledgaussianconfig)
    mean_lower_bound = torch.tensor([0.0])
    stddev_lower_bound = torch.tensor([0.1])
    dist._mean = torch.tensor([-1.0])  # type: ignore
    assert torch.allclose(dist.mean, mean_lower_bound)  # type: ignore
    dist._logstddev = torch.tensor([-100.0])  # type: ignore
    assert torch.allclose(dist.stddev, stddev_lower_bound)  # type: ignore
def test_scaledgaussiandistribution_unscaledsample(scaledgaussianconfig):
    """Before scaling, draws come from a standard normal."""
    dist = distribution.distributionfactory(**scaledgaussianconfig)
    draws = torch.tensor([dist.unscaledsample() for _ in range(1000)])  # type: ignore
    assert torch.allclose(torch.mean(draws), torch.tensor(0.0), atol=1e-1)
    assert torch.allclose(torch.std(draws), torch.tensor(1.0), atol=1e-1)
def test_scaledgaussiandistribution_scale(scaledgaussianconfig):
    """scale() maps the unscaled sample 0.0 to the action value 1.0.

    Bug fix: the original called torch.allclose without asserting its result,
    so this test could never fail.
    """
    dist = distribution.distributionfactory(**scaledgaussianconfig)
    assert torch.allclose(dist.scale(torch.tensor([0.0])), torch.tensor([1.0]))  # type: ignore
| 3,506 | 39.310345 | 88 | py |
autoagora-agents | autoagora-agents-master/tests/autoagora_agents/test_buffer.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from autoagora_agents import buffer
def test_buffer():
    """Appending grows the buffer until maxlength; clear() empties it again."""
    capacity = 10
    replay = buffer.buffer(maxlength=capacity)
    sample = {
        "reward": torch.as_tensor([1, 2, 3]),
        "action": torch.as_tensor([3, 2, 1]),
    }
    assert len(replay) == 0
    replay.append(sample)  # type: ignore
    assert len(replay) == 1
    # Overfilling past capacity must leave the deque exactly full.
    for _ in range(capacity + 1):
        replay.append(sample)  # type: ignore
    assert buffer.isfull(replay)
    replay.clear()
    assert not buffer.isfull(replay)
| 549 | 19.37037 | 45 | py |
autoagora-agents | autoagora-agents-master/autoagora_agents/algorithm.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod
import numpy as np
import torch
from torch import optim
import experiment
from autoagora_agents import buffer
from autoagora_agents.distribution import distributionfactory
class Algorithm(ABC):
    """Base class for algorithms.

    Concretions must implement :meth:`__call__`.

    Attributes:
        niterations (int): Number of times the algorithm has been called.
        nupdates (int): Number of times the algorithm has been updated.
        group (str): The group to which the algorithm belongs.
        i (int): The index of the algorithm.
        name (str): The group and index of the algorithm.
    """

    def __init__(self, *, group: str, i: int) -> None:
        self.niterations = 0
        self.nupdates = 0
        self.group = group
        self.i = i
        self.name = f"{group}_{i}"

    def reset(self) -> None:
        """Reset the algorithm's state."""
        self.niterations = 0

    def update(self) -> None:
        """Update the algorithm's parameters."""
        self.nupdates += 1

    @abstractmethod
    def __call__(
        self,
        *,
        observation: np.ndarray,
        action: np.ndarray,
        reward: float,
        done: bool,
    ) -> np.ndarray:
        """Run the algorithm forward.

        Keyword Arguments:
            observation (np.ndarray): The observation seen by the agent.
            action (np.ndarray): The previous action taken by the agent.
            reward (float): The reward of the agent.
            done (bool): If True, the agent is no longer in the game.

        Returns:
            np.ndarray: The next action taken by the agent.
        """
        pass

    @staticmethod
    def advantage(rewards: torch.Tensor) -> torch.Tensor:
        """Compute a simple advantage estimate.

        In effect, this is just standardising the samples to N(0, 1).
        Degenerate cases (a single reward, or constant rewards) are passed
        through unchanged since no standardisation is possible.

        Arguments:
            rewards (torch.Tensor): The reward-history using which to compute
                the advantage.

        Returns:
            torch.Tensor: The advantage estimate, with a trailing unit axis.
        """
        std = rewards.std()
        if torch.isnan(std) or std == 0:
            adv = rewards
        else:
            # Reuse the std computed above instead of recomputing it.
            adv = (rewards - rewards.mean()) / std
        return torch.unsqueeze(adv, dim=1)
class PredeterminedAlgorithm(Algorithm):
    """Plays back a fixed schedule of values keyed on the call count.

    Attributes:
        timestamps (list[int]): Iteration counts at which the output switches
            to the next value. Must start with 0.
        vals (list[np.ndarray]): The value emitted for each schedule entry.
    """

    def __init__(
        self, *, group: str, i: int, timestamps: list[int], vals: list[np.ndarray]
    ) -> None:
        super().__init__(group=group, i=i)
        if timestamps[0] != 0:
            raise ValueError("The first timestamp must be 0.")
        if len(timestamps) != len(vals):
            raise ValueError("The timestamps and vals lists must have the same length")
        self.timestamps = timestamps
        self.vals = vals
        # Index of the schedule entry currently being emitted.
        self.ix = 0

    def reset(self) -> None:
        """Return to the first schedule entry and zero the iteration count."""
        super().reset()
        self.ix = 0

    def __call__(
        self,
        *,
        observation: np.ndarray,
        action: np.ndarray,
        reward: float,
        done: bool,
    ) -> np.ndarray:
        """Ignore the inputs and return the currently scheduled value."""
        # Advance to the next schedule entry once its timestamp is reached,
        # unless we are already sitting on the final entry.
        on_last_entry = self.ix == len(self.timestamps) - 1
        if not on_last_entry and self.niterations >= self.timestamps[self.ix + 1]:
            self.ix += 1
        self.niterations += 1
        return self.vals[self.ix]
class BanditAlgorithm(Algorithm):
    """Algorithms that have no observation other than the reward.

    Keyword Arguments:
        group (str): The group to which the algorithm belongs.
        i (int): The id value of the object within the group.
        bufferlength (int): The length of the buffer storing historical samples.
        actiondistribution (dict): The config for the distribution representing
            the action.
        optimizer (dict): The config for the optimizer.

    Attributes:
        actiondist (Distribution): The distribution modelling action-selection.
        buffer (deque): The buffer storing historical samples.
        opt (optim.Optimizer): A torch optimizer.
    """

    def __init__(
        self,
        *,
        group: str,
        i: int,
        bufferlength: int,
        actiondistribution: dict,
        optimizer: dict,
    ) -> None:
        super().__init__(group=group, i=i)
        self.actiondist = distributionfactory(**actiondistribution)
        self.buffer = buffer.buffer(maxlength=bufferlength)
        # Bug fix: copy before injecting the distribution parameters so the
        # caller's config dict is not mutated as a construction side effect.
        optimizer = {**optimizer, "params": self.actiondist.params}
        self.opt = optimizerfactory(**optimizer)

    def reset(self):
        """Reset the iteration count, the action distribution, and the buffer."""
        super().reset()
        self.actiondist.reset()
        self.buffer.clear()

    def __call__(
        self,
        *,
        observation: np.ndarray,
        action: np.ndarray,
        reward: float,
        done: bool,
    ) -> np.ndarray:
        """Sample the next action and record the previous transition.

        NOTE(review): the stored logprob is computed for the *previous*
        action under the current distribution — confirm this ordering is
        intended by the update rule.
        """
        act = np.array(self.actiondist.sample())
        logprob = self.actiondist.logprob(torch.as_tensor(action))
        self.buffer.append(
            {
                "reward": reward,
                "action": action,
                "logprob": logprob,
            }
        )
        self.niterations += 1
        return act

    def logprob(self, actions: torch.Tensor) -> torch.Tensor:
        """Compute the log probability of the action given the distribution.

        Arguments:
            actions (torch.Tensor): The actions for which to compute the log
                probability.

        Returns:
            torch.Tensor: The log probability of the actions.
        """
        return self.actiondist.logprob(actions)
# NOTE: This is experimental! Please do not use!
class VPGBandit(BanditAlgorithm):
    """Bandit using a Vanilla Policy Gradient update.

    Keyword Arguments:
        group (str): The group to which the algorithm belongs.
        i (int): The id value of the object within the group.
        bufferlength (int): The length of the buffer storing historical samples.
        actiondistribution (dict): The config for the distribution representing the action.
        optimizer (dict): The config for the optimizer.
    """

    def __init__(
        self,
        *,
        group: str,
        i: int,
        bufferlength: int,
        actiondistribution: dict,
        optimizer: dict,
    ) -> None:
        # Pure pass-through: VPGBandit only specialises the update rule.
        super().__init__(
            group=group,
            i=i,
            bufferlength=bufferlength,
            actiondistribution=actiondistribution,
            optimizer=optimizer,
        )

    def _vpgpiloss(self, *, reward: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Compute the VPG policy loss.

        Tries to push the policy to maximise the probability of taking actions that
        maximise the return via an advantage function, which is a lower-variance
        Q-function.

        Keyword Arguments:
            reward (torch.Tensor): The rewards associated with taking each action.
            action (torch.Tensor): The actions the agent took.

        Returns:
            torch.Tensor: The policy loss
        """
        adv = self.advantage(reward)
        logprob = self.logprob(action)
        # Treat the different gaussians as independent. Don't mean across them.
        loss = -torch.mean(logprob * adv, dim=0)
        return loss

    def update(self):
        """Run one VPG gradient step once the sample buffer is full."""
        # Wait until the buffer is full before updating.
        if not buffer.isfull(self.buffer):
            return
        super().update()
        rewards = buffer.get("reward", self.buffer)
        actions = buffer.get("action", self.buffer)
        loss = self._vpgpiloss(reward=rewards, action=actions)
        # The fudge factor has been found to empirically be the best balance between the
        # standard deviation growing without exploding.
        fudgefactor = -5
        # exp(-logstddev + fudgefactor) grows as the stddev shrinks, so this
        # regulariser discourages the policy's stddev collapsing to zero.
        alexisterm = torch.exp(-self.actiondist.logstddev + fudgefactor)  # type: ignore
        loss += alexisterm
        # Backprop
        self.opt.zero_grad()
        torch.sum(loss).backward()
        self.opt.step()
        # On-policy update: discard the samples that produced it.
        self.buffer.clear()
# NOTE: This is experimental. Do not use!
class PPOBandit(BanditAlgorithm):
    """Bandit with a PPO update.

    Keyword Arguments:
        group (str): The group to which the algorithm belongs.
        i (int): The id value of the object within the group.
        bufferlength (int): The length of the buffer storing historical samples.
        actiondistribution (dict): The config for the distribution representing the action.
        optimizer (dict): The config for the optimizer.
        ppoiterations (int): The number of iterations to update the policy for before
            stopping the update step.
        epsclip (float): The clip value.
        entropycoeff (float): How much to weight the entropy term in the loss.
        pullbackstrength (float): How strongly to apply pullback to the initial distribution.
        stddevfallback (bool): Whether to do fallback for the standard deviation.

    Attributes:
        ppoiterations (int): The number of iterations to update the policy for before
            stopping the update step.
        epsclip (float): The clip value.
        entropycoeff (float): How much to weight the entropy term in the loss.
        pullbackstrength (float): How strongly to apply pullback to the initial distribution.
        stddevfallback (bool): Whether to do fallback for the standard deviation.
    """

    def __init__(
        self,
        *,
        group: str,
        i: int,
        bufferlength: int,
        actiondistribution: dict,
        optimizer: dict,
        ppoiterations: int,
        epsclip: float,
        entropycoeff: float,
        pullbackstrength: float,
        stddevfallback: bool,
    ) -> None:
        super().__init__(
            group=group,
            i=i,
            bufferlength=bufferlength,
            actiondistribution=actiondistribution,
            optimizer=optimizer,
        )
        self.ppoiterations = ppoiterations
        self.epsclip = epsclip
        self.entropycoeff = entropycoeff
        self.pullbackstrength = pullbackstrength
        self.stddevfallback = stddevfallback

    def _ppoloss(
        self, *, actions: torch.Tensor, logprob: torch.Tensor, adv: torch.Tensor
    ) -> torch.Tensor:
        """Clipped PPO surrogate loss for the given actions and advantages."""
        # Importance ratio pi_new(a) / pi_old(a), computed via log-probs.
        nlogprob = self.actiondist.logprob(actions)
        ratio = torch.exp(nlogprob - logprob)
        # Pessimistic (min) bound of the clipped surrogate objective; negated
        # because the optimiser minimises.
        loss = -torch.min(
            ratio * adv,
            torch.clip(ratio, min=1 - self.epsclip, max=1 + self.epsclip) * adv,
        )
        return loss

    def _entropyloss(self) -> torch.Tensor:
        """Entropy term: with a positive entropycoeff, minimising this term
        rewards higher entropy (encourages exploration)."""
        return -self.actiondist.entropy() * self.entropycoeff

    def _update(self) -> bool:
        """Run the PPO optimisation loop if the buffer is full.

        Returns:
            bool: True if an update ran, False otherwise.
        """
        if not buffer.isfull(self.buffer):
            return False
        super().update()
        rewards = buffer.get("reward", self.buffer)
        actions = buffer.get("action", self.buffer)
        adv = self.advantage(rewards)
        # Log-probs under the behaviour policy; detached so that they act as
        # constants across the PPO iterations below.
        logprob = self.logprob(actions).detach()
        for _ in range(self.ppoiterations):
            ppoloss = self._ppoloss(actions=actions, logprob=logprob, adv=adv)
            entropyloss = self._entropyloss()
            loss = torch.mean(ppoloss + entropyloss, dim=0)
            # Pullback: penalise drift of the mean away from its initial value.
            loss += (
                torch.abs(self.actiondist.unclampedmean - self.actiondist.initial_mean)
                * self.pullbackstrength
            )
            if self.stddevfallback:
                # Penalise the stddev only when it grows beyond its initial value.
                diff = self.actiondist.logstddev - torch.log(
                    self.actiondist.initial_stddev
                )
                loss += torch.where(
                    diff > 0.0, diff * self.pullbackstrength, torch.zeros_like(diff)
                )
            self.opt.zero_grad()
            torch.sum(loss).backward()
            self.opt.step()
        return True

    def update(self):
        """Run the PPO update and, if it ran, discard the on-policy samples."""
        ran = self._update()
        if ran:
            self.buffer.clear()
# NOTE: This is experimental. Do not use!
class RollingMemoryPPOBandit(PPOBandit):
    """Bandit with a PPO update wherein the buffer is maintained in an off-policy way.

    Keyword Arguments:
        group (str): The group to which the algorithm belongs.
        i (int): The id value of the object within the group.
        bufferlength (int): The length of the buffer storing historical samples.
        actiondistribution (dict): The config for the distribution representing the action.
        optimizer (dict): The config for the optimizer.
        ppoiterations (int): The number of iterations to update the policy for before
            stopping the update step.
        epsclip (float): The clip value.
        entropycoeff (float): How much to weight the entropy term in the loss.
        pullbackstrength (float): How strongly to apply pullback to the initial distribution.
        stddevfallback (bool): Whether to do fallback for the standard deviation.
    """

    def __init__(
        self,
        *,
        group: str,
        i: int,
        bufferlength: int,
        actiondistribution: dict,
        optimizer: dict,
        ppoiterations: int,
        epsclip: float,
        entropycoeff: float,
        pullbackstrength: float,
        stddevfallback: bool,
    ) -> None:
        super().__init__(
            group=group,
            i=i,
            bufferlength=bufferlength,
            actiondistribution=actiondistribution,
            optimizer=optimizer,
            ppoiterations=ppoiterations,
            epsclip=epsclip,
            entropycoeff=entropycoeff,
            pullbackstrength=pullbackstrength,
            stddevfallback=stddevfallback,
        )

    def logprob(self, _):
        """Return the behaviour-policy log-probs recorded in the buffer.

        The argument is ignored: the old log-probabilities were stored at the
        time each action was taken rather than recomputed here.
        """
        stored = buffer.get("logprob", self.buffer)
        return stored.unsqueeze(dim=1)

    def update(self):
        """Run the PPO update without clearing the buffer afterwards."""
        # Old samples age out naturally as the bounded deque overwrites them.
        self._update()
def algorithmgroupfactory(*, kind: str, count: int, **kwargs) -> list[Algorithm]:
    """Instantiate ``count`` new algorithms for a particular group.

    Keyword Arguments:
        kind (str): The type of algorithm to instantiate.
            "vpgbandit" -> VPGBandit
            "ppobandit" -> PPOBandit
            "rmppobandit" -> RollingMemoryPPOBandit
            "predetermined" -> PredeterminedAlgorithm
        count (int): The number of entities in this group.

    Returns:
        list[Algorithm]: A list of instantiated algorithms.
    """
    registry = {
        "vpgbandit": VPGBandit,
        "ppobandit": PPOBandit,
        "rmppobandit": RollingMemoryPPOBandit,
        "predetermined": PredeterminedAlgorithm,
    }
    # Each member of the group receives a distinct id `i`.
    return [experiment.factory(kind, registry, i=ix, **kwargs) for ix in range(count)]
def optimizerfactory(*, kind: str, **kwargs) -> optim.Optimizer:
    """Construct the requested torch optimiser.

    Keyword Arguments:
        kind (str): The type of optimiser to instantiate.
            "adam" -> optim.Adam
            "sgd" -> optim.SGD
            "rmsprop" -> optim.RMSprop

    Returns:
        optim.Optimizer: The optimiser
    """
    registry = {"adam": optim.Adam, "sgd": optim.SGD, "rmsprop": optim.RMSprop}
    return experiment.factory(kind, registry, **kwargs)
| 15,179 | 31.229299 | 93 | py |
autoagora-agents | autoagora-agents-master/autoagora_agents/distribution.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod, abstractproperty
from typing import Union
import numpy as np
import torch
from torch import nn
import experiment
ArrayLike = Union[np.ndarray, torch.Tensor]
class Distribution(ABC):
    """The base class for distributions.

    Subclasses model a (possibly trainable) probability distribution over
    actions; concrete implementations must provide the properties and
    methods declared abstract here.
    """

    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    def reset(self) -> None:
        """Reset the distribution to its initial values."""

    # NOTE: `abc.abstractproperty` is deprecated (since Python 3.3); the
    # stacked @property/@abstractmethod pair below is the supported spelling
    # and behaves identically for subclasses overriding with @property.
    @property
    @abstractmethod
    def initial_mean(self) -> torch.Tensor:
        """torch.Tensor: Initial mean of the distribution."""

    @property
    @abstractmethod
    def initial_stddev(self) -> torch.Tensor:
        """torch.Tensor: Initial standard deviation of the distribution."""

    @property
    @abstractmethod
    def logstddev(self) -> torch.Tensor:
        """torch.Tensor: The log standard deviation of the distribution."""

    @property
    @abstractmethod
    def mean(self) -> torch.Tensor:
        """torch.Tensor: Mean of the distribution."""

    @property
    @abstractmethod
    def unclampedmean(self) -> torch.Tensor:
        """torch.Tensor: Unclamped mean of the distribution."""

    @property
    @abstractmethod
    def stddev(self) -> torch.Tensor:
        """torch.Tensor: Standard deviation of the distribution."""

    @property
    @abstractmethod
    def distribution(self) -> torch.distributions.Distribution:
        """torch.distributions.Distribution: The torch distribution."""

    @property
    @abstractmethod
    def params(self) -> list[torch.Tensor]:
        """list[torch.Tensor]: The trainable parameters."""

    @abstractmethod
    def sample(self) -> torch.Tensor:
        """torch.Tensor: Sample the distribution."""

    @abstractmethod
    def logprob(self, x: torch.Tensor) -> torch.Tensor:
        """The log probability of the PDF at x.

        Arguments:
            x (torch.Tensor): A sample.

        Returns:
            torch.Tensor: The log probability.
        """

    @abstractmethod
    def entropy(self) -> torch.Tensor:
        """The entropy of the distribution."""
class GaussianDistribution(Distribution):
    """A Gaussian distribution with clamped, trainable parameters.

    Keyword Arguments:
        initial_mean (ArrayLike): The means of each gaussian distribution. For example,
            for multi-product, you would set one initial mean per product.
        minmean (ArrayLike): The minimum value the mean can take on.
        maxmean (ArrayLike): The maximum value the mean can take on.
        initial_stddev (ArrayLike): The standard deviations of each gaussian
            distribution.
        minstddev (ArrayLike): The minimum value the standard deviation can take on.
        maxstddev (ArrayLike): The maximum value the standard deviation can take on.

    Attributes:
        mean (torch.Tensor): The clamped mean of the distribution.
        minmean (torch.Tensor): The minimum value the mean can take on.
        maxmean (torch.Tensor): The maximum value the mean can take on.
        stddev (torch.Tensor): The clamped standard deviation of the distribution.
        minstddev (torch.Tensor): The minimum value the standard deviation can take on.
        maxstddev (torch.Tensor): The maximum value the standard deviation can take on.
    """

    def __init__(
        self,
        *,
        initial_mean: ArrayLike,
        minmean: ArrayLike,
        maxmean: ArrayLike,
        initial_stddev: ArrayLike,
        minstddev: ArrayLike,
        maxstddev: ArrayLike,
    ) -> None:
        super().__init__()
        self._initial_mean = torch.as_tensor(initial_mean)
        self.maxmean = torch.as_tensor(maxmean)
        self.minmean = torch.as_tensor(minmean)
        self._mean = nn.parameter.Parameter(self.initial_mean)
        self._initial_stddev = torch.as_tensor(initial_stddev)
        self.maxstddev = torch.as_tensor(maxstddev)
        self.minstddev = torch.as_tensor(minstddev)
        # The stddev is trained in log space so positivity is structural.
        self._logstddev = nn.parameter.Parameter(torch.log(self.initial_stddev))

    @property
    def initial_mean(self) -> torch.Tensor:
        return self._initial_mean

    @property
    def initial_stddev(self) -> torch.Tensor:
        return self._initial_stddev

    @property
    def unclampedmean(self) -> torch.Tensor:  # type: ignore
        """torch.Tensor: The raw (unclamped) trainable mean."""
        return self._mean

    @property
    def mean(self) -> torch.Tensor:
        return torch.clamp(self._mean, min=self.minmean, max=self.maxmean)

    @property
    def logstddev(self) -> torch.Tensor:  # type: ignore
        """torch.Tensor: The trainable log standard deviation."""
        return self._logstddev

    @property
    def stddev(self) -> torch.Tensor:
        return torch.clamp(
            torch.exp(self._logstddev), min=self.minstddev, max=self.maxstddev
        )

    @property
    def distribution(self) -> torch.distributions.Distribution:
        return torch.distributions.Normal(loc=self.mean, scale=self.stddev)

    @property
    def params(self) -> list[torch.Tensor]:
        return [self._mean, self._logstddev]

    def reset(self) -> None:
        # Fresh Parameter objects restore the initial (pre-training) state.
        self._mean = nn.parameter.Parameter(self.initial_mean)
        self._logstddev = nn.parameter.Parameter(torch.log(self.initial_stddev))

    def sample(self) -> torch.Tensor:
        return self.distribution.rsample().detach()

    def logprob(self, x: torch.Tensor) -> torch.Tensor:
        return self.distribution.log_prob(x)

    def entropy(self) -> torch.Tensor:
        return self.distribution.entropy()
class ScaledGaussianDistribution(GaussianDistribution):
    """A Gaussian distribution wherein the gaussian is in a scaled space.

    In the scaled space, the mean is multiplied by the inverse scale factor and then put
    into log space. This also applies to the bounds on the mean below.

    Keyword Arguments:
        scalefactor (np.ndarray): The scale factor for each gaussian distribution.

    Attributes:
        scalefactor (torch.Tensor): The scale factor for each gaussian distribution.
    """

    def __init__(
        self,
        *,
        initial_mean: ArrayLike,
        minmean: ArrayLike,
        maxmean: ArrayLike,
        initial_stddev: ArrayLike,
        minstddev: ArrayLike,
        maxstddev: ArrayLike,
        scalefactor: np.ndarray,
    ) -> None:
        # scalefactor must be assigned before super().__init__ because
        # inversescale() (used in the arguments below) reads it.
        self.scalefactor = torch.as_tensor(scalefactor)
        super().__init__(
            initial_mean=self.inversescale(torch.as_tensor(initial_mean)),
            minmean=self.inversescale(torch.as_tensor(minmean)),
            maxmean=self.inversescale(torch.as_tensor(maxmean)),
            initial_stddev=initial_stddev,
            maxstddev=maxstddev,
            minstddev=minstddev,
        )

    @property
    def invscalefactor(self) -> torch.Tensor:
        """torch.Tensor: The inverse scale factor for each gaussian distribution."""
        return 1 / self.scalefactor

    def inversescale(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the inverse scaling operation to x: log(x / scalefactor)."""
        return torch.log(torch.multiply(self.invscalefactor, x))

    def scale(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the scaling operation to x: scalefactor * exp(x).

        This is the exact inverse of inversescale().
        """
        return torch.multiply(self.scalefactor, torch.exp(x))

    def sample(self) -> torch.Tensor:
        """Sample and return values in the scaled space."""
        return self.scale(self.unscaledsample())

    def unscaledsample(self) -> torch.Tensor:
        """Sample and return values in the unscaled space."""
        return self.distribution.rsample().detach()

    def logprob(self, x: torch.Tensor) -> torch.Tensor:
        """The log probability of the PDF at x.

        Arguments:
            x (torch.Tensor): A sample in the scaled space.

        Returns:
            torch.Tensor: The log probability.
        """
        # Map back into the space the underlying gaussian lives in before
        # evaluating the density.
        y = self.inversescale(x)
        return self.distribution.log_prob(y)
# We don't make this a subclass of GaussianDistribution with stddev 0
# because torch.distributions.Normal doesn't allow stddev = 0
class DegenerateDistribution(Distribution):
    """A degenerate (deterministic) distribution.

    Keyword Arguments:
        initial_value (np.ndarray): The initial value of the distribution.
        minvalue (np.ndarray): The minimum value of the distribution.
        maxvalue (np.ndarray): The maximum value of the distribution.

    Attributes:
        initial_value (torch.Tensor): The initial value of the distribution.
        minvalue (torch.Tensor): The minimum value of the distribution.
        maxvalue (torch.Tensor): The maximum value of the distribution.
        value (torch.Tensor): The clamped value of the distribution.
    """

    def __init__(
        self,
        *,
        initial_value: np.ndarray,
        minvalue: np.ndarray,
        maxvalue: np.ndarray,
    ) -> None:
        super().__init__()
        self.initial_value = torch.as_tensor(initial_value)
        self.minvalue = torch.as_tensor(minvalue)
        self.maxvalue = torch.as_tensor(maxvalue)
        self._value = nn.parameter.Parameter(self.initial_value)

    @property
    def value(self) -> torch.Tensor:
        return torch.clip(self._value, min=self.minvalue, max=self.maxvalue)

    @property
    def mean(self) -> torch.Tensor:
        # A degenerate distribution's mean is its single point of support.
        return self.value

    @property
    def stddev(self) -> torch.Tensor:
        # Deterministic: zero spread.
        return torch.zeros_like(self.value)

    @property
    def logstddev(self) -> torch.Tensor:
        # log(0) == -inf; callers using logstddev (e.g. regularisers) should
        # be aware of this.
        return torch.log(self.stddev)

    @property
    def unclampedmean(self) -> torch.Tensor:  # type: ignore
        """torch.Tensor: Unclamped mean of the distribution."""
        return self._value

    @property
    def initial_mean(self) -> torch.Tensor:
        return self.initial_value

    @property
    def initial_stddev(self) -> torch.Tensor:
        return self.stddev

    @property
    def params(self) -> list[torch.Tensor]:
        return [self._value]

    def reset(self) -> None:
        self._value = nn.parameter.Parameter(self.initial_value)

    def sample(self) -> torch.Tensor:
        # Sampling a degenerate distribution always yields the value itself.
        return self.value

    def logprob(self, _: torch.Tensor) -> torch.Tensor:
        # log(1) == 0 elementwise: all probability mass sits on `value`.
        return torch.zeros_like(self._value)

    def entropy(self) -> torch.Tensor:
        # A deterministic distribution carries no uncertainty.
        return torch.zeros_like(self._value)

    @property
    def distribution(self) -> torch.distributions.Distribution:
        # NOTE(review): this constructs a Normal with scale 0, which the class
        # comment above says torch disallows (it raises under argument
        # validation) — confirm this property is never actually exercised.
        return torch.distributions.Normal(
            loc=self.value, scale=torch.zeros_like(self.value)
        )
def distributionfactory(*, kind: str, **kwargs) -> Distribution:
    """Instantiate a new distribution.

    Keyword Arguments:
        kind (str): The type of distribution to instantiate.
            "gaussian" -> GaussianDistribution
            "scaledgaussian" -> ScaledGaussianDistribution
            "degenerate" -> DegenerateDistribution

    Returns:
        Distribution: An instantiated distribution.
    """
    registry = {
        "gaussian": GaussianDistribution,
        "scaledgaussian": ScaledGaussianDistribution,
        "degenerate": DegenerateDistribution,
    }
    return experiment.factory(kind, registry, **kwargs)
| 11,376 | 31.229462 | 88 | py |
autoagora-agents | autoagora-agents-master/autoagora_agents/buffer.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from collections import deque
from typing import Any
import torch
def buffer(*, maxlength: int) -> deque[dict[str, Any]]:
    """Create an empty, bounded sample buffer.

    Keyword Arguments:
        maxlength (int): The maximum length of the buffer.

    Returns:
        deque[dict[str, Any]]: The empty buffer.
    """
    return deque(maxlen=maxlength)
def isfull(b: deque[dict[str, torch.Tensor]]) -> bool:
    """Return True when the buffer has reached its maximum length."""
    return b.maxlen == len(b)
def get(k: str, b: deque[dict[str, Any]]) -> torch.Tensor:
    """Collect the value at key ``k`` from every element of the buffer.

    Arguments:
        k (str): The key.
        b (deque[dict[str, Any]]): The buffer.

    Returns:
        torch.Tensor: The matching elements, stacked into a tensor.
    """
    values = [entry[k] for entry in b]
    return torch.as_tensor(values)
| 909 | 22.333333 | 58 | py |
autoagora-agents | autoagora-agents-master/autoagora_agents/controller.py | # Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Any
import numpy as np
import torch
from autoagora_agents.algorithm import Algorithm, algorithmgroupfactory
class Controller:
    """Holds all algorithms and routes information to each.

    Keyword Arguments:
        agents (list[dict[str, Any]]): A list of the configs for each agent
            group.
        seed (int): The seed for torch.

    Attributes:
        groups (dict[str, Algorithm]): A dictionary mapping agent groups to algorithms.
    """

    def __init__(self, *, agents: list[dict[str, Any]], seed: int) -> None:
        self.groups = {cfg["group"]: algorithmgroupfactory(**cfg) for cfg in agents}
        # Seed every RNG source used by the algorithms for reproducibility.
        torch.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)

    def __call__(
        self,
        *,
        observations: dict[str, np.ndarray],
        actions: dict[str, np.ndarray],
        rewards: dict[str, float],
        dones: dict[str, bool]
    ) -> dict[str, np.ndarray]:
        """Call each algorithm.

        Keyword Arguments:
            observations (dict[str, np.ndarray]): The observations of each agent.
            actions (dict[str, np.ndarray]): The action of each agent.
            rewards (dict[str, float]): The reward received by each agent.
            dones (dict[str, bool]): Whether each agent is done.

        Returns:
            dict[str, np.ndarray]: The next actions of each agent.
        """
        nextactions = {}
        for algorithm in self.algorithmslist:
            name = algorithm.name
            nextactions[name] = algorithm(
                observation=observations[name],
                action=actions[name],
                reward=rewards[name],
                done=dones[name],
            )
        return nextactions

    def update(self) -> None:
        """Update each algorithm."""
        for algorithm in self.algorithmslist:
            algorithm.update()

    @property
    def algorithmslist(self) -> list[Algorithm]:
        """The algorithms for agents in each group, flattened into one list."""
        flat: list[Algorithm] = []
        for members in self.groups.values():
            flat.extend(members)
        return flat
| 2,147 | 28.833333 | 87 | py |
flink | flink-master/flink-python/docs/conf.py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import os
import sys
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
# project = u'Flink Python Table API'
project = u'PyFlink'
copyright = u''
author = u'Author'
version_file = os.path.join("..", 'pyflink/version.py')
try:
    # Execute version.py to pick up __version__ without importing pyflink
    # itself. The context manager closes the file handle, which the previous
    # bare exec(open(...).read()) leaked.
    with open(version_file) as f:
        exec(f.read())
except IOError:
    print("Failed to load PyFlink version file for packaging. " +
          "'%s' not found!" % version_file,
          file=sys.stderr)
    sys.exit(-1)

# The short X.Y version
version = __version__  # noqa

# The full version, including alpha/beta/rc tags
release = os.environ.get('RELEASE_VERSION', version)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx_mdinclude'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Look at the first line of the docstring for function and method signatures.
autosummary_generate = True
autodoc_docstring_signature = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"collapse_navigation": True,
"navigation_depth": 0
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../docs/static/navbar-brand-logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = html_logo
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyflinkdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyflink.tex', u'pyflink Documentation',
[author], 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyflink', u'pyflink Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyflink', u'pyflink Documentation',
author, 'pyflink', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyflink'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2019, Author'
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 7,437 | 32.205357 | 80 | py |
flink | flink-master/flink-python/pyflink/datastream/connectors/cassandra.py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.common import Duration
from pyflink.java_gateway import get_gateway
__all__ = [
'CassandraSink',
'ConsistencyLevel',
'MapperOptions',
'ClusterBuilder',
'CassandraCommitter',
'CassandraFailureHandler'
]
# ---- Classes introduced to construct the MapperOptions ----
class ConsistencyLevel(Enum):
    """
    The consistency level.

    Members mirror the Java enum ``com.datastax.driver.core.ConsistencyLevel``;
    the member *name* (not its integer value) is used for the Java lookup.
    """
    ANY = 0
    ONE = 1
    TWO = 2
    THREE = 3
    QUORUM = 4
    ALL = 5
    LOCAL_QUORUM = 6
    EACH_QUORUM = 7
    SERIAL = 8
    LOCAL_SERIAL = 9
    LOCAL_ONE = 10

    def _to_j_consistency_level(self):
        """Return the Java ConsistencyLevel constant with the same name."""
        JConsistencyLevel = get_gateway().jvm.com.datastax.driver.core.ConsistencyLevel
        return getattr(JConsistencyLevel, self.name)
class MapperOptions(object):
    """
    This class is used to configure a Mapper after deployment.

    Wraps a Java ``SimpleMapperOptions`` instance; each setter forwards to it
    and returns ``self`` so calls can be chained fluently.
    """

    def __init__(self):
        """
        A simple method to construct MapperOptions.

        Example:
        ::

            >>> mapper_option = MapperOptions() \\
            ...     .ttl(1800) \\
            ...     .timestamp(3600) \\
            ...     .consistency_level(ConsistencyLevel.ANY) \\
            ...     .tracing(True) \\
            ...     .save_null_fields(True)
        """
        JSimpleMapperOptions = get_gateway().jvm.org.apache.flink.streaming.connectors. \
            cassandra.SimpleMapperOptions
        self._j_mapper_options = JSimpleMapperOptions()

    def ttl(self, ttl: int) -> 'MapperOptions':
        """
        Creates a new Option object to add time-to-live to a mapper operation. This is only
        valid for save operations.
        """
        self._j_mapper_options.ttl(ttl)
        return self

    def timestamp(self, timestamp: int) -> 'MapperOptions':
        """
        Creates a new Option object to add a timestamp to a mapper operation. This is only
        valid for save and delete operations.
        """
        self._j_mapper_options.timestamp(timestamp)
        return self

    def consistency_level(self, cl: ConsistencyLevel) -> 'MapperOptions':
        """
        Creates a new Option object to add a consistency level value to a mapper operation.
        This is valid for save, delete and get operations.
        """
        # Translate the Python enum member into its Java counterpart.
        self._j_mapper_options.consistencyLevel(cl._to_j_consistency_level())
        return self

    def tracing(self, enabled: bool) -> 'MapperOptions':
        """
        Creates a new Option object to enable query tracing for a mapper operation. This is
        valid for save, delete and get operations.
        """
        self._j_mapper_options.tracing(enabled)
        return self

    def save_null_fields(self, enabled: bool) -> 'MapperOptions':
        """
        Creates a new Option object to specify whether null entity fields should be included in
        insert queries. This option is valid only for save operations.
        """
        self._j_mapper_options.saveNullFields(enabled)
        return self

    def if_not_exists(self, enabled: bool) -> 'MapperOptions':
        """
        Creates a new Option object to specify whether an IF NOT EXISTS clause should be included in
        insert queries. This option is valid only for save operations.

        If this option is not specified, it defaults to false (IF NOT EXISTS statements are not
        used).
        """
        self._j_mapper_options.ifNotExists(enabled)
        return self
class ClusterBuilder(object):
    """
    This class is used to configure a Cluster after deployment. The cluster represents the
    connection that will be established to Cassandra.
    """

    def __init__(self, j_cluster_builder):
        # Wrapped Java ClusterBuilder instance; accessed by the other
        # wrapper classes in this module (e.g. CassandraCommitter).
        self._j_cluster_builder = j_cluster_builder
class CassandraCommitter(object):
    """
    CheckpointCommitter that saves information about completed checkpoints within a separate table
    in a cassandra database.
    """

    def __init__(self, j_checkpoint_committer):
        # Py4J handle to the Java CheckpointCommitter implementation.
        self._j_checkpoint_committer = j_checkpoint_committer

    @staticmethod
    def default_checkpoint_committer(builder: ClusterBuilder, key_space: str = None) \
            -> 'CassandraCommitter':
        """
        CheckpointCommitter that saves information about completed checkpoints within a separate
        table in a cassandra database.

        Entries are in the form: | operator_id | subtask_id | last_completed_checkpoint |
        """
        j_committer_class = get_gateway().jvm.org.apache.flink.streaming.connectors. \
            cassandra.CassandraCommitter
        # The Java class offers two constructors; pick the one matching the
        # presence of an explicit key space.
        if key_space is None:
            ctor_args = (builder._j_cluster_builder,)
        else:
            ctor_args = (builder._j_cluster_builder, key_space)
        return CassandraCommitter(j_committer_class(*ctor_args))
class CassandraFailureHandler(object):
    """
    Handle a failed Throwable.
    """

    def __init__(self, j_cassandra_failure_handler):
        # Py4J handle to the Java CassandraFailureHandler implementation.
        self._j_cassandra_failure_handler = j_cassandra_failure_handler

    @staticmethod
    def no_op() -> 'CassandraFailureHandler':
        """
        A CassandraFailureHandler that simply fails the sink on any failures.
        This is also the default failure handler if not specified.
        """
        jvm = get_gateway().jvm
        j_handler = jvm.org.apache.flink.streaming.connectors.cassandra.NoOpCassandraFailureHandler()
        return CassandraFailureHandler(j_handler)
# ---- CassandraSink ----
class CassandraSink(object):
    """
    Sets the ClusterBuilder for this sink. A ClusterBuilder is used to configure the connection to
    cassandra.
    """

    def __init__(self, j_cassandra_sink):
        # Py4J handle to the underlying Java CassandraSink.
        self._j_cassandra_sink = j_cassandra_sink

    def name(self, name: str) -> 'CassandraSink':
        """
        Set the name of this sink. This name is used by the visualization and logging during
        runtime.

        :param name: human-readable operator name.
        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.name(name)
        return self

    def uid(self, uid: str) -> 'CassandraSink':
        """
        Sets an ID for this operator. The specified ID is used to assign the same operator ID
        across job submissions (for example when starting a job from a savepoint).

        Note that this ID needs to be unique per transformation and job. Otherwise, job submission
        will fail.

        :param uid: the stable, user-provided operator ID.
        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.uid(uid)
        return self

    def set_uid_hash(self, uid_hash: str) -> 'CassandraSink':
        """
        Sets an user provided hash for this operator. This will be used AS IS the create the
        JobVertexID.

        The user provided hash is an alternative to the generated hashes, that is considered when
        identifying an operator through the default hash mechanics fails (e.g. because of changes
        between Flink versions).

        Note that this should be used as a workaround or for trouble shooting. The provided hash
        needs to be unique per transformation and job. Otherwise, job submission will fail.
        Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
        chain and trying so will let your job fail.

        A use case for this is in migration between Flink versions or changing the jobs in a way
        that changes the automatically generated hashes. In this case, providing the previous hashes
        directly through this method (e.g. obtained from old logs) can help to reestablish a lost
        mapping from states to their target operator.

        :param uid_hash: the hash used verbatim as the JobVertexID.
        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.setUidHash(uid_hash)
        return self

    def set_parallelism(self, parallelism: int) -> 'CassandraSink':
        """
        Sets the parallelism for this sink. The degree must be higher than zero.

        :param parallelism: number of parallel sink instances (> 0).
        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.setParallelism(parallelism)
        return self

    def disable_chaining(self) -> 'CassandraSink':
        """
        Turns off chaining for this operator so thread co-location will not be used as an
        optimization.

        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.disableChaining()
        return self

    def slot_sharing_group(self, slot_sharing_group: str) -> 'CassandraSink':
        """
        Sets the slot sharing group of this operation. Parallel instances of operations that are in
        the same slot sharing group will be co-located in the same TaskManager slot, if possible.

        Operations inherit the slot sharing group of input operations if all input operations are in
        the same slot sharing group and no slot sharing group was explicitly specified.

        Initially an operation is in the default slot sharing group. An operation can be put into
        the default group explicitly by setting the slot sharing group to {@code "default"}.

        :param slot_sharing_group: name of the slot sharing group.
        :return: this sink, for call chaining.
        """
        self._j_cassandra_sink.slotSharingGroup(slot_sharing_group)
        return self

    @staticmethod
    def add_sink(input) -> 'CassandraSinkBuilder':
        """
        Writes a DataStream into a Cassandra database.

        :param input: the DataStream to write; its wrapped Java stream is handed to
                      the Java ``CassandraSink.addSink`` factory.
        :return: a :class:`CassandraSink.CassandraSinkBuilder` for further configuration.
        """
        JCassandraSink = get_gateway().jvm \
            .org.apache.flink.streaming.connectors.cassandra.CassandraSink
        j_cassandra_sink_builder = JCassandraSink.addSink(input._j_data_stream)
        return CassandraSink.CassandraSinkBuilder(j_cassandra_sink_builder)

    class CassandraSinkBuilder(object):
        """
        Builder for a CassandraSink.
        """

        def __init__(self, j_cassandra_sink_builder):
            # Py4J handle to the Java CassandraSinkBuilder.
            self._j_cassandra_sink_builder = j_cassandra_sink_builder

        def set_query(self, query: str) -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the query that is to be executed for every record.

            :param query: the CQL statement executed per record.
            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.setQuery(query)
            return self

        def set_host(self, host: str, port: int = 9042) -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the cassandra host/port to connect to.

            :param host: contact-point host name or address.
            :param port: native-protocol port (defaults to 9042).
            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.setHost(host, port)
            return self

        def set_cluster_builder(self, builder: ClusterBuilder) \
                -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the ClusterBuilder for this sink. A ClusterBuilder is used to configure the
            connection to cassandra.

            :param builder: the :class:`ClusterBuilder` describing the connection.
            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.setClusterBuilder(builder._j_cluster_builder)
            return self

        def enable_write_ahead_log(self, committer: CassandraCommitter = None) \
                -> 'CassandraSink.CassandraSinkBuilder':
            """
            Enables the write-ahead log, which allows exactly-once processing for non-deterministic
            algorithms that use idempotent updates.

            :param committer: optional :class:`CassandraCommitter` tracking completed
                              checkpoints; when omitted the Java-side default is used.
            :return: this builder, for call chaining.
            """
            if committer is None:
                self._j_cassandra_sink_builder.enableWriteAheadLog()
            else:
                self._j_cassandra_sink_builder.enableWriteAheadLog(
                    committer._j_checkpoint_committer)
            return self

        def set_mapper_options(self, options: MapperOptions) \
                -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the mapper options for this sink. The mapper options are used to configure the
            DataStax com.datastax.driver.mapping.Mapper when writing POJOs.

            This call has no effect if the input DataStream for this sink does not contain POJOs.

            :param options: the :class:`MapperOptions` to apply when writing POJOs.
            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.setMapperOptions(options._j_mapper_options)
            return self

        def set_failure_handler(self, failure_handler: CassandraFailureHandler) \
                -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the failure handler for this sink. The failure handler is used to provide custom
            error handling.

            :param failure_handler: the :class:`CassandraFailureHandler` to invoke on errors.
            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.setFailureHandler(
                failure_handler._j_cassandra_failure_handler)
            return self

        def set_max_concurrent_requests(self,
                                        max_concurrent_requests: int,
                                        duration: Duration = None) \
                -> 'CassandraSink.CassandraSinkBuilder':
            """
            Sets the maximum allowed number of concurrent requests for this sink.

            :param max_concurrent_requests: upper bound on in-flight requests.
            :param duration: optional timeout to wait for a permit before failing.
            :return: this builder, for call chaining.
            """
            if duration is None:
                self._j_cassandra_sink_builder.setMaxConcurrentRequests(max_concurrent_requests)
            else:
                self._j_cassandra_sink_builder.setMaxConcurrentRequests(
                    max_concurrent_requests, duration._j_duration)
            return self

        def enable_ignore_null_fields(self) -> 'CassandraSink.CassandraSinkBuilder':
            """
            Enables ignoring null values, treats null values as unset and avoids writing null fields
            and creating tombstones.

            This call has no effect if CassandraSinkBuilder.enableWriteAheadLog() is called.

            :return: this builder, for call chaining.
            """
            self._j_cassandra_sink_builder.enableIgnoreNullFields()
            return self

        def build(self) -> 'CassandraSink':
            """
            Finalizes the configuration of this sink.

            :return: the fully configured :class:`CassandraSink`.
            """
            return CassandraSink(self._j_cassandra_sink_builder.build())
| 14,269 | 37.567568 | 100 | py |
Robust-Training-for-Time-Series | Robust-Training-for-Time-Series-main/CNNmodel.py | import sys
import os
import numpy as np
import tensorflow as tf
# Enable memory growth on every visible GPU so TensorFlow allocates VRAM on
# demand instead of reserving the whole device up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
import pickle as pkl
from GAK import tf_gak
def clip_tensor(X, eps, norm=np.inf):
    """Constrain tensor *X* to the ball of radius *eps* under the given norm.

    Supports only the infinity norm (element-wise clamp) and the L2 norm
    (per-sample rescaling over all non-batch axes). Raises ValueError for
    any other value of *norm*.
    """
    if norm not in [np.inf, 2]:
        raise ValueError('Inadequate norm')
    # Reduce over every axis except the leading (batch) axis.
    axis = list(range(1, len(X.get_shape())))
    avoid_zero_div = 1e-12
    if norm == np.inf:
        # Element-wise clamp keeps each entry inside [-eps, eps].
        return tf.clip_by_value(X, -eps, eps)
    # L2 case: shrink samples whose norm exceeds eps; leave the rest untouched.
    l2 = tf.sqrt(tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(X), axis, keepdims=True)))
    scale = tf.minimum(1., tf.math.divide(eps, l2))
    return X * scale
def dtw_differntiable(path, x, y, tf_norm=2):
    """
    Make the optimal path a distance function.

    Sums ||x[i] - y[j]|| over every aligned pair (i, j) of *path*, yielding a
    DTW cost that is differentiable with respect to x and y.
    """
    x_path = tf.convert_to_tensor(path[0])
    y_path = tf.convert_to_tensor(path[1])
    if len(x_path) != len(y_path):
        raise ValueError("Error in DTW path length")
    # Seed with the first aligned pair, then accumulate the remaining ones.
    dtw_dist = tf.norm(x[x_path[0]] - y[y_path[0]], ord=tf_norm)
    for step in range(1, len(x_path)):
        pair_dist = tf.norm(x[x_path[step]] - y[y_path[step]], ord=tf_norm)
        dtw_dist = tf.add(dtw_dist, pair_dist)
    return dtw_dist
#CNN Architecture
#CNN Architecture
class cnn_class():
    """Small CNN classifier for time-series segments with standard and
    robust (ROTS) training loops.

    Input tensors are expected shaped (batch, 1, seg_size, channel_nb);
    `trunk_model` emits logits, `model` appends a Softmax head.
    """

    def __init__(self, name, seg_size, channel_nb, class_nb, arch='1'):
        # name: tag used in logs and checkpoint paths.
        # seg_size/channel_nb: segment length and channel count of the input.
        # class_nb: number of output classes.
        # arch: one of '0'..'3', selecting a fixed architecture variant.
        self.name = name
        self.seg_size = seg_size
        self.channel_nb = channel_nb
        self.class_nb = class_nb
        self.x_holder = []
        self.y_holder = []
        self.y_ =[]
        if arch=='0':
            self.trunk_model = tf.keras.Sequential([
                #Layers
                tf.keras.layers.Conv2D(20,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 2), strides=2),
                #Fully connected layer
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(512, activation=tf.nn.relu),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.BatchNormalization(),
                #Logits layer
                tf.keras.layers.Dense(self.class_nb)
            ])
        # NOTE(review): this is `if`, not `elif` — arch '0' is immediately
        # overwritten unless arch != '1'; harmless only because '0' and '1'
        # are mutually exclusive values.
        if arch=='1':
            self.trunk_model = tf.keras.Sequential([
                #Layers
                tf.keras.layers.Conv2D(66,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 4), strides=4),
                #Fully connected layer
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(1024, activation=tf.nn.relu),
                tf.keras.layers.Dropout(0.15),
                tf.keras.layers.BatchNormalization(),
                #Logits layer
                tf.keras.layers.Dense(self.class_nb)
            ])
        elif arch=='2':
            self.trunk_model = tf.keras.Sequential([
                #Layers
                tf.keras.layers.Conv2D(100,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 4), strides=1),
                tf.keras.layers.Conv2D(50,[1, 5], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 4), strides=1),
                tf.keras.layers.Conv2D(50,[1, 3], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 2), strides=1),
                #Fully connected layer
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(200, activation=tf.nn.relu),
                tf.keras.layers.Dense(100, activation=tf.nn.relu),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.BatchNormalization(),
                #Logits layer
                tf.keras.layers.Dense(self.class_nb)
            ])
        elif arch=='3':
            self.trunk_model = tf.keras.Sequential([
                #Layers
                tf.keras.layers.Conv2D(100,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 4), strides=1),
                tf.keras.layers.Conv2D(50,[1, 6], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 4), strides=1),
                tf.keras.layers.Conv2D(25,[1, 3], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
                tf.keras.layers.MaxPooling2D((1, 2), strides=1),
                #Fully connected layer
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(100, activation=tf.nn.relu),
                tf.keras.layers.Dense(50, activation=tf.nn.relu),
                tf.keras.layers.Dropout(0.15),
                tf.keras.layers.BatchNormalization(),
                #Logits layer
                tf.keras.layers.Dense(self.class_nb)
            ])
        # Full model = trunk (logits) + softmax head.
        self.model = tf.keras.Sequential([self.trunk_model,
                                          tf.keras.layers.Softmax()])
        #Training Functions
        self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.Adam(1e-3)

    def train(self, train_set, checkpoint_path="TrainingRes/model_target", epochs=10, new_train=False):
        """Standard supervised training; when new_train is False the weights
        are simply restored from checkpoint_path instead of training."""
        @tf.function
        def train_step(X, y):
            with tf.GradientTape() as tape:
                pred = self.model(X, training=True)
                pred_loss = self.loss_fn(y, pred)
                total_loss = pred_loss
            gradients = tape.gradient(total_loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
        if not new_train:
            self.model.load_weights(checkpoint_path)
            sys.stdout.write("\nWeights loaded!")
        else:
            for ep in range(epochs):
                sys.stdout.write("\r{}: Epochs {}/{} . . .".format(self.name, ep+1, epochs))
                sys.stdout.flush()
                for X, y in train_set:
                    train_step(X, y)
            self.model.save_weights(checkpoint_path)
        sys.stdout.write("\n")

    def rots_train(self, train_set, a_shape, K, checkpoint_path="TrainingRes/rots_model",
                   gamma_gak=1, gak_sampled_paths=100, path_limit=100, gak_random_kill=5,
                   lbda=1.0, gamma_k=1, eta_k=1e-2, beta=5e-2, a_init=1e-2, omega=1e-3,
                   X_valid=[], y_valid=[],
                   uses_L2=False, new_train=False, verbose=False):
        """Robust training (ROTS): alternates model-weight updates with updates
        of an additive perturbation `self.a`, regularized either by the GAK
        similarity (default) or the L2 norm (uses_L2=True).

        NOTE(review): the mutable defaults X_valid=[]/y_valid=[] are shared
        across calls — safe only because they are never mutated here.
        """
        model_path = checkpoint_path+'/'+self.name
        # NOTE(review): dead code — the only call site is commented out below;
        # it also references `nb_batches` (undefined) and `warnings` (not
        # imported in this file), so calling it would raise NameError.
        def sample_function(input_data):
            rand_batch = np.random.randint(0, nb_batches)
            i=0
            warnings.warn("\nSample function details: nb_batches:{} - rand_batch:{}".format(nb_batches, rand_batch))
            for X, y in input_data:
                warnings.warn("\nSample function In-Loop: i:{} - X:{}".format(i, X))
                if i==rand_batch:
                    return X, y
                i += 1
        def dist_func(x1, x2, use_log=True, path_limit=path_limit):
            # GAK similarity, optionally as a negative log (distance-like) value.
            if use_log:
                return -tf.math.log(tf_gak(x1, x2, gamma_gak, path_limit=path_limit, random_kill=gak_random_kill))
            else:
                return tf_gak(x1, x2, gamma_gak, path_limit=path_limit, random_kill=gak_random_kill)
        @tf.function
        def GW_ro_train_step(X, y, a, lbda):
            # Gradient of the classification loss w.r.t. model weights at X + a.
            with tf.GradientTape() as tape1:
                pred = self.model(tf.add(X, a), training=True)
                loss_it = self.loss_fn(y, pred)
            G_w = tape1.gradient(loss_it, self.model.trainable_variables)
            # NOTE(review): sys.stdout.write takes a single string — this call
            # with two arguments raises TypeError whenever verbose is True.
            if verbose: sys.stdout.write("\n---Current Loss_w:", loss_it)
            return G_w
        @tf.function
        def Ga_ro_train_step(X, y, a, lbda):
            # Gradient w.r.t. the perturbation `a`, with GAK-based regularizer.
            with tf.GradientTape() as tape2:
                tape2.watch(a)
                D_nl = dist_func(X, tf.add(X, a), use_log=False, path_limit=path_limit) #D no log = h_ij
                self.omega = tf.add(tf.multiply(tf.subtract(tf.cast(1, dtype=tf.float64),beta), self.omega), tf.multiply(beta, D_nl))#line 8
            G_omega = tape2.gradient(-tf.math.log(self.omega), a)
            with tf.GradientTape() as tape2:
                tape2.watch(a)
                pred_a = self.model(tf.add(X, a), training=True)
                loss_it_a = tf.cast(self.loss_fn(y, pred_a), tf.float64)
            G_a_pred = tape2.gradient(loss_it_a, a)
            G_a = tf.add(G_a_pred, tf.multiply(lbda, tf.add(G_omega,a)))
            return G_a
        @tf.function
        def Ga_euclidean(X, y, a, lbda):
            # Gradient w.r.t. `a` with a plain L2 regularizer instead of GAK.
            with tf.GradientTape() as tape2:
                tape2.watch(a)
                pred = self.model(tf.add(X, a), training=True)
                loss_it = tf.cast(self.loss_fn(y, pred), tf.float64)
                D = tf.norm(a, ord='euclidean')
                loss_a = tf.add(loss_it, tf.multiply(lbda, D))
            grad_a = tape2.gradient(loss_a, a)
            G_a = tf.add(grad_a, tf.multiply(lbda, a))
            return G_a
        def rots_train_step(X, y):
            # One alternating update: weights first, then the perturbation.
            G_w = GW_ro_train_step(X, y, self.a, self.lbda) #Get the gradient
            self.ro_optimizer.apply_gradients(zip(G_w, self.model.trainable_variables)) #line 6
            if not uses_L2: #line 7
                G_a = Ga_ro_train_step(X, y, self.a, self.lbda)
            else:
                G_a = Ga_euclidean(X, y, self.a, self.lbda)
            self.a = tf.add(self.a, tf.multiply(self.gamma_k_value, G_a)) #line 13
        if not new_train:
            self.model.load_weights(model_path)
            sys.stdout.write("\nWeights loaded!")
        else:
            self.omega = omega
            #decaying l_r of eta_k
            boundaries = list(np.arange(np.ceil(K/4),K, 1e-2*K))
            values = [eta_k]
            for i, _ in enumerate(boundaries):
                values.append(eta_k/(2**(i+1)))
            lr_schedule_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)
            # NOTE(review): the schedule is evaluated once at step 0 here, so
            # the SGD learning rate is a constant eta_k, not a decaying one —
            # presumably `lr_schedule_fn` itself was meant to be passed; verify.
            self.ro_optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule_fn(tf.Variable(0)))
            #decaying l_r of gamma_k
            gamma_k_init = gamma_k
            gamma_k_decay = [gamma_k_init]
            for i in range(1,K):
                if i%10==0:
                    gamma_k_init /= 10
                gamma_k_decay.append(gamma_k_init)
            gamma_k_decay = tf.convert_to_tensor(gamma_k_decay, dtype=tf.float64)
            sys.stdout.write("\nROTS training ...")
            self.a = a_init * tf.ones(a_shape, dtype=tf.float64)
            beta= tf.Variable(beta, dtype=tf.float64)
            self.lbda = tf.cast(lbda, dtype=tf.float64)
            min_loss = np.inf
            k = 0
            # NOTE(review): `gamma_k_decay` holds exactly K entries, indexed by
            # the running batch counter k (starting at 1) — if train_set yields
            # K or more batches this indexing goes out of range; confirm K
            # matches the dataset size.
            for X, y in train_set:
                k += 1
                if k%10==1:sys.stdout.write("\nK={}/{}".format(k, K))
                sys.stdout.flush()
                #X, y = sample_function(train_set) #line 4
                self.gamma_k_value = gamma_k_decay[k]
                rots_train_step(X, y)
                sys.stdout.flush()
                self.model.save_weights(model_path)
                ### Save best weights
                if len(X_valid)>0:
                    pred_t = self.model(X_valid)
                    loss_t = self.loss_fn(y_valid, pred_t)
                    if loss_t <= min_loss:
                        best_W_T = self.ro_optimizer.get_weights()
                        if verbose: sys.stdout.write("\nBest weight validatio score: {:.2f}".format(self.score(X_valid, y_valid)))
                        min_loss = loss_t
                    sys.stdout.write(" . . . Validation Score: {:.2f}\n".format((self.score(X_valid, y_valid))))
                    sys.stdout.flush()
            if len(X_valid)>0:
                # Restore the optimizer state that achieved the best
                # validation loss, then persist the model.
                self.ro_optimizer.set_weights(best_W_T)
                self.model.save_weights(model_path)
        print()

    def predict(self, X):
        # Class indices via argmax over the softmax output.
        return tf.argmax(self.model(X, training=False), 1)

    def predict_stmax(self, X):
        # Raw logits from the trunk (despite the name, no softmax applied here).
        return self.trunk_model(X, training=False)

    def score(self, X, y):
        """Accuracy of the model on (X, y)."""
        X = tf.cast(X, tf.float64)
        acc = tf.keras.metrics.Accuracy()
        acc.reset_states()
        pred = self.predict(X)
        acc.update_state(pred, y)
        return acc.result().numpy()
| 12,895 | 42.275168 | 140 | py |
LightDepth | LightDepth-main/torch_implementation/scripts/dataloaders.py | # This file is mostly taken from BTS; author: Jin Han Lee, with only slight modifications
import os
import random
import numpy as np
import torch
import torch.utils.data.distributed
from torch.nn import MaxPool2d
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
def _is_pil_image(img):
    # True when *img* is a PIL Image instance.
    return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def preprocessing_transforms(mode):
    # Wrap the mode-aware ToTensor conversion in a torchvision Compose so it
    # can be passed directly as the Dataset's `transform`.
    return transforms.Compose([
        ToTensor(mode=mode)
    ])
class DepthDataLoader(object):
    """Thin wrapper building the torch DataLoader for one of three modes:
    'train' (shuffled, optionally distributed), 'online_eval' (batch size 1),
    or 'test' (batch size 1). The loader is exposed as `self.data`."""

    def __init__(self, config, mode):
        # Per-strategy early-stopping patience list; indexed by the dataset's
        # currently active strategy (see the property below).
        self._early_stopping_patience = config._early_stopping_patience
        if mode == 'train':
            self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
            if config.distributed:
                # DistributedSampler shards the dataset across processes;
                # shuffle must then be off on the DataLoader itself.
                self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.data_samples)
            else:
                self.train_sampler = None
            self.data = DataLoader(self.data_samples, config.train_batch_size,
                                   shuffle=(self.train_sampler is None),
                                   num_workers=config.num_threads,
                                   pin_memory=True,
                                   sampler=self.train_sampler)
        elif mode == 'online_eval':
            self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
            if config.distributed:  # redundant. here only for readability and to be more explicit
                # Give whole test set to all processes (and perform/report evaluation only on one) regardless
                self.eval_sampler = None
            else:
                self.eval_sampler = None
            self.data = DataLoader(self.data_samples, 1,
                                   shuffle=False,
                                   num_workers=1,
                                   pin_memory=False,
                                   sampler=self.eval_sampler)
        elif mode == 'test':
            self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
            self.data = DataLoader(self.data_samples, 1, shuffle=False, num_workers=1)
        else:
            print('mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))

    @property
    def early_stopping_patience(self):
        # Patience for the strategy the underlying dataset is currently using.
        return self._early_stopping_patience[self.data_samples.current_strategy]
def remove_leading_slash(s):
    """Strip a single leading '/' or '\\' from *s*.

    Paths listed in the filenames files may carry a leading separator;
    ``os.path.join`` would treat such a path as absolute and discard the
    data-root prefix, so the separator is removed here. Unlike the previous
    version, an empty string is returned unchanged instead of raising
    IndexError.

    :param s: path fragment, possibly starting with '/' or '\\'.
    :return: *s* without its first character when that character is a slash.
    """
    if s and s[0] in ('/', '\\'):
        return s[1:]
    return s
class DataLoadPreprocess(Dataset):
    """Dataset yielding {'image', 'depth'} pairs read from a filenames file,
    with mode-dependent preprocessing: KB-crop, random rotation, random crop,
    flip/photometric augmentation and optional ground-truth dilation."""

    def __init__(self, config, mode, transform=None):
        self.config = config
        # 'online_eval' reads the test split; all other modes the train split.
        if mode == 'online_eval':
            with open(config.test_filenames_file, 'r') as f:
                self.filenames = f.readlines()
        else:
            with open(config.train_filenames_file, 'r') as f:
                self.filenames = f.readlines()
        self.mode = mode
        self.transform = transform
        # Ground-truth dilation strategies; current_strategy is advanced
        # externally (see DepthDataLoader.early_stopping_patience).
        self.strategies = config.strategies
        self.current_strategy = 0
        self.multiple_strategy = config.multiple_strategy
        self.data_path = config.input_data_path
        self.gt_path = config.groundtruth_data_path
        self.do_kb_crop = config.do_kb_crop
        self.dataset = config.dataset
        self.do_random_rotate = config.do_random_rotate
        self.rotation_degree = config.rotation_degree
        self.input_height = config.input_height
        self.input_width = config.input_width

    def __getitem__(self, idx):
        # Each filenames line holds "<image_rel_path> <depth_rel_path> ...".
        sample_path = self.filenames[idx]
        image_path = os.path.join(self.data_path, remove_leading_slash(sample_path.split()[0]))
        depth_path = os.path.join(self.gt_path, remove_leading_slash(sample_path.split()[1]))
        image = Image.open(image_path)
        depth_gt = Image.open(depth_path)
        height = image.height
        width = image.width
        # From here on both are numpy arrays (image in [0, 1], depth H x W x 1).
        image = np.asarray(image, dtype=np.float32) / 255.0
        depth_gt = np.asarray(depth_gt, dtype=np.float32)
        depth_gt = np.expand_dims(depth_gt, axis=2)
        if self.do_kb_crop:
            # KITTI-benchmark crop: bottom 352 rows, centered 1216 columns.
            top_margin = int(height - 352)
            left_margin = int((width - 1216) / 2)
            depth_gt = depth_gt[top_margin:top_margin + 352,left_margin:left_margin + 1216]
            image = image[top_margin:top_margin + 352,left_margin:left_margin + 1216,:]
        if self.mode == 'train':
            # To avoid blank boundaries due to pixel registration
            if self.dataset == 'nyu':
                # NOTE(review): at this point image/depth_gt are numpy arrays,
                # which have no PIL .crop() method — this NYU branch would
                # raise AttributeError as written; confirm against the NYU path.
                depth_gt = depth_gt.crop((43, 45, 608, 472))
                image = image.crop((43, 45, 608, 472))
            if self.do_random_rotate:
                # NOTE(review): rotate_image calls PIL's Image.rotate — same
                # ndarray-vs-PIL concern as above; verify inputs here.
                random_angle = (random.random() - 0.5) * 2 * self.rotation_degree
                image = self.rotate_image(image, random_angle)
                depth_gt = self.rotate_image(depth_gt, random_angle, flag=Image.NEAREST)
            image, depth_gt = self.random_crop(image, depth_gt, self.input_height, self.input_width)
            image, depth_gt = self.train_augment(image, depth_gt)
            if self.multiple_strategy:
                depth_gt = self.dilation(depth_gt,**self.strategies[self.current_strategy])
        # Depth scale factor: NYU stores millimeters, KITTI uses 1/256 meters.
        if self.dataset == 'nyu':
            depth_gt = depth_gt / 1000.0
        else:
            depth_gt = depth_gt / 256.0
        sample = {'image': image, 'depth': depth_gt}
        if self.transform:
            sample = self.transform(sample)
        return sample

    def dilation(self,depth_gt,pool_size=(2,2),iterations=1): # TODO
        # NOTE(review): MaxPool2d expects a (N, C, H, W) torch tensor, but
        # depth_gt here is an H x W x 1 numpy array — this needs a
        # to-tensor/permute step before pooling; also the print below looks
        # like leftover debug output.
        if iterations> 0:
            for _ in range(iterations):
                depth_gt = MaxPool2d(kernel_size=pool_size)(depth_gt)
            print(depth_gt.shape)
        return depth_gt

    def rotate_image(self, image, angle, flag=Image.BILINEAR):
        # PIL rotation with the given resampling filter.
        result = image.rotate(angle, resample=flag)
        return result

    def random_crop(self, img, depth, height, width):
        # Same random window applied to image and depth (both H x W x C arrays).
        assert img.shape[0] >= height
        assert img.shape[1] >= width
        assert img.shape[0] == depth.shape[0]
        assert img.shape[1] == depth.shape[1]
        x = random.randint(0, img.shape[1] - width)
        y = random.randint(0, img.shape[0] - height)
        img = img[y:y + height, x:x + width, :]
        depth = depth[y:y + height, x:x + width, :]
        return img, depth

    def train_augment(self, image, depth_gt):
        # Random flipping
        do_flip = random.random()
        if do_flip > 0.5:
            image = (image[:, ::-1, :]).copy()
            depth_gt = (depth_gt[:, ::-1, :]).copy()
        # Random gamma, brightness, color augmentation
        do_augment = random.random()
        if do_augment > 0.5:
            image = self.augment_image(image)
        return image, depth_gt

    def augment_image(self, image):
        # gamma augmentation
        gamma = random.uniform(0.9, 1.1)
        image_aug = image ** gamma
        # brightness augmentation
        if self.dataset == 'nyu':
            brightness = random.uniform(0.75, 1.25)
        else:
            brightness = random.uniform(0.9, 1.1)
        image_aug = image_aug * brightness
        # color augmentation
        colors = np.random.uniform(0.9, 1.1, size=3)
        white = np.ones((image.shape[0], image.shape[1]))
        color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
        image_aug *= color_image
        image_aug = np.clip(image_aug, 0, 1)
        return image_aug

    def __len__(self):
        return len(self.filenames)
class ToTensor(object):
    """Sample transform: converts image (and, in 'train' mode, depth) to
    torch tensors and applies ImageNet mean/std normalization to the image."""

    def __init__(self, mode):
        self.mode = mode
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        image = sample['image']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = sample['depth']
        # Depth stays a numpy array outside training mode.
        if self.mode == 'train':
            depth = self.to_tensor(depth)
        return {'image': image, 'depth': depth}

    def to_tensor(self, pic):
        """Convert a PIL Image or H x W x C ndarray to a C x H x W tensor."""
        if not (_is_pil_image(pic) or _is_numpy_image(pic)):
            raise TypeError(
                'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
        if isinstance(pic, np.ndarray):
            # HWC -> CHW, no dtype change.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img
        # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            # NOTE(review): torch.ByteStorage.from_buffer is deprecated in
            # recent torch releases — verify against the pinned torch version.
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        # Byte images are returned as float (unscaled); others unchanged.
        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img | 9,302 | 36.063745 | 109 | py |
LightDepth | LightDepth-main/torch_implementation/scripts/models/ordinary_unet.py | import torch
class OrdinaryUNet():
    """NOTE(review): work-in-progress / non-functional skeleton. The nested
    UpConv2D helper is defined but never called, it mixes torch and Keras
    APIs (torch.nn.Upsample vs Conv2D/LeakyReLU, which are not imported
    here), references an undefined `encoder`, and torch.nn.Upsample((2, 2))
    sets a fixed *output size* of 2x2, not a 2x upscaling factor
    (scale_factor=2 is presumably what was intended) — confirm before use."""

    def __init__(self,config):
        def UpConv2D(tensor, filters, name, concat_with):
            up_i = torch.nn.Upsample((2, 2),mode='bilinear')(tensor)
            up_i = torch.cat([up_i, encoder.get_layer(concat_with).output]) # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i | 625 | 51.166667 | 110 | py |
LightDepth | LightDepth-main/tf_implementation/scripts/dataloaders.py | import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras.layers import MaxPooling2D
import os
class OrdinaryDataloader(object):
    """tf.data pipeline for RGB/depth pairs: decode, optional KB-crop,
    rotation/crop/flip/photometric augmentation for training, optional
    ground-truth dilation via max-pooling, batching and prefetch.
    The finished pipeline is exposed as `self.loader`."""

    def __init__(self, config,is_training=True,debug=False):
        self.do_flip = config.do_flip
        self.do_augment = config.do_augment
        self.do_rotate = config.do_rotate
        self.do_kb_crop = config.do_kb_crop
        self.use_normalized_image = config.use_normalized_image
        self.is_training = is_training
        self.debug = debug
        # Multi-strategy dilation only ever applies during training.
        if self.is_training:
            self.multiple_strategy = config.multiple_strategy
        else:
            self.multiple_strategy = False
        self.current_strategy = 0
        self._early_stopping_patience = config._early_stopping_patience
        # NOTE(review): rotation bound in radians, stored as a float16
        # tf.constant — low precision; presumably fine for +/- a few degrees.
        self.degree = np.deg2rad(tf.constant(config.rotation_degree,dtype=tf.float16))
        self.height = config.input_height
        self.width = config.input_width
        self.input_data_path = config.input_data_path
        self.groundtruth_data_path = config.groundtruth_data_path
        self.strategies = config.strategies
        if self.is_training:
            filenames = list(open(config.train_filenames_file))
            self.batch_size = config.train_batch_size
        else:
            filenames = list(open(config.test_filenames_file))
            self.batch_size = config.test_batch_size
        # Overfitting sanity check: repeat the first sample 8 times.
        if config.train_only_on_the_first_image:
            filenames = [tf.identity(filenames[0]) for i in range(8)]
        self.num_elements = len(filenames)
        self.loader = tf.data.Dataset.from_tensor_slices(filenames)
        if self.is_training:
            if not self.debug:
                self.loader = self.loader.shuffle(self.num_elements,
                                                  reshuffle_each_iteration=True)
            self.loader = self.loader.repeat()
            self.loader = self.loader.map(self.parse_function)
            self.loader = self.loader.map(self.train_preprocess)
            # NOTE(review): tf.py_function lets lazy_preprocess read the
            # mutable self.current_strategy each step, but it drops static
            # shape information from the mapped tensors — downstream code must
            # not rely on known shapes.
            self.loader = self.loader.map(
                lambda x,y: tf.py_function(
                    self.lazy_preprocess,
                    [x,y],
                    [tf.float32,
                     tf.float32]))
        else:
            self.loader = self.loader.map(self.parse_function)
            self.loader = self.loader.map(self.test_preprocess)
        self.loader = self.loader.batch(self.batch_size).prefetch(2)

    @property
    def early_stopping_patience(self):
        # Per-strategy patience during multi-strategy training; otherwise the
        # last configured value.
        if self.multiple_strategy:
            return self._early_stopping_patience[self.current_strategy]
        else:
            return self._early_stopping_patience[-1]

    @property
    def num_strategies(self):
        return len(self.strategies)

    def parse_function(self, line):
        """Decode one filenames line into (image, depth_gt) tensors."""
        paths = tf.strings.split(line)
        image = tf.image.decode_png(tf.io.read_file(self.input_data_path+paths[0]))
        image = tf.image.convert_image_dtype(image, tf.float32)
        # Depth PNGs are 16-bit; stored value / 256 = meters (KITTI format).
        depth_gt = tf.image.decode_png(tf.io.read_file(self.groundtruth_data_path+paths[1]),
                                       channels=0,
                                       dtype=tf.uint16)
        depth_gt = tf.cast(depth_gt, tf.float32) / 256.0
        if self.do_kb_crop:
            # NOTE: this print runs once at graph-trace time, not per sample.
            print('Cropping training images as kitti benchmark images')
            height = tf.shape(image)[0]
            width = tf.shape(image)[1]
            top_margin = tf.cast(height - 352,dtype=tf.int32)
            left_margin = tf.cast((width - 1216) / 2,dtype=tf.int32)
            depth_gt = depth_gt[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
            image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
        return image, depth_gt

    def train_preprocess(self, image,depth_gt):
        """Training-time augmentation: rotate, crop, flip, photometric jitter,
        optional mean subtraction."""
        if self.do_rotate:
            print('Rotating training images')
            random_angle = tf.random.uniform([], - self.degree, self.degree)
            image = tfa.image.rotate(image, random_angle, interpolation='nearest')
            depth_gt = tfa.image.rotate(depth_gt, random_angle, interpolation='nearest')
        image, depth_gt = self.crop_fixed_size(image, depth_gt)
        if self.do_flip:
            # Identical flip decision applied to both image and depth.
            do_flip = tf.random.uniform([], 0, 1)
            image = tf.cond(do_flip > 0.5, lambda: tf.image.flip_left_right(image), lambda: image)
            depth_gt = tf.cond(do_flip > 0.5, lambda: tf.image.flip_left_right(depth_gt), lambda: depth_gt)
        if self.do_augment:
            do_augment = tf.random.uniform([], 0, 1)
            image = tf.cond(do_augment > 0.5, lambda: self.augment_image(image), lambda: image)
        image.set_shape([self.height, self.width, 3])
        depth_gt.set_shape([self.height, self.width, 1])
        if self.use_normalized_image:
            # Mean subtraction operates on a 0-255 scale.
            image *= 255.0
            image = self.mean_image_subtraction(image,
                                                [123.68, 116.78, 103.94])
        return image, depth_gt

    def test_preprocess(self,image,depth_gt):
        # No augmentation at test time; only optional mean subtraction.
        image.set_shape([None, None, 3])
        depth_gt.set_shape([None, None, 1])
        if self.use_normalized_image:
            image *= 255.0
            image = self.mean_image_subtraction(image,
                                                [123.68, 116.78, 103.94])
        return image, depth_gt

    def lazy_preprocess(self, image, depth_gt):
        # Runs eagerly via tf.py_function so it sees the *current* strategy.
        if self.multiple_strategy:
            strategy = self.strategies[self.current_strategy]
        else:
            strategy = self.strategies[-1]
        depth_gt = self.dilation(depth_gt,**strategy)
        return image, depth_gt

    def dilation(self, depth_gt, pool_size=(2,2), iterations=1):
        """Morphological dilation of the sparse ground truth via repeated
        max-pooling, then nearest-neighbor resize back to full resolution."""
        if iterations > 0:
            depth_gt = tf.expand_dims(depth_gt, axis=0)
            for _ in range(iterations):
                depth_gt = MaxPooling2D(pool_size=pool_size)(depth_gt)
            depth_gt = tf.squeeze(depth_gt,axis=0)
            depth_gt = tf.image.resize(depth_gt,
                                       (self.height,self.width),
                                       method='nearest')
        return depth_gt

    def crop_fixed_size(self, image, depth_gt):
        # Concatenate so the same crop window hits image and depth; in debug
        # mode the window is fixed for reproducibility.
        image_depth = tf.concat([image, depth_gt], 2)
        if not self.debug:
            image_depth_cropped = tf.image.random_crop(image_depth, [self.height, self.width, 4])
        else:
            image_depth_cropped = image_depth[100:100+self.height, 365:365+self.width, :]
        image_cropped = image_depth_cropped[:, :, 0:3]
        depth_gt_cropped = tf.expand_dims(image_depth_cropped[:, :, 3], 2)
        return image_cropped, depth_gt_cropped

    def augment_image(self, image):
        # gamma augmentation
        gamma = tf.random.uniform([], 0.9, 1.1)
        image_aug = image ** gamma
        # brightness augmentation
        brightness = tf.random.uniform([], 0.9, 1.1)
        image_aug = image_aug * brightness
        # color augmentation
        colors = tf.random.uniform([3], 0.9, 1.1)
        white = tf.ones([tf.shape(image)[0], tf.shape(image)[1]])
        color_image = tf.stack([white * colors[i] for i in range(3)], axis=2)
        image_aug *= color_image
        return image_aug

    @staticmethod
    def mean_image_subtraction(image, means):
        """Subtracts the given means from each image channel.

        For example:
        means = [123.68, 116.779, 103.939]
        image = mean_image_subtraction(image, means)

        Note that the rank of `image` must be known.

        Args:
          image: a tensor of size [height, width, C].
          means: a C-vector of values to subtract from each channel.

        Returns:
          the centered image.

        Raises:
          ValueError: If the rank of `image` is unknown, if `image` has a rank other
            than three or if the number of channels in `image` doesn't match the
            number of values in `means`.
        """
        if image.get_shape().ndims != 3:
            raise ValueError('Input must be of size [height, width, C>0]')
        num_channels = image.get_shape().as_list()[-1]
        if len(means) != num_channels:
            raise ValueError(f'len(means)==3 must match the number of channels == {num_channels}')
        channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
        for i in range(num_channels):
            channels[i] -= means[i]
        return tf.concat(axis=2, values=channels)
| 8,542 | 39.488152 | 107 | py |
LightDepth | LightDepth-main/tf_implementation/scripts/models/efficient_unet.py | from tensorflow.keras.layers import Layer, InputSpec
import tensorflow as tf
from tensorflow.keras import applications
from tensorflow.keras.layers import Conv2D, Concatenate, LeakyReLU, UpSampling2D
import keras.backend as K
import keras.utils.conv_utils as conv_utils
from tensorflow.keras.models import Model
import numpy as np
def normalize_data_format(value):
    """Validate and canonicalise a Keras image data-format string.

    Falls back to the global Keras setting when *value* is None, lower-cases
    the result, and raises ValueError for anything other than
    'channels_first' / 'channels_last'.
    """
    fmt = K.image_data_format() if value is None else value
    canonical = fmt.lower()
    if canonical not in ('channels_first', 'channels_last'):
        raise ValueError('The `data_format` argument must be one of '
                         '"channels_first", "channels_last". Received: ' +
                         str(fmt))
    return canonical
class BilinearUpSampling2D(Layer):
    """Keras layer that upsamples a rank-4 tensor with bilinear interpolation.

    Wraps ``tf.image.resize`` with ``ResizeMethod.BILINEAR`` so the scaling
    method is explicit and serialisable.

    Args:
        size: tuple of 2 ints, the (height, width) upsampling factors.
        data_format: 'channels_first' or 'channels_last'; defaults to the
            global Keras image data format.
    """

    def __init__(self, size=(2, 2), data_format=None, **kwargs):
        super(BilinearUpSampling2D, self).__init__(**kwargs)
        self.data_format = normalize_data_format(data_format)
        self.size = conv_utils.normalize_tuple(size, 2, 'size')
        # Inputs must be rank-4: (batch, H, W, C) or (batch, C, H, W).
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # Scale the two spatial dimensions by the configured factors;
        # unknown (None) dimensions stay unknown.
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs):
        # Target size is derived from the dynamic tensor shape so the layer
        # also works when spatial dimensions are unknown at build time.
        input_shape = K.shape(inputs)
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
        # NOTE(review): tf.image.resize operates on channels_last tensors;
        # the channels_first branch presumably relies on callers never using
        # it — confirm before enabling channels_first.
        return tf.image.resize(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR)

    def get_config(self):
        """Return the layer config so models using this layer can be reloaded."""
        config = {'size': self.size, 'data_format': self.data_format}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class EfficientUNet():
    """U-Net style monocular depth estimator with an EfficientNet encoder.

    The encoder is selected by ``config.encoder`` from
    ``keras.applications.efficientnet``; the decoder bilinearly upsamples
    with skip connections from early encoder blocks and emits a
    single-channel depth map scaled into [min_depth, max_depth].
    """

    def __init__(self, config):
        # Exponentially decayed learning rate for the optimiser named in config.
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            config.initial_learning_rate,
            decay_steps=config.decay_steps,
            decay_rate=config.decay_rate)
        self.optimizer = getattr(tf.optimizers, config.optimizer)(learning_rate=lr_schedule)
        self.max_depth = tf.constant(config.max_depth)
        self.min_depth = config.min_depth
        # Loss is looked up by name among this class's *_loss methods.
        self.model_loss = getattr(self, config.loss_fn)
        self.garg_crop = config.garg_crop
        self.eigen_crop = config.eigen_crop
        self.do_flip_predict = config.do_flip_predict
        self.eps = 1e-5

        def UpConv2D(tensor, filters, name, concat_with):
            """Bilinear x2 upsample, concat an encoder skip, then two 3x3 convs."""
            up_i = BilinearUpSampling2D((2, 2), name=name+'_upsampling2d')(tensor)
            up_i = Concatenate(name=name+'_concat')([up_i, encoder.get_layer(concat_with).output])  # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        encoder = getattr(applications.efficientnet, config.encoder)(input_shape=(None, None, 3), include_top=False)
        encoder_output_shape = encoder.output.shape
        decode_filters = int(encoder_output_shape[-1])
        decoder = Conv2D(filters=decode_filters,
                         kernel_size=1, padding='same',
                         input_shape=encoder_output_shape,
                         name='conv2')(encoder.output)
        # Halve the channel count at each decoder stage.
        decoder = UpConv2D(decoder, int(decode_filters/2),
                           'up1', concat_with='block4a_dwconv')
        decoder = UpConv2D(decoder, int(decode_filters/4),
                           'up2', concat_with='block3a_dwconv')
        decoder = UpConv2D(decoder, int(decode_filters/8),
                           'up3', concat_with='block2a_dwconv')
        decoder = UpConv2D(decoder, int(decode_filters/16),
                           'up4', concat_with='block1c_activation')
        # decoder = UpConv2D(decoder, int(decode_filters/32),
        #                    'up5', concat_with=encoder.input.name)
        outputs = Conv2D(filters=1,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         name='conv3',
                         activation=config.decoder_last_layer_activation_fn)(decoder)
        outputs = UpSampling2D()(outputs)
        if config.decoder_last_layer_activation_fn == 'sigmoid':
            # Sigmoid output is already in (0, 1): just scale to depth units.
            outputs = outputs*self.max_depth + self.eps
        else:
            # Otherwise min-max normalise the raw activations, then map the
            # result into [min_depth, max_depth].
            outputs = outputs - tf.reduce_min(outputs)
            outputs = outputs / tf.reduce_max(outputs)
            outputs = (outputs*(self.max_depth-self.min_depth))+self.min_depth
        self.model = Model(inputs=encoder.input, outputs=outputs)

    @tf.function
    def test_step(self, image, depth_gt):
        """Forward pass without gradients; returns (loss, prediction)."""
        depth_est = self.model(image, training=False)
        loss_value = self.model_loss(depth_est, depth_gt)
        return loss_value, depth_est

    @tf.function
    def train_step(self, image, depth_gt):
        """One optimisation step; returns (loss, max(est), min(est))."""
        with tf.GradientTape() as tape:
            depth_est = self.model(image, training=True)
            loss_value = self.model_loss(depth_est, depth_gt)
        grads = tape.gradient(loss_value, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss_value, tf.reduce_max(depth_est), tf.reduce_min(depth_est)

    def compute_metrics(self, image, depth_gt):
        """Standard monocular-depth error/accuracy metrics for one batch."""
        valid_mask = np.logical_and(depth_gt > self.min_depth,
                                    depth_gt < self.max_depth)
        if self.garg_crop or self.eigen_crop:
            batches, gt_height, gt_width, channels = depth_gt.shape
            eval_mask = np.zeros(valid_mask.shape)
            if self.garg_crop:
                eval_mask[:, int(0.40810811 * gt_height):int(0.99189189 * gt_height),
                          int(0.03594771 * gt_width):int(0.96405229 * gt_width), :] = 1
            elif self.eigen_crop:
                # if self.dataset == 'kitti':
                eval_mask[:, int(0.3324324 * gt_height):int(0.91351351 * gt_height),
                          int(0.0359477 * gt_width):int(0.96405229 * gt_width), :] = 1
                # else:
                #     eval_mask[:,45:471, 41:601,:] = 1
            # BUGFIX: the crop mask used to be computed but never applied, so
            # garg_crop/eigen_crop had no effect on the reported metrics.
            valid_mask = np.logical_and(valid_mask, eval_mask.astype(bool))
        depth_est = self.model(image, training=False)
        if self.do_flip_predict:
            # Average with the prediction on the horizontally flipped input.
            depth_est_lr = self.model(image[..., ::-1, :], training=False)
            depth_est_final = (0.5*(depth_est + depth_est_lr))[valid_mask]
        else:
            depth_est_final = depth_est[valid_mask]
        depth_gt = depth_gt[valid_mask]
        # Threshold accuracies delta < 1.25^k.
        thresh = np.maximum((depth_gt / depth_est_final), (depth_est_final / depth_gt))
        a1 = (thresh < 1.25).mean()
        a2 = (thresh < 1.25 ** 2).mean()
        a3 = (thresh < 1.25 ** 3).mean()
        abs_rel = np.mean(np.abs(depth_gt - depth_est_final) / depth_gt)
        sq_rel = np.mean(((depth_gt - depth_est_final) ** 2) / depth_gt)
        rmse = (depth_gt - depth_est_final) ** 2
        rmse = np.sqrt(np.mean(rmse))
        rmse_log = (np.log(depth_gt) - np.log(depth_est_final)) ** 2
        rmse_log = np.sqrt(rmse_log.mean())
        err = np.log(depth_est_final) - np.log(depth_gt)
        silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
        log_10 = (np.abs(np.log10(depth_gt) - np.log10(depth_est_final))).mean()
        return dict(a1=a1, a2=a2, a3=a3,
                    abs_rel=abs_rel, rmse=rmse, log_10=log_10,
                    rmse_log=rmse_log, silog=silog, sq_rel=sq_rel)

    @tf.function
    def bts_loss(self, depth_est, depth_gt):
        """Scale-invariant log loss with variance focus 0.85 (BTS-style)."""
        mask = depth_gt > self.min_depth
        depth_gt_masked = tf.boolean_mask(depth_gt, mask)
        depth_est_masked = tf.boolean_mask(depth_est, mask)
        d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
        return tf.sqrt(tf.reduce_mean(d ** 2) - 0.85 * (tf.reduce_mean(d) ** 2)) * 10.0

    @tf.function
    def kitti_loss(self, depth_est, depth_gt):
        """Eigen-style scale-invariant log loss."""
        mask = depth_gt > self.min_depth
        depth_gt_masked = tf.boolean_mask(depth_gt, mask)
        depth_est_masked = tf.boolean_mask(depth_est, mask)
        d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
        return tf.reduce_mean(d ** 2) - (tf.reduce_mean(d) ** 2)

    @tf.function
    def densedepth_loss(self, depth_est, depth_gt, theta=0.1, maxDepthVal=1000.0/10.0):
        """DenseDepth loss: weighted sum of SSIM, gradient and L1 terms."""
        l_depth = K.mean(K.abs(depth_est - depth_gt), axis=-1)
        dy_true, dx_true = tf.image.image_gradients(depth_gt)
        dy_pred, dx_pred = tf.image.image_gradients(depth_est)
        l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true), axis=-1)
        l_ssim = K.clip((1 - tf.image.ssim(depth_gt, depth_est, maxDepthVal)) * 0.5, 0, 1)
        w1 = 1.0
        w2 = 1.0
        w3 = theta
        return tf.reduce_mean((w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth)))
| 9,917 | 45.345794 | 116 | py |
LightDepth | LightDepth-main/tf_implementation/scripts/models/ordinary_unet.py | from tensorflow.keras.layers import Layer, InputSpec
import tensorflow as tf
from tensorflow.keras import applications
from tensorflow.keras.layers import Conv2D, Concatenate, LeakyReLU, UpSampling2D
import keras.backend as K
import keras.utils.conv_utils as conv_utils
from tensorflow.keras.models import Model
import numpy as np
def normalize_data_format(value):
    """Validate and canonicalise a Keras image data-format string.

    Falls back to the global Keras setting when *value* is None, lower-cases
    the result, and raises ValueError for anything other than
    'channels_first' / 'channels_last'.
    """
    fmt = K.image_data_format() if value is None else value
    canonical = fmt.lower()
    if canonical not in ('channels_first', 'channels_last'):
        raise ValueError('The `data_format` argument must be one of '
                         '"channels_first", "channels_last". Received: ' +
                         str(fmt))
    return canonical
class BilinearUpSampling2D(Layer):
    """Keras layer that upsamples a rank-4 tensor with bilinear interpolation.

    Wraps ``tf.image.resize`` with ``ResizeMethod.BILINEAR`` so the scaling
    method is explicit and serialisable.

    Args:
        size: tuple of 2 ints, the (height, width) upsampling factors.
        data_format: 'channels_first' or 'channels_last'; defaults to the
            global Keras image data format.
    """

    def __init__(self, size=(2, 2), data_format=None, **kwargs):
        super(BilinearUpSampling2D, self).__init__(**kwargs)
        self.data_format = normalize_data_format(data_format)
        self.size = conv_utils.normalize_tuple(size, 2, 'size')
        # Inputs must be rank-4: (batch, H, W, C) or (batch, C, H, W).
        self.input_spec = InputSpec(ndim=4)

    def compute_output_shape(self, input_shape):
        # Scale the two spatial dimensions by the configured factors;
        # unknown (None) dimensions stay unknown.
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs):
        # Target size is derived from the dynamic tensor shape so the layer
        # also works when spatial dimensions are unknown at build time.
        input_shape = K.shape(inputs)
        if self.data_format == 'channels_first':
            height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
        elif self.data_format == 'channels_last':
            height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
        # NOTE(review): tf.image.resize operates on channels_last tensors;
        # the channels_first branch presumably relies on callers never using
        # it — confirm before enabling channels_first.
        return tf.image.resize(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR)

    def get_config(self):
        """Return the layer config so models using this layer can be reloaded."""
        config = {'size': self.size, 'data_format': self.data_format}
        base_config = super(BilinearUpSampling2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class OrdinaryUNet():
    """U-Net style monocular depth estimator with a DenseNet-family encoder.

    The encoder is selected by ``config.encoder`` from
    ``keras.applications`` with a fixed 512x256 input; the decoder
    bilinearly upsamples with skip connections from the encoder pooling
    stages and emits a single-channel depth map scaled into
    [min_depth, max_depth].
    """

    def __init__(self, config):
        # Exponentially decayed learning rate for the optimiser named in config.
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            config.initial_learning_rate,
            decay_steps=config.decay_steps,
            decay_rate=config.decay_rate)
        self.optimizer = getattr(tf.optimizers, config.optimizer)(learning_rate=lr_schedule)
        self.max_depth = tf.constant(config.max_depth)
        self.min_depth = config.min_depth
        # Loss is looked up by name among this class's *_loss methods.
        self.model_loss = getattr(self, config.loss_fn)
        self.garg_crop = config.garg_crop
        self.eigen_crop = config.eigen_crop
        self.do_flip_predict = config.do_flip_predict
        self.eps = 1e-5

        def UpConv2D(tensor, filters, name, concat_with):
            """Bilinear x2 upsample, concat an encoder skip, then two 3x3 convs."""
            up_i = BilinearUpSampling2D((2, 2), name=name+'_upsampling2d')(tensor)
            up_i = Concatenate(name=name+'_concat')([up_i, encoder.get_layer(concat_with).output])  # Skip connection
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
            up_i = LeakyReLU(alpha=0.2)(up_i)
            return up_i

        encoder = getattr(applications, config.encoder)(input_shape=(512, 256, 3), include_top=False)
        encoder_output_shape = encoder.output.shape
        decode_filters = int(encoder_output_shape[-1])
        decoder = Conv2D(filters=decode_filters,
                         kernel_size=1, padding='same',
                         input_shape=encoder_output_shape,
                         name='conv2')(encoder.output)
        # Halve the channel count at each decoder stage.
        decoder = UpConv2D(decoder, int(decode_filters/2),
                           'up1', concat_with='pool3_pool')
        decoder = UpConv2D(decoder, int(decode_filters/4),
                           'up2', concat_with='pool2_pool')
        decoder = UpConv2D(decoder, int(decode_filters/8),
                           'up3', concat_with='pool1')
        decoder = UpConv2D(decoder, int(decode_filters/16),
                           'up4', concat_with='conv1/relu')
        # decoder = UpConv2D(decoder, int(decode_filters/32),
        #                    'up5', concat_with=encoder.input.name)
        outputs = Conv2D(filters=1, kernel_size=3, strides=1,
                         padding='same', name='conv3', activation=config.decoder_last_layer_activation_fn)(decoder)
        outputs = UpSampling2D()(outputs)
        if config.decoder_last_layer_activation_fn == 'sigmoid':
            # Sigmoid output is already in (0, 1): just scale to depth units.
            outputs = outputs*self.max_depth + self.eps
        else:
            # Otherwise min-max normalise the raw activations, then map the
            # result into [min_depth, max_depth].
            outputs = outputs - tf.reduce_min(outputs)
            outputs = outputs / tf.reduce_max(outputs)
            outputs = (outputs*(self.max_depth-self.min_depth))+self.min_depth
        self.model = Model(inputs=encoder.input, outputs=outputs)

    @tf.function
    def test_step(self, image, depth_gt):
        """Forward pass without gradients; returns (loss, prediction)."""
        depth_est = self.model(image, training=False)
        loss_value = self.model_loss(depth_est, depth_gt)
        return loss_value, depth_est

    @tf.function
    def train_step(self, image, depth_gt):
        """One optimisation step; returns (loss, max(est), min(est))."""
        with tf.GradientTape() as tape:
            depth_est = self.model(image, training=True)
            loss_value = self.model_loss(depth_est, depth_gt)
        grads = tape.gradient(loss_value, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss_value, tf.reduce_max(depth_est), tf.reduce_min(depth_est)

    def compute_metrics(self, image, depth_gt):
        """Standard monocular-depth error/accuracy metrics for one batch."""
        valid_mask = np.logical_and(depth_gt > self.min_depth,
                                    depth_gt < self.max_depth)
        if self.garg_crop or self.eigen_crop:
            batches, gt_height, gt_width, channels = depth_gt.shape
            eval_mask = np.zeros(valid_mask.shape)
            if self.garg_crop:
                eval_mask[:, int(0.40810811 * gt_height):int(0.99189189 * gt_height),
                          int(0.03594771 * gt_width):int(0.96405229 * gt_width), :] = 1
            elif self.eigen_crop:
                # if self.dataset == 'kitti':
                eval_mask[:, int(0.3324324 * gt_height):int(0.91351351 * gt_height),
                          int(0.0359477 * gt_width):int(0.96405229 * gt_width), :] = 1
                # else:
                #     eval_mask[:,45:471, 41:601,:] = 1
            # BUGFIX: the crop mask used to be computed but never applied, so
            # garg_crop/eigen_crop had no effect on the reported metrics.
            valid_mask = np.logical_and(valid_mask, eval_mask.astype(bool))
        depth_est = self.model(image, training=False)
        if self.do_flip_predict:
            # Average with the prediction on the horizontally flipped input.
            depth_est_lr = self.model(image[..., ::-1, :], training=False)
            depth_est_final = (0.5*(depth_est + depth_est_lr))[valid_mask]
        else:
            depth_est_final = depth_est[valid_mask]
        depth_gt = depth_gt[valid_mask]
        # Threshold accuracies delta < 1.25^k.
        thresh = np.maximum((depth_gt / depth_est_final), (depth_est_final / depth_gt))
        a1 = (thresh < 1.25).mean()
        a2 = (thresh < 1.25 ** 2).mean()
        a3 = (thresh < 1.25 ** 3).mean()
        abs_rel = np.mean(np.abs(depth_gt - depth_est_final) / depth_gt)
        sq_rel = np.mean(((depth_gt - depth_est_final) ** 2) / depth_gt)
        rmse = (depth_gt - depth_est_final) ** 2
        rmse = np.sqrt(np.mean(rmse))
        rmse_log = (np.log(depth_gt) - np.log(depth_est_final)) ** 2
        rmse_log = np.sqrt(rmse_log.mean())
        err = np.log(depth_est_final) - np.log(depth_gt)
        silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
        log_10 = (np.abs(np.log10(depth_gt) - np.log10(depth_est_final))).mean()
        return dict(a1=a1, a2=a2, a3=a3,
                    abs_rel=abs_rel, rmse=rmse, log_10=log_10,
                    rmse_log=rmse_log, silog=silog, sq_rel=sq_rel)

    @tf.function
    def bts_loss(self, depth_est, depth_gt):
        """Scale-invariant log loss with variance focus 0.85 (BTS-style)."""
        mask = depth_gt > self.min_depth
        depth_gt_masked = tf.boolean_mask(depth_gt, mask)
        depth_est_masked = tf.boolean_mask(depth_est, mask)
        d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
        return tf.sqrt(tf.reduce_mean(d ** 2) - 0.85 * (tf.reduce_mean(d) ** 2)) * 10.0

    @tf.function
    def kitti_loss(self, depth_est, depth_gt):
        """Eigen-style scale-invariant log loss."""
        mask = depth_gt > self.min_depth
        depth_gt_masked = tf.boolean_mask(depth_gt, mask)
        depth_est_masked = tf.boolean_mask(depth_est, mask)
        d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
        return tf.reduce_mean(d ** 2) - (tf.reduce_mean(d) ** 2)

    @tf.function
    def densedepth_loss(self, depth_est, depth_gt, theta=0.1, maxDepthVal=1000.0/10.0):
        """DenseDepth loss: weighted sum of SSIM, gradient and L1 terms."""
        l_depth = K.mean(K.abs(depth_est - depth_gt), axis=-1)
        dy_true, dx_true = tf.image.image_gradients(depth_gt)
        dy_pred, dx_pred = tf.image.image_gradients(depth_est)
        l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true), axis=-1)
        l_ssim = K.clip((1 - tf.image.ssim(depth_gt, depth_est, maxDepthVal)) * 0.5, 0, 1)
        w1 = 1.0
        w2 = 1.0
        w3 = theta
        return tf.reduce_mean((w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth)))
| 9,771 | 45.312796 | 116 | py |
OPTMLSTM | OPTMLSTM-main/example_OPTM_LSTM.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Adam Ntakaris (adamantios.ntakaris@ed.ac.uk, @gmail.com)
"""
from keras.layers import Dense
import keras
import numpy as np
import OPTMCell

# Note: Random data example for illustration purposes only
# OPTM-LSTM is a narrow artificial intelligence model

# Input Dim --> (num_of_data_samples, 1, 41)
# 41 = 40 LOB Price and Volume levels + Current mid_price/Guarantor
three_dim_inpt = np.random.rand(600, 1, 41)
# Regression Labels --> [mid_prices,]
lbls = np.random.rand(600,)

batch_size = 1
num_of_hidden_units = 8

# Single-timestep input; the 41st feature is the guarantor consumed by the cell.
input_1 = keras.Input(batch_shape = (batch_size, 1, 41))
# Wrap the custom cell in a generic RNN layer.
layer_1 = keras.layers.RNN(OPTMCell.OPTMLSTMCell(num_of_hidden_units),
                           return_sequences=True, stateful=False)(input_1)
# Linear head regresses the next mid-price from the cell output.
output_1 = Dense(1)(layer_1)

model = keras.Model(inputs=input_1, outputs=output_1)
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse'])
model.fit(three_dim_inpt, lbls, batch_size=1, epochs=5)
| 1,004 | 27.714286 | 75 | py |
OPTMLSTM | OPTMLSTM-main/OPTMCell.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Adam Ntakaris (adamantios.ntakaris@ed.ac.uk, @gmail.com)
Important: This is an extension based on
https://github.com/keras-team/keras/blob/v2.10.0/keras/layers/rnn/lstm.py
"""
import tensorflow.compat.v2 as tf
from keras import activations
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.layers.recurrent import DropoutRNNCellMixin
from keras.utils import tf_utils
from keras.engine.base_layer import Layer
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# allow Numpy
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
RECURRENT_DROPOUT_WARNING_MSG = (
'RNN `implementation=2` is not supported when `recurrent_dropout` is set. '
'Using `implementation=1`.')
@keras_export(v1=['keras.layers.LSTMCell'])
class OPTMLSTMCell(DropoutRNNCellMixin, Layer):
    """Optimum Output Long Short-Term Memory (OPTM-LSTM) cell.

    An optimized extension of the stock Keras LSTM cell.  The last feature
    of every input vector is treated as a "guarantor" regression label: at
    each step a few iterations of gradient descent score the candidate
    outputs (input/forget/candidate gates, cell state, output gate, hidden
    state) by how well they predict the guarantor, and the best-scoring
    vector is emitted as the cell output.

    Args (similar to prototype LSTM layer):
      units: Positive integer, dimensionality of the output space.

    Input Placeholder:
      inputs: A 3D tensor, with shape of `[batch=1, timesteps, features +
        guarantor]`.
    """

    def __init__(
        self,
        units,
        activation='tanh',
        recurrent_activation='hard_sigmoid',
        use_bias=True,
        kernel_initializer='glorot_uniform',
        recurrent_initializer='orthogonal',
        bias_initializer='zeros',
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.,
        recurrent_dropout=0.,
        **kwargs
    ):
        if units < 0:
            raise ValueError(
                f'Received an invalid value for argument `units`, '
                f'expected a positive integer, got {units}.'
            )
        # By default use cached variable under v2 mode, see b/143699808.
        if tf.compat.v1.executing_eagerly_outside_functions():
            self._enable_caching_device = kwargs.pop(
                'enable_caching_device', True
            )
        else:
            self._enable_caching_device = kwargs.pop(
                'enable_caching_device', False
            )
        super(OPTMLSTMCell, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        implementation = kwargs.pop('implementation', 1)
        # Implementation 2 does not support recurrent dropout; fall back to 1.
        if self.recurrent_dropout != 0 and implementation != 1:
            logging.debug(RECURRENT_DROPOUT_WARNING_MSG)
            self.implementation = 1
        else:
            self.implementation = implementation
        self.state_size = [self.units, self.units]
        self.output_size = self.units

    @tf_utils.shape_type_conversion
    def build(self, input_shape):
        # The last input feature is the guarantor label, not a real
        # feature, so it is excluded from the kernel's input dimension.
        input_dim = input_shape[-1]-1
        self.kernel = self.add_weight(
            shape=(input_dim, self.units * 4),
            name='kernel',
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint,
        )
        if self.use_bias:
            if self.unit_forget_bias:
                # Jozefowicz et al. trick: initialise the forget-gate bias
                # slice to ones, the rest with the configured initializer.
                def bias_initializer(_, *args, **kwargs):
                    return backend.concatenate(
                        [
                            self.bias_initializer(
                                (self.units,), *args, **kwargs
                            ),
                            initializers.get('ones')(
                                (self.units,), *args, **kwargs
                            ),
                            self.bias_initializer(
                                (self.units * 2,), *args, **kwargs
                            ),
                        ]
                    )
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(
                shape=(self.units * 4,),
                name='bias',
                initializer=bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None
        self.built = True

    def _compute_carry_and_output(self, x, h_tm1, c_tm1):
        """Computes carry and output using split kernels.

        Returns (cell state, output gate, input gate, forget gate,
        candidate state) so the caller can rank all intermediates.
        """
        x_i, x_f, x_c, x_o = x
        h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
        i = self.recurrent_activation(
            x_i + backend.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]
            )
        )
        f = self.recurrent_activation(
            x_f
            + backend.dot(
                h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]
            )
        )
        c_t = self.activation(
            x_c
            + backend.dot(
                h_tm1_c,
                self.recurrent_kernel[:, self.units * 2:self.units * 3]
            )
        )
        c = f * c_tm1 + i * c_t
        o = self.recurrent_activation(
            x_o + backend.dot(
                h_tm1_o, self.recurrent_kernel[:, self.units * 3:]
            )
        )
        return c, o, i, f, c_t

    def _compute_carry_and_output_fused(self, z, c_tm1):
        """Computes carry and output using fused kernels."""
        z0, z1, z2, z3 = z
        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.recurrent_activation(z3)
        return c, o

    def call(self, inputs, states, training=None):
        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state
        # Strip the trailing guarantor label from the feature vector.
        inputs_1 = inputs[0][0:-1].reshape(1, -1)
        dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
            h_tm1, training, count=4)
        if self.implementation == 1:
            if 0 < self.dropout < 1.:
                inputs_i = inputs_1 * dp_mask[0]
                inputs_f = inputs_1 * dp_mask[1]
                inputs_c = inputs_1 * dp_mask[2]
                inputs_o = inputs_1 * dp_mask[3]
            else:
                inputs_i = inputs_1
                inputs_f = inputs_1
                inputs_c = inputs_1
                inputs_o = inputs_1
            k_i, k_f, k_c, k_o = tf.split(
                self.kernel, num_or_size_splits=4, axis=1)
            x_i = backend.dot(inputs_i, k_i)
            x_f = backend.dot(inputs_f, k_f)
            x_c = backend.dot(inputs_c, k_c)
            x_o = backend.dot(inputs_o, k_o)
            if self.use_bias:
                b_i, b_f, b_c, b_o = tf.split(
                    self.bias, num_or_size_splits=4, axis=0)
                x_i = backend.bias_add(x_i, b_i)
                x_f = backend.bias_add(x_f, b_f)
                x_c = backend.bias_add(x_c, b_c)
                x_o = backend.bias_add(x_o, b_o)
            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            x = (x_i, x_f, x_c, x_o)
            h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
            c, o, i, f, c_t = self._compute_carry_and_output(x, h_tm1, c_tm1)
        else:
            if 0. < self.dropout < 1.:
                inputs_1 = inputs_1 * dp_mask[0]
            z = backend.dot(inputs_1, self.kernel)
            z += backend.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = backend.bias_add(z, self.bias)
            z = tf.split(z, num_or_size_splits=4, axis=1)
            c, o = self._compute_carry_and_output_fused(z, c_tm1)
        h_temp = o * self.activation(c)
        # Guarantor: fit a tiny linear model that predicts the guarantor
        # label from the concatenated gate/state vectors, then emit the
        # sub-vector whose coefficients have the largest mean weight.
        copies = 1  # Curse of Dimensionality Helper
        gated_vector = tf.concat([i, f, c_t, c, o, h_temp], axis=1)
        gated_vector_copy = tf.tile(gated_vector, (copies, 1))
        gated_labels = tf.tile(inputs[0][-1].reshape(1, -1), (copies, 1))
        n_epoch = 13
        learning_rate = 0.0001
        # Gradient Descent
        theta_1 = tf.ones([self.units*6, 1])
        for epoch in range(n_epoch):
            y_pred = tf.matmul(gated_vector_copy, theta_1)
            error = y_pred - gated_labels
            gradients = 2/copies * tf.matmul(
                tf.transpose(gated_vector_copy), tf.cast(error, tf.float32)
            )
            theta_1 = theta_1 - learning_rate * gradients
        importance = theta_1
        # Collect Gates and States
        i_gate_out = importance[:self.units, :]
        f_gate_out = importance[self.units:self.units*2, :]
        can_gate_out = importance[self.units*2:self.units*3, :]
        c_gate_out = importance[self.units*3:self.units*4, :]
        o_gate_out = importance[self.units*4:self.units*5, :]
        h_gate_out = importance[self.units*5:self.units*6, :]
        # Importance Score: mean learned coefficient per candidate output.
        importance_i = tf.math.reduce_mean(i_gate_out, axis=0)
        importance_f = tf.math.reduce_mean(f_gate_out, axis=0)
        importance_can = tf.math.reduce_mean(can_gate_out, axis=0)
        importance_c = tf.math.reduce_mean(c_gate_out, axis=0)
        importance_o = tf.math.reduce_mean(o_gate_out, axis=0)
        importance_h = tf.math.reduce_mean(h_gate_out, axis=0)
        # Final/Optimized Output
        merge_output = tf.stack(
            [importance_i,
             importance_f,
             importance_can,
             importance_c,
             importance_o,
             importance_h],
            axis=0
        )
        result = tf.where(
            merge_output == tf.math.reduce_max(merge_output, axis=0)
        )
        # Best Gate Filter: emit the candidate with the highest score.
        if result[0][0] == 0:
            h = tf.transpose(i_gate_out)
        elif result[0][0] == 1:
            h = tf.transpose(f_gate_out)
        elif result[0][0] == 2:
            h = tf.transpose(can_gate_out)
        elif result[0][0] == 3:
            h = tf.transpose(c_gate_out)
        elif result[0][0] == 4:
            h = tf.transpose(o_gate_out)
        else:
            h = tf.transpose(h_gate_out)
        return h, [h, c]

    def get_config(self):
        """Return the cell configuration for serialisation."""
        config = {
            'units':
                self.units,
            'activation':
                activations.serialize(self.activation),
            'recurrent_activation':
                activations.serialize(self.recurrent_activation),
            'use_bias':
                self.use_bias,
            'kernel_initializer':
                initializers.serialize(self.kernel_initializer),
            'recurrent_initializer':
                initializers.serialize(self.recurrent_initializer),
            'bias_initializer':
                initializers.serialize(self.bias_initializer),
            'unit_forget_bias':
                self.unit_forget_bias,
            'kernel_regularizer':
                regularizers.serialize(self.kernel_regularizer),
            'recurrent_regularizer':
                regularizers.serialize(self.recurrent_regularizer),
            'bias_regularizer':
                regularizers.serialize(self.bias_regularizer),
            'kernel_constraint':
                constraints.serialize(self.kernel_constraint),
            'recurrent_constraint':
                constraints.serialize(self.recurrent_constraint),
            'bias_constraint':
                constraints.serialize(self.bias_constraint),
            'dropout':
                self.dropout,
            'recurrent_dropout':
                self.recurrent_dropout,
            'implementation':
                self.implementation
        }
        # BUGFIX: this used to call super(RevisedLSTMCell, self) — an
        # undefined name left over from an earlier class name — which raised
        # NameError whenever the cell was serialised.
        base_config = super(OPTMLSTMCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 13,682 | 35.782258 | 79 | py |
InDuDoNet | InDuDoNet-main/test_clinic.py | import os.path
import os
import os.path
import argparse
import numpy as np
import torch
from CLINIC_metal.preprocess_clinic.preprocessing_clinic import clinic_input_data
from network.indudonet import InDuDoNet
import nibabel
import time
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Command-line configuration for clinical-data inference.
parser = argparse.ArgumentParser(description="YU_Test")
parser.add_argument("--model_dir", type=str, default="models", help='path to model and log files')
parser.add_argument("--data_path", type=str, default="CLINIC_metal/test/", help='path to training data')
parser.add_argument("--use_GPU", type=bool, default=True, help='use GPU or not')
parser.add_argument("--save_path", type=str, default="results/CLINIC_metal/", help='path to training data')
# Network hyper-parameters (must match the trained checkpoint).
parser.add_argument('--num_channel', type=int, default=32, help='the number of dual channels')
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
# NOTE(review): type=bool makes any non-empty string truthy on the command
# line — presumably only the default is ever relied on; confirm before use.
opt = parser.parse_args()
def mkdir(path):
    """Create *path* (including missing parents) if it does not exist.

    Prints which action was taken, mirroring the original behaviour.
    """
    if os.path.exists(path):
        print("--- There exsits folder " + path + " ! ---")
    else:
        # exist_ok guards against the race where the directory appears
        # between the exists() check and the creation.
        os.makedirs(path, exist_ok=True)
        print("--- new folder... ---")
        print("--- " + path + " ---")
# Output directory for the metal-artifact-reduced volumes.
Pred_nii = opt.save_path +'/X_mar/'
mkdir(Pred_nii)
def image_get_minmax():
    """Clipping range (min, max) used for image-domain data."""
    return (0.0, 1.0)
def proj_get_minmax():
    """Clipping range (min, max) used for projection (sinogram) data."""
    return (0.0, 4.0)
def normalize(data, minmax):
    """Clip a 2-D array to [lo, hi], rescale to [0, 255] and add batch axes.

    Returns a float32 array of shape (1, 1, H, W) for an (H, W) input.
    """
    lo, hi = minmax
    clipped = np.clip(data, lo, hi)
    scaled = (clipped - lo) / (hi - lo)
    scaled = (scaled * 255.0).astype(np.float32)
    channel_last = np.expand_dims(scaled, 2)                # (H, W, 1)
    channel_first = np.transpose(channel_last, (2, 0, 1))   # (1, H, W)
    return np.expand_dims(channel_first, 0)                 # (1, 1, H, W)
def test_image(allXma, allXLI, allM, allSma, allSLI, allTr, vol_idx, slice_idx):
    """Assemble one axial slice of every input volume as normalised CUDA tensors.

    Returns (Xma, XLI, Mask, Sma, SLI, Tr) tensors of shape (1, 1, H, W).
    """
    def _slice(volumes):
        return volumes[vol_idx][..., slice_idx]

    # Image-domain inputs are clipped/scaled with the image range,
    # sinogram-domain inputs with the projection range.
    xma = normalize(_slice(allXma), image_get_minmax())  # *255
    xli = normalize(_slice(allXLI), image_get_minmax())
    sma = normalize(_slice(allSma), proj_get_minmax())
    sli = normalize(_slice(allSLI), proj_get_minmax())

    def _to_4d(arr):
        # (H, W) -> (1, 1, H, W)
        return np.expand_dims(np.transpose(np.expand_dims(arr, 2), (2, 0, 1)), 0)

    # Trace mask is inverted before use.
    tr = _to_4d(1 - _slice(allTr).astype(np.float32))
    mask = _to_4d(_slice(allM).astype(np.float32))

    def _cuda(arr):
        return torch.Tensor(arr).cuda()

    return (_cuda(xma), _cuda(xli), _cuda(mask),
            _cuda(sma), _cuda(sli), _cuda(tr))
def main():
    """Run InDuDoNet inference over every clinical volume and save NIfTI results."""
    # Build model
    print('Loading model ...\n')
    net = InDuDoNet(opt).cuda()
    net.load_state_dict(torch.load(os.path.join(opt.model_dir)))
    net.eval()
    print('--------------load---------------all----------------nii-------------')
    allXma, allXLI, allM, allSma, allSLI, allTr, allaffine, allfilename = clinic_input_data(opt.data_path)
    print('--------------test---------------all----------------nii-------------')
    for vol_idx in range(len(allXma)):
        print('test %d th volume.......' % vol_idx)
        num_s = allXma[vol_idx].shape[2]
        # Accumulate per-slice network outputs into a full 3-D volume.
        pre_Xout = np.zeros_like(allXma[vol_idx])
        pre_name = allfilename[vol_idx]
        for slice_idx in range(num_s):
            Xma, XLI, M, Sma, SLI, Tr = test_image(allXma, allXLI, allM, allSma, allSLI, allTr, vol_idx, slice_idx)
            with torch.no_grad():
                if opt.use_GPU:
                    torch.cuda.synchronize()
                # start_time is retained from the original timing code; the
                # elapsed time is never reported here.
                start_time = time.time()
                ListX, ListS, ListYS= net(Xma, XLI, M, Sma, SLI, Tr)
                # Final stage output; undo the 255 scaling applied in normalize().
                Xout= ListX[-1] / 255.0
            pre_Xout[..., slice_idx] = Xout.data.cpu().numpy().squeeze()
        # Save the reconstructed volume with the original affine matrix.
        nibabel.save(nibabel.Nifti1Image(pre_Xout, allaffine[vol_idx]), Pred_nii + pre_name)
# Script entry point.
if __name__ == "__main__":
    main()
| 4,225 | 43.484211 | 116 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.