| repo (string, len 2-99) | file (string, len 13-225) | code (string, len 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class) |
|---|---|---|---|---|---|---|
| OpenFWI | OpenFWI-main/test.py |
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
import torch.nn as nn
from torch.utils.data import SequentialSampler
from torch.utils.data.dataloader import default_collate
import torchvision
from torchvision.transforms import Compose
import numpy as np
import utils
import network
from vis import *
from dataset import FWIDataset
import transforms as T
import pytorch_ssim
def evaluate(model, criterions, dataloader, device, k, ctx,
vis_path, vis_batch, vis_sample, missing, std):
model.eval()
    label_list, label_pred_list = [], [] # store denormalized prediction & gt in numpy
label_tensor, label_pred_tensor = [], [] # store normalized prediction & gt in tensor
if missing or std:
data_list, data_noise_list = [], [] # store original data and noisy/muted data
with torch.no_grad():
batch_idx = 0
for data, label in dataloader:
data = data.type(torch.FloatTensor).to(device, non_blocking=True)
label = label.type(torch.FloatTensor).to(device, non_blocking=True)
label_np = T.tonumpy_denormalize(label, ctx['label_min'], ctx['label_max'], exp=False)
label_list.append(label_np)
label_tensor.append(label)
if missing or std:
# Add gaussian noise
data_noise = torch.clip(data + (std ** 0.5) * torch.randn(data.shape).to(device, non_blocking=True), min=-1, max=1)
# Mute some traces
mute_idx = np.random.choice(data.shape[3], size=missing, replace=False)
data_noise[:, :, :, mute_idx] = data[0, 0, 0, 0]
data_np = T.tonumpy_denormalize(data, ctx['data_min'], ctx['data_max'], k=k)
data_noise_np = T.tonumpy_denormalize(data_noise, ctx['data_min'], ctx['data_max'], k=k)
data_list.append(data_np)
data_noise_list.append(data_noise_np)
pred = model(data_noise)
else:
pred = model(data)
label_pred_np = T.tonumpy_denormalize(pred, ctx['label_min'], ctx['label_max'], exp=False)
label_pred_list.append(label_pred_np)
label_pred_tensor.append(pred)
# Visualization
if vis_path and batch_idx < vis_batch:
for i in range(vis_sample):
plot_velocity(label_pred_np[i, 0], label_np[i, 0], f'{vis_path}/V_{batch_idx}_{i}.png') #, vmin=ctx['label_min'], vmax=ctx['label_max'])
if missing or std:
for ch in [2]: # range(data.shape[1]):
plot_seismic(data_np[i, ch], data_noise_np[i, ch], f'{vis_path}/S_{batch_idx}_{i}_{ch}.png',
vmin=ctx['data_min'] * 0.01, vmax=ctx['data_max'] * 0.01)
batch_idx += 1
label, label_pred = np.concatenate(label_list), np.concatenate(label_pred_list)
label_t, pred_t = torch.cat(label_tensor), torch.cat(label_pred_tensor)
l1 = nn.L1Loss()
l2 = nn.MSELoss()
print(f'MAE: {l1(label_t, pred_t)}')
print(f'MSE: {l2(label_t, pred_t)}')
ssim_loss = pytorch_ssim.SSIM(window_size=11)
print(f'SSIM: {ssim_loss(label_t / 2 + 0.5, pred_t / 2 + 0.5)}') # (-1, 1) to (0, 1)
for name, criterion in criterions.items():
print(f' * Velocity {name}: {criterion(label, label_pred)}')
# print(f' | Velocity 2 layers {name}: {criterion(label[:1000], label_pred[:1000])}')
# print(f' | Velocity 3 layers {name}: {criterion(label[1000:2000], label_pred[1000:2000])}')
# print(f' | Velocity 4 layers {name}: {criterion(label[2000:], label_pred[2000:])}')
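    # Note on the metrics above: MAE, MSE and SSIM are computed on the
    # normalized tensors in [-1, 1] (SSIM after rescaling to [0, 1]), while the
    # criterions dict reports MAE/MSE on the denormalized velocity maps
    # (plotted in m/s by vis.py).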
def main(args):
print(args)
print("torch version: ", torch.__version__)
print("torchvision version: ", torchvision.__version__)
utils.mkdir(args.output_path)
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
print("Loading data")
print("Loading validation data")
log_data_min = T.log_transform(ctx['data_min'], k=args.k)
log_data_max = T.log_transform(ctx['data_max'], k=args.k)
transform_valid_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(log_data_min, log_data_max),
])
transform_valid_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_valid_data,
transform_label=transform_valid_label
)
else:
dataset_valid = torch.load(args.val_anno)
print("Creating data loaders")
valid_sampler = SequentialSampler(dataset_valid)
dataloader_valid = torch.utils.data.DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print("Creating model")
if args.model not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal, norm=args.norm).to(device)
criterions = {
'MAE': lambda x, y: np.mean(np.abs(x - y)),
'MSE': lambda x, y: np.mean((x - y) ** 2)
}
if args.resume:
print(args.resume)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(network.replace_legacy(checkpoint['model']))
print('Loaded model checkpoint at Epoch {} / Step {}.'.format(checkpoint['epoch'], checkpoint['step']))
if args.vis:
# Create folder to store visualization results
vis_folder = f'visualization_{args.vis_suffix}' if args.vis_suffix else 'visualization'
vis_path = os.path.join(args.output_path, vis_folder)
utils.mkdir(vis_path)
else:
vis_path = None
print("Start testing")
start_time = time.time()
evaluate(model, criterions, dataloader_valid, device, args.k, ctx,
vis_path, args.vis_batch, args.vis_sample, args.missing, args.std)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Testing time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='FCN Testing')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='inverse model name')
parser.add_argument('-no', '--norm', default='bn', help='normalization layer type, support bn, in, ln (default: bn)')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Test related
parser.add_argument('-b', '--batch-size', default=50, type=int)
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--vis', help='visualization option', action="store_true")
parser.add_argument('-vsu','--vis-suffix', default=None, type=str, help='visualization suffix')
    parser.add_argument('-vb', '--vis-batch', help='number of batches to be visualized', default=0, type=int)
parser.add_argument('-vsa', '--vis-sample', help='number of samples in a batch to be visualized', default=0, type=int)
parser.add_argument('--missing', default=0, type=int, help='number of missing traces')
parser.add_argument('--std', default=0, type=float, help='standard deviation of gaussian noise')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.val_anno = os.path.join(args.anno_path, args.val_anno)
    if args.resume:
        args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
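# A hedged example invocation (not part of the original file); the checkpoint
# and annotation names are placeholders built from the argparse defaults above,
# and -r is resolved relative to the output folder by parse_args():
#   python test.py -ds flatfault-b -m InversionNet -v flatfault_b_val_invnet.txt \
#       -o Invnet_models -n fcn_l1loss_ffb -r checkpoint.pth --vis -vb 2 -vsa 3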
| 10,383 | 42.814346 | 156 | py |
| OpenFWI | OpenFWI-main/gan_train.py |
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
# Need to use parallel in apex, torch ddp can cause bugs when computing gradient penalty
import apex.parallel as parallel
step = 0
def train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
lr_schedulers, dataloader, device, epoch, print_freq, writer, n_critic=5):
global step
model.train()
model_d.train()
# Logger setup
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr_g', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('lr_d', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
header = 'Epoch: [{}]'.format(epoch)
itr = 0 # step in this epoch
max_itr = len(dataloader)
for data, label in metric_logger.log_every(dataloader, print_freq, header):
start_time = time.time()
data, label = data.to(device), label.to(device)
        # Update the discriminator first
optimizer_d.zero_grad()
with torch.no_grad():
pred = model(data)
loss_d, loss_diff, loss_gp = criterion_d(label, pred, model_d)
loss_d.backward()
optimizer_d.step()
metric_logger.update(loss_diff=loss_diff, loss_gp=loss_gp)
# Update generator occasionally
if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
optimizer_g.zero_grad()
pred = model(data)
loss_g, loss_g1v, loss_g2v = criterion_g(pred, label, model_d)
loss_g.backward()
optimizer_g.step()
metric_logger.update(loss_g1v=loss_g1v, loss_g2v=loss_g2v)
batch_size = data.shape[0]
metric_logger.update(lr_g=optimizer_g.param_groups[0]['lr'],
lr_d=optimizer_d.param_groups[0]['lr'])
metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
if writer:
writer.add_scalar('loss_diff', loss_diff, step)
writer.add_scalar('loss_gp', loss_gp, step)
if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
writer.add_scalar('loss_g1v', loss_g1v, step)
writer.add_scalar('loss_g2v', loss_g2v, step)
step += 1
itr += 1
for lr_scheduler in lr_schedulers:
lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
with torch.no_grad():
for data, label in metric_logger.log_every(dataloader, 20, header):
data = data.to(device, non_blocking=True)
label = label.to(device, non_blocking=True)
pred = model(data)
loss, loss_g1v, loss_g2v = criterion(pred, label)
metric_logger.update(loss=loss.item(),
loss_g1v=loss_g1v.item(), loss_g2v=loss_g2v.item())
# Gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
if writer:
writer.add_scalar('loss', metric_logger.loss.global_avg, step)
writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
return metric_logger.loss.global_avg
def main(args):
global step
print(args)
print('torch version: ', torch.__version__)
print('torchvision version: ', torchvision.__version__)
utils.mkdir(args.output_path) # create folder to store checkpoints
utils.init_distributed_mode(args) # distributed mode initialization
# Set up tensorboard summary writer
train_writer, val_writer = None, None
if args.tensorboard:
utils.mkdir(args.log_path) # create folder to store tensorboard logs
        if not args.distributed or (args.rank == 0 and args.local_rank == 0):
train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
# Create dataset and dataloader
print('Loading data')
print('Loading training data')
log_data_min = T.log_transform(ctx['data_min'], k=args.k)
log_data_max = T.log_transform(ctx['data_max'], k=args.k)
transform_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(log_data_min, log_data_max)
])
transform_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.train_anno[-3:] == 'txt':
dataset_train = FWIDataset(
args.train_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_train = torch.load(args.train_anno)
print('Loading validation data')
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_valid = torch.load(args.val_anno)
print('Creating data loaders')
if args.distributed:
train_sampler = DistributedSampler(dataset_train, shuffle=True)
valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
else:
train_sampler = RandomSampler(dataset_train)
valid_sampler = RandomSampler(dataset_valid)
dataloader_train = DataLoader(
dataset_train, batch_size=args.batch_size,
sampler=train_sampler, num_workers=args.workers,
pin_memory=True, drop_last=True, collate_fn=default_collate)
dataloader_valid = DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print('Creating model')
if args.model not in network.model_dict or args.model_d not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
model_d = network.model_dict[args.model_d]().to(device)
if args.distributed and args.sync_bn:
model = parallel.convert_syncbn_model(model)
model_d = parallel.convert_syncbn_model(model_d)
# Define loss function
l1loss = nn.L1Loss()
l2loss = nn.MSELoss()
def criterion_g(pred, gt, model_d=None):
loss_g1v = l1loss(pred, gt)
loss_g2v = l2loss(pred, gt)
loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
if model_d is not None:
loss_adv = -torch.mean(model_d(pred))
loss += args.lambda_adv * loss_adv
return loss, loss_g1v, loss_g2v
criterion_d = utils.Wasserstein_GP(device, args.lambda_gp)
# Scale lr according to effective batch size
lr_g = args.lr_g * args.world_size
lr_d = args.lr_d * args.world_size
optimizer_g = torch.optim.AdamW(model.parameters(), lr=lr_g, betas=(0, 0.9), weight_decay=args.weight_decay)
optimizer_d = torch.optim.AdamW(model_d.parameters(), lr=lr_d, betas=(0, 0.9), weight_decay=args.weight_decay)
# Convert scheduler to be per iteration instead of per epoch
warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
lr_schedulers = [WarmupMultiStepLR(
optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
warmup_iters=warmup_iters, warmup_factor=1e-5) for optimizer in [optimizer_g, optimizer_d]]
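    # Illustrative note (assumed numbers, not from the original file): with
    # --lr_g 1e-4 on 4 processes the effective generator lr becomes 4e-4, and a
    # milestone of "-lm 100" decays the lr after 100 * len(dataloader_train)
    # iterations, since the schedulers above are stepped once per iteration
    # rather than once per epoch.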
model_without_ddp = model
model_d_without_ddp = model_d
if args.distributed:
model = parallel.DistributedDataParallel(model)
model_d = parallel.DistributedDataParallel(model_d)
model_without_ddp = model.module
model_d_without_ddp = model_d.module
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
model_d_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model_d']))
optimizer_g.load_state_dict(checkpoint['optimizer_g'])
optimizer_d.load_state_dict(checkpoint['optimizer_d'])
args.start_epoch = checkpoint['epoch'] + 1
step = checkpoint['step']
for i in range(len(lr_schedulers)):
lr_schedulers[i].load_state_dict(checkpoint['lr_schedulers'][i])
for lr_scheduler in lr_schedulers:
lr_scheduler.milestones = lr_milestones
print('Start training')
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
lr_schedulers, dataloader_train, device, epoch,
args.print_freq, train_writer, args.n_critic)
evaluate(model, criterion_g, dataloader_valid, device, val_writer)
checkpoint = {
'model': model_without_ddp.state_dict(),
'model_d': model_d_without_ddp.state_dict(),
'optimizer_g': optimizer_g.state_dict(),
'optimizer_d': optimizer_d.state_dict(),
'lr_schedulers': [scheduler.state_dict() for scheduler in lr_schedulers],
'epoch': epoch,
'step': step,
'args': args}
# Save checkpoint per epoch
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'checkpoint.pth'))
# Save checkpoint every epoch block
if args.output_path and (epoch + 1) % args.epoch_block == 0:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='GAN Training')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flat', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='/vast/home/aicyd/Desktop/OpenFWI/src/', help='annotation files location')
parser.add_argument('-t', '--train-anno', default='train_flatvel.json', help='name of train anno')
parser.add_argument('-v', '--val-anno', default='val_flatvel.json', help='name of val anno')
parser.add_argument('-o', '--output-path', default='models', help='path to parent folder to save checkpoints')
parser.add_argument('-l', '--log-path', default='models', help='path to parent folder to save logs')
parser.add_argument('-n', '--save-name', default='gan', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='generator name')
parser.add_argument('-md', '--model-d', default='Discriminator', help='discriminator name')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Training related
parser.add_argument('-nc', '--n_critic', default=5, type=int, help='generator & discriminator update ratio')
parser.add_argument('-b', '--batch-size', default=64, type=int)
parser.add_argument('--lr_g', default=0.0001, type=float, help='initial learning rate of generator')
parser.add_argument('--lr_d', default=0.0001, type=float, help='initial learning rate of discriminator')
parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
parser.add_argument('-eb', '--epoch_block', type=int, default=20, help='epochs in a saved block')
    parser.add_argument('-nb', '--num_block', type=int, default=25, help='number of saved blocks')
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
# Loss related
parser.add_argument('-g1v', '--lambda_g1v', type=float, default=100.0)
parser.add_argument('-g2v', '--lambda_g2v', type=float, default=100.0)
parser.add_argument('-adv', '--lambda_adv', type=float, default=1.0)
parser.add_argument('-gp', '--lambda_gp', type=float, default=10.0)
# Distributed training related
parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
# Tensorboard related
parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
args.train_anno = os.path.join(args.anno_path, args.train_anno)
args.val_anno = os.path.join(args.anno_path, args.val_anno)
args.epochs = args.epoch_block * args.num_block
if args.resume:
args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
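# A hedged single-GPU launch example (not part of the original file); the
# annotation names are placeholders in the txt format handled by FWIDataset
# (the .json defaults above are loaded with torch.load instead):
#   python gan_train.py -ds flatfault-b -m InversionNet -md Discriminator \
#       -t flatfault_b_train_invnet.txt -v flatfault_b_val_invnet.txt \
#       -o models -n gan --tensorboard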
| 16,662 | 43.553476 | 128 | py |
| OpenFWI | OpenFWI-main/network.py |
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil
from collections import OrderedDict
NORM_LAYERS = { 'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d, 'ln': nn.LayerNorm }
# Replace the key names in the checkpoint in which legacy network building blocks are used
def replace_legacy(old_dict):
li = []
for k, v in old_dict.items():
k = (k.replace('Conv2DwithBN', 'layers')
.replace('Conv2DwithBN_Tanh', 'layers')
.replace('Deconv2DwithBN', 'layers')
.replace('ResizeConv2DwithBN', 'layers'))
li.append((k, v))
return OrderedDict(li)
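# A hedged usage sketch (mirrors how test.py and gan_train.py consume this
# helper; the checkpoint path is a placeholder):
#   checkpoint = torch.load('checkpoint.pth', map_location='cpu')
#   model.load_state_dict(replace_legacy(checkpoint['model']))
# A legacy key such as 'convblock1.Conv2DwithBN.0.weight' becomes
# 'convblock1.layers.0.weight', matching the ConvBlock modules defined below.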
class Conv2DwithBN(nn.Module):
def __init__(self, in_fea, out_fea,
kernel_size=3, stride=1, padding=1,
bn=True, relu_slop=0.2, dropout=None):
super(Conv2DwithBN,self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if bn:
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.LeakyReLU(relu_slop, inplace=True))
if dropout:
layers.append(nn.Dropout2d(0.8))
self.Conv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.Conv2DwithBN(x)
class ResizeConv2DwithBN(nn.Module):
def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest'):
super(ResizeConv2DwithBN, self).__init__()
layers = [nn.Upsample(scale_factor=scale_factor, mode=mode)]
layers.append(nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.ResizeConv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.ResizeConv2DwithBN(x)
class Conv2DwithBN_Tanh(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1):
super(Conv2DwithBN_Tanh, self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.Tanh())
self.Conv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.Conv2DwithBN(x)
class ConvBlock(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn', relu_slop=0.2, dropout=None):
super(ConvBlock,self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(relu_slop, inplace=True))
if dropout:
layers.append(nn.Dropout2d(0.8))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ConvBlock_Tanh(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn'):
super(ConvBlock_Tanh, self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.Tanh())
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class DeconvBlock(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=2, stride=2, padding=0, output_padding=0, norm='bn'):
super(DeconvBlock, self).__init__()
layers = [nn.ConvTranspose2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ResizeBlock(nn.Module):
def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest', norm='bn'):
super(ResizeBlock, self).__init__()
layers = [nn.Upsample(scale_factor=scale_factor, mode=mode)]
layers.append(nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=3, stride=1, padding=1))
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
# FlatFault/CurveFault
# 1000, 70 -> 70, 70
class InversionNet(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, sample_spatial=1.0, **kwargs):
super(InversionNet, self).__init__()
self.convblock1 = ConvBlock(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
self.convblock2_1 = ConvBlock(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock2_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock3_1 = ConvBlock(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock3_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock4_1 = ConvBlock(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock4_2 = ConvBlock(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
self.convblock5_1 = ConvBlock(dim3, dim3, stride=2)
self.convblock5_2 = ConvBlock(dim3, dim3)
self.convblock6_1 = ConvBlock(dim3, dim4, stride=2)
self.convblock6_2 = ConvBlock(dim4, dim4)
self.convblock7_1 = ConvBlock(dim4, dim4, stride=2)
self.convblock7_2 = ConvBlock(dim4, dim4)
self.convblock8 = ConvBlock(dim4, dim5, kernel_size=(8, ceil(70 * sample_spatial / 8)), padding=0)
self.deconv1_1 = DeconvBlock(dim5, dim5, kernel_size=5)
self.deconv1_2 = ConvBlock(dim5, dim5)
self.deconv2_1 = DeconvBlock(dim5, dim4, kernel_size=4, stride=2, padding=1)
self.deconv2_2 = ConvBlock(dim4, dim4)
self.deconv3_1 = DeconvBlock(dim4, dim3, kernel_size=4, stride=2, padding=1)
self.deconv3_2 = ConvBlock(dim3, dim3)
self.deconv4_1 = DeconvBlock(dim3, dim2, kernel_size=4, stride=2, padding=1)
self.deconv4_2 = ConvBlock(dim2, dim2)
self.deconv5_1 = DeconvBlock(dim2, dim1, kernel_size=4, stride=2, padding=1)
self.deconv5_2 = ConvBlock(dim1, dim1)
self.deconv6 = ConvBlock_Tanh(dim1, 1)
def forward(self,x):
# Encoder Part
x = self.convblock1(x) # (None, 32, 500, 70)
x = self.convblock2_1(x) # (None, 64, 250, 70)
x = self.convblock2_2(x) # (None, 64, 250, 70)
x = self.convblock3_1(x) # (None, 64, 125, 70)
x = self.convblock3_2(x) # (None, 64, 125, 70)
x = self.convblock4_1(x) # (None, 128, 63, 70)
x = self.convblock4_2(x) # (None, 128, 63, 70)
x = self.convblock5_1(x) # (None, 128, 32, 35)
x = self.convblock5_2(x) # (None, 128, 32, 35)
x = self.convblock6_1(x) # (None, 256, 16, 18)
x = self.convblock6_2(x) # (None, 256, 16, 18)
x = self.convblock7_1(x) # (None, 256, 8, 9)
x = self.convblock7_2(x) # (None, 256, 8, 9)
x = self.convblock8(x) # (None, 512, 1, 1)
# Decoder Part
x = self.deconv1_1(x) # (None, 512, 5, 5)
x = self.deconv1_2(x) # (None, 512, 5, 5)
x = self.deconv2_1(x) # (None, 256, 10, 10)
x = self.deconv2_2(x) # (None, 256, 10, 10)
x = self.deconv3_1(x) # (None, 128, 20, 20)
x = self.deconv3_2(x) # (None, 128, 20, 20)
x = self.deconv4_1(x) # (None, 64, 40, 40)
x = self.deconv4_2(x) # (None, 64, 40, 40)
x = self.deconv5_1(x) # (None, 32, 80, 80)
x = self.deconv5_2(x) # (None, 32, 80, 80)
x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70) 125, 100
x = self.deconv6(x) # (None, 1, 70, 70)
return x
class FCN4_Deep_Resize_2(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, ratio=1.0, upsample_mode='nearest'):
super(FCN4_Deep_Resize_2, self).__init__()
self.convblock1 = Conv2DwithBN(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
self.convblock2_1 = Conv2DwithBN(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock2_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock3_1 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock3_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock4_1 = Conv2DwithBN(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock4_2 = Conv2DwithBN(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
self.convblock5_1 = Conv2DwithBN(dim3, dim3, stride=2)
self.convblock5_2 = Conv2DwithBN(dim3, dim3)
self.convblock6_1 = Conv2DwithBN(dim3, dim4, stride=2)
self.convblock6_2 = Conv2DwithBN(dim4, dim4)
self.convblock7_1 = Conv2DwithBN(dim4, dim4, stride=2)
self.convblock7_2 = Conv2DwithBN(dim4, dim4)
self.convblock8 = Conv2DwithBN(dim4, dim5, kernel_size=(8, ceil(70 * ratio / 8)), padding=0)
self.deconv1_1 = ResizeConv2DwithBN(dim5, dim5, scale_factor=5, mode=upsample_mode)
self.deconv1_2 = Conv2DwithBN(dim5, dim5)
self.deconv2_1 = ResizeConv2DwithBN(dim5, dim4, scale_factor=2, mode=upsample_mode)
self.deconv2_2 = Conv2DwithBN(dim4, dim4)
self.deconv3_1 = ResizeConv2DwithBN(dim4, dim3, scale_factor=2, mode=upsample_mode)
self.deconv3_2 = Conv2DwithBN(dim3, dim3)
self.deconv4_1 = ResizeConv2DwithBN(dim3, dim2, scale_factor=2, mode=upsample_mode)
self.deconv4_2 = Conv2DwithBN(dim2, dim2)
self.deconv5_1 = ResizeConv2DwithBN(dim2, dim1, scale_factor=2, mode=upsample_mode)
self.deconv5_2 = Conv2DwithBN(dim1, dim1)
self.deconv6 = Conv2DwithBN_Tanh(dim1, 1)
def forward(self,x):
# Encoder Part
x = self.convblock1(x) # (None, 32, 500, 70)
x = self.convblock2_1(x) # (None, 64, 250, 70)
x = self.convblock2_2(x) # (None, 64, 250, 70)
x = self.convblock3_1(x) # (None, 64, 125, 70)
x = self.convblock3_2(x) # (None, 64, 125, 70)
x = self.convblock4_1(x) # (None, 128, 63, 70)
x = self.convblock4_2(x) # (None, 128, 63, 70)
x = self.convblock5_1(x) # (None, 128, 32, 35)
x = self.convblock5_2(x) # (None, 128, 32, 35)
x = self.convblock6_1(x) # (None, 256, 16, 18)
x = self.convblock6_2(x) # (None, 256, 16, 18)
x = self.convblock7_1(x) # (None, 256, 8, 9)
x = self.convblock7_2(x) # (None, 256, 8, 9)
x = self.convblock8(x) # (None, 512, 1, 1)
# Decoder Part
x = self.deconv1_1(x) # (None, 512, 5, 5)
x = self.deconv1_2(x) # (None, 512, 5, 5)
x = self.deconv2_1(x) # (None, 256, 10, 10)
x = self.deconv2_2(x) # (None, 256, 10, 10)
x = self.deconv3_1(x) # (None, 128, 20, 20)
x = self.deconv3_2(x) # (None, 128, 20, 20)
x = self.deconv4_1(x) # (None, 64, 40, 40)
x = self.deconv4_2(x) # (None, 64, 40, 40)
x = self.deconv5_1(x) # (None, 32, 80, 80)
x = self.deconv5_2(x) # (None, 32, 80, 80)
x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70)
x = self.deconv6(x) # (None, 1, 70, 70)
return x
class Discriminator(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, **kwargs):
super(Discriminator, self).__init__()
self.convblock1_1 = ConvBlock(1, dim1, stride=2)
self.convblock1_2 = ConvBlock(dim1, dim1)
self.convblock2_1 = ConvBlock(dim1, dim2, stride=2)
self.convblock2_2 = ConvBlock(dim2, dim2)
self.convblock3_1 = ConvBlock(dim2, dim3, stride=2)
self.convblock3_2 = ConvBlock(dim3, dim3)
self.convblock4_1 = ConvBlock(dim3, dim4, stride=2)
self.convblock4_2 = ConvBlock(dim4, dim4)
self.convblock5 = ConvBlock(dim4, 1, kernel_size=5, padding=0)
def forward(self, x):
x = self.convblock1_1(x)
x = self.convblock1_2(x)
x = self.convblock2_1(x)
x = self.convblock2_2(x)
x = self.convblock3_1(x)
x = self.convblock3_2(x)
x = self.convblock4_1(x)
x = self.convblock4_2(x)
x = self.convblock5(x)
x = x.view(x.shape[0], -1)
return x
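# With the 70 x 70 velocity maps used in this repo, the four stride-2 blocks
# above reduce the map to 5 x 5 and the final 5 x 5 convolution yields a single
# critic score per sample, i.e. an output of shape (N, 1).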
class Conv_HPGNN(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=None, stride=None, padding=None, **kwargs):
super(Conv_HPGNN, self).__init__()
layers = [
ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
]
if kernel_size is not None:
layers.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class Deconv_HPGNN(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size, **kwargs):
super(Deconv_HPGNN, self).__init__()
layers = [
nn.ConvTranspose2d(in_fea, in_fea, kernel_size=kernel_size, stride=2, padding=0),
ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8)
]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
model_dict = {
'InversionNet': InversionNet,
'Discriminator': Discriminator,
'UPFWI': FCN4_Deep_Resize_2
}
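# A minimal shape smoke test (not part of the original file), assuming the
# FlatFault/CurveFault input layout documented above: 5 sources x 1000 time
# steps x 70 receivers; batch size and random input are illustrative only.
if __name__ == '__main__':
    net = model_dict['InversionNet']()
    seis = torch.rand(2, 5, 1000, 70)  # normalized seismic gathers
    with torch.no_grad():
        vel = net(seis)
    print(vel.shape)  # expected: torch.Size([2, 1, 70, 70])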
| 14,861 | 45.15528 | 167 | py |
| OpenFWI | OpenFWI-main/vis.py |
import os
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
# Load colormap for velocity map visualization
rainbow_cmap = ListedColormap(np.load('rainbow256.npy'))
def plot_velocity(output, target, path, vmin=None, vmax=None):
fig, ax = plt.subplots(1, 2, figsize=(11, 5))
if vmin is None or vmax is None:
vmax, vmin = np.max(target), np.min(target)
im = ax[0].matshow(output, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
ax[0].set_title('Prediction', y=1.08)
ax[1].matshow(target, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
ax[1].set_title('Ground Truth', y=1.08)
for axis in ax:
# axis.set_xticks(range(0, 70, 10))
# axis.set_xticklabels(range(0, 1050, 150))
# axis.set_yticks(range(0, 70, 10))
# axis.set_yticklabels(range(0, 1050, 150))
axis.set_xticks(range(0, 70, 10))
axis.set_xticklabels(range(0, 700, 100))
axis.set_yticks(range(0, 70, 10))
axis.set_yticklabels(range(0, 700, 100))
axis.set_ylabel('Depth (m)', fontsize=12)
axis.set_xlabel('Offset (m)', fontsize=12)
fig.colorbar(im, ax=ax, shrink=0.75, label='Velocity(m/s)')
plt.savefig(path)
plt.close('all')
def plot_single_velocity(label, path):
plt.rcParams.update({'font.size': 16})
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
vmax, vmin = np.max(label), np.min(label)
im = ax.matshow(label, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
# im = ax.matshow(label, cmap="gist_rainbow", vmin=vmin, vmax=vmax)
# nx = label.shape[0]
# ax.set_aspect(aspect=1)
# ax.set_xticks(range(0, nx, int(150//(1050/nx)))[:7])
# ax.set_xticklabels(range(0, 1050, 150))
# ax.set_yticks(range(0, nx, int(150//(1050/nx)))[:7])
# ax.set_yticklabels(range(0, 1050, 150))
# ax.set_title('Offset (m)', y=1.08)
# ax.set_ylabel('Depth (m)', fontsize=18)
fig.colorbar(im, ax=ax, shrink=1.0, label='Velocity(m/s)')
plt.savefig(path)
plt.close('all')
# def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
# fig, ax = plt.subplots(1, 3, figsize=(15, 6))
# im = ax[0].matshow(output, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_title('Prediction')
# ax[1].matshow(target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].set_title('Ground Truth')
# ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[2].set_title('Difference')
# fig.colorbar(im, ax=ax, format='%.1e')
# plt.savefig(path)
# plt.close('all')
def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
# fig, ax = plt.subplots(1, 2, figsize=(11, 5))
aspect = output.shape[1]/output.shape[0]
im = ax[0].matshow(target, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
ax[0].set_title('Ground Truth')
ax[1].matshow(output, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
ax[1].set_title('Prediction')
ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
ax[2].set_title('Difference')
# for axis in ax:
# axis.set_xticks(range(0, 70, 10))
# axis.set_xticklabels(range(0, 1050, 150))
# axis.set_title('Offset (m)', y=1.1)
# axis.set_ylabel('Time (ms)', fontsize=12)
# fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
fig.colorbar(im, ax=ax, shrink=0.75, label='Amplitude')
plt.savefig(path)
plt.close('all')
def plot_single_seismic(data, path):
nz, nx = data.shape
plt.rcParams.update({'font.size': 18})
vmin, vmax = np.min(data), np.max(data)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
im = ax.matshow(data, aspect='auto', cmap='gray', vmin=vmin * 0.01, vmax=vmax * 0.01)
ax.set_aspect(aspect=nx/nz)
ax.set_xticks(range(0, nx, int(300//(1050/nx)))[:5])
ax.set_xticklabels(range(0, 1050, 300))
ax.set_title('Offset (m)', y=1.08)
ax.set_yticks(range(0, nz, int(200//(1000/nz)))[:5])
ax.set_yticklabels(range(0, 1000, 200))
ax.set_ylabel('Time (ms)', fontsize=18)
fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
plt.savefig(path)
plt.close('all')
| 4,324 | 38.318182 | 89 | py |
| OpenFWI | OpenFWI-main/utils.py |
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import errno
import os
import itertools
from torchvision.models import vgg16
import numpy as np
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
if isinstance(iterable, list):
length = max(len(x) for x in iterable)
iterable = [x if len(x) == length else itertools.cycle(x) for x in iterable]
iterable = zip(*iterable)
else:
length = len(iterable)
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(length))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj # <-- yield the batch in for loop
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (length - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, length, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, length, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {}'.format(header, total_time_str))
# Legacy code
class ContentLoss(nn.Module):
def __init__(self, args):
super(ContentLoss, self).__init__()
names = ['l1', 'l2']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, model, input, target):
pred = model(input)
loss_l1 = self.l1loss(target, pred)
loss_l2 = self.l2loss(target, pred)
loss = loss_l1 * self.lambda_l1 + loss_l2 * self.lambda_l2
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Legacy code
class IdenticalLoss(nn.Module):
def __init__(self, args):
super(IdenticalLoss, self).__init__()
names = ['id1s', 'id2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, model_s2v, model_v2s, input):
mid = model_s2v(input)
pred = model_v2s(mid)
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_id1s, loss_id2s = cal_loss(input, pred)
loss = loss_id1s * self.lambda_id1s + loss_id2s * self.lambda_id2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Implemented according to H-PGNN, not useful
class NMSELoss(nn.Module):
def __init__(self):
super(NMSELoss, self).__init__()
def forward(self, pred, gt):
return torch.mean(((pred - gt) / (torch.amax(gt, (-2, -1), keepdim=True) + 1e-5)) ** 2)
class CycleLoss(nn.Module):
def __init__(self, args):
super(CycleLoss, self).__init__()
names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
if pred_v is not None:
loss_g1v, loss_g2v = cal_loss(pred_v, label)
if pred_s is not None:
loss_g1s, loss_g2s = cal_loss(pred_s, data)
loss_c1v, loss_c2v, loss_c1s , loss_c2s = [0] * 4
if recon_v is not None:
loss_c1v, loss_c2v = cal_loss(recon_v, label)
if recon_s is not None:
loss_c1s, loss_c2s = cal_loss(recon_s, data)
loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Legacy code
class _CycleLoss(nn.Module):
def __init__(self, args):
super(_CycleLoss, self).__init__()
names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
if pred_v is not None and (self.lambda_g1v != 0 or self.lambda_g2v != 0):
loss_g1v, loss_g2v = cal_loss(pred_v, label)
if pred_s is not None and (self.lambda_g1s != 0 or self.lambda_g2s != 0):
loss_g1s, loss_g2s = cal_loss(pred_s, data)
loss_c1v, loss_c2v, loss_c1s , loss_c2s = [0] * 4
if recon_v is not None and (self.lambda_c1v != 0 or self.lambda_c2v != 0):
loss_c1v, loss_c2v = cal_loss(recon_v, label)
if recon_s is not None and (self.lambda_c1s != 0 or self.lambda_c2s != 0):
loss_c1s, loss_c2s = cal_loss(recon_s, data)
loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.local_rank = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ and args.world_size > 1:
args.rank = int(os.environ['SLURM_PROCID'])
args.local_rank = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.local_rank)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
setup_for_distributed(args.rank == 0)
class Wasserstein_GP(nn.Module):
def __init__(self, device, lambda_gp):
super(Wasserstein_GP, self).__init__()
self.device = device
self.lambda_gp = lambda_gp
def forward(self, real, fake, model):
gradient_penalty = self.compute_gradient_penalty(model, real, fake)
loss_real = torch.mean(model(real))
loss_fake = torch.mean(model(fake))
loss = -loss_real + loss_fake + gradient_penalty * self.lambda_gp
return loss, loss_real-loss_fake, gradient_penalty
def compute_gradient_penalty(self, model, real_samples, fake_samples):
alpha = torch.rand(real_samples.size(0), 1, 1, 1, device=self.device)
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = model(interpolates)
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(real_samples.size(0), d_interpolates.size(1)).to(self.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
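# For reference, the objective implemented above is the standard WGAN-GP
# discriminator loss (Gulrajani et al., 2017):
#   L_D = E[D(fake)] - E[D(real)] + lambda_gp * E[(||grad_xhat D(xhat)||_2 - 1)^2]
# where xhat is sampled uniformly along straight lines between real and fake
# samples.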
# Modified from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49
class VGGPerceptualLoss(nn.Module):
def __init__(self, resize=True):
super(VGGPerceptualLoss, self).__init__()
blocks = []
blocks.append(vgg16(pretrained=True).features[:4].eval()) # relu1_2
blocks.append(vgg16(pretrained=True).features[4:9].eval()) # relu2_2
blocks.append(vgg16(pretrained=True).features[9:16].eval()) # relu3_3
blocks.append(vgg16(pretrained=True).features[16:23].eval()) # relu4_3
for bl in blocks:
for p in bl:
p.requires_grad = False
self.blocks = nn.ModuleList(blocks)
self.transform = nn.functional.interpolate
self.resize = resize
self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, input, target, rescale=True, feature_layers=[1]):
input = input.view(-1, 1, input.shape[-2], input.shape[-1]).repeat(1, 3, 1, 1)
target = target.view(-1, 1, target.shape[-2], target.shape[-1]).repeat(1, 3, 1, 1)
if rescale: # from [-1, 1] to [0, 1]
input = input / 2 + 0.5
target = target / 2 + 0.5
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss_l1, loss_l2 = 0.0, 0.0
x = input
y = target
for i, block in enumerate(self.blocks):
x = block(x)
y = block(y)
if i in feature_layers:
loss_l1 += self.l1loss(x, y)
loss_l2 += self.l2loss(x, y)
return loss_l1, loss_l2
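# By default (feature_layers=[1]) the loss above compares activations of the
# second VGG16 block (relu2_2) only; inputs are tiled from 1 to 3 channels and
# mapped from [-1, 1] to ImageNet-normalized space before the comparison.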
def cal_psnr(gt, data, max_value):
mse = np.mean((gt - data) ** 2)
if (mse == 0):
return 100
return 20 * np.log10(max_value / np.sqrt(mse))
| 17,006 | 34.804211 | 105 | py |
| OpenFWI | OpenFWI-main/dataset.py |
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision.transforms import Compose
import transforms as T
class FWIDataset(Dataset):
''' FWI dataset
For convenience, in this class, a batch refers to a npy file
instead of the batch used during training.
Args:
anno: path to annotation file
preload: whether to load the whole dataset into memory
sample_ratio: downsample ratio for seismic data
file_size: # of samples in each npy file
transform_data|label: transformation applied to data or label
'''
def __init__(self, anno, preload=True, sample_ratio=1, file_size=500,
transform_data=None, transform_label=None):
if not os.path.exists(anno):
            print(f'Annotation file {anno} does not exist')
self.preload = preload
self.sample_ratio = sample_ratio
self.file_size = file_size
self.transform_data = transform_data
self.transform_label = transform_label
with open(anno, 'r') as f:
self.batches = f.readlines()
if preload:
self.data_list, self.label_list = [], []
for batch in self.batches:
data, label = self.load_every(batch)
self.data_list.append(data)
if label is not None:
self.label_list.append(label)
# Load from one line
def load_every(self, batch):
batch = batch.split('\t')
data_path = batch[0] if len(batch) > 1 else batch[0][:-1]
data = np.load(data_path)[:, :, ::self.sample_ratio, :]
data = data.astype('float32')
if len(batch) > 1:
label_path = batch[1][:-1]
label = np.load(label_path)
label = label.astype('float32')
else:
label = None
return data, label
def __getitem__(self, idx):
batch_idx, sample_idx = idx // self.file_size, idx % self.file_size
if self.preload:
data = self.data_list[batch_idx][sample_idx]
label = self.label_list[batch_idx][sample_idx] if len(self.label_list) != 0 else None
else:
data, label = self.load_every(self.batches[batch_idx])
data = data[sample_idx]
label = label[sample_idx] if label is not None else None
if self.transform_data:
data = self.transform_data(data)
if self.transform_label and label is not None:
label = self.transform_label(label)
return data, label if label is not None else np.array([])
def __len__(self):
return len(self.batches) * self.file_size
if __name__ == '__main__':
transform_data = Compose([
T.LogTransform(k=1),
T.MinMaxNormalize(T.log_transform(-61, k=1), T.log_transform(120, k=1))
])
transform_label = Compose([
T.MinMaxNormalize(2000, 6000)
])
dataset = FWIDataset(f'relevant_files/temp.txt', transform_data=transform_data, transform_label=transform_label, file_size=1)
data, label = dataset[0]
print(data.shape)
print(label is None)
| 3,920
| 37.441176
| 129
|
py
|
OpenFWI
|
OpenFWI-main/scheduler.py
|
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
from bisect import bisect_right
# Scheduler adopted from the original repo
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=5,
warmup_method="linear",
last_epoch=-1,
):
if not milestones == sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. Got {}".format(milestones)
)
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted, "
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr *
warmup_factor *
self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
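# Hedged usage sketch (not part of the original file): the learning rate ramps up
# linearly for `warmup_iters` steps and is then multiplied by `gamma` at each
# milestone. The model and hyperparameters below are placeholders for illustration.
if __name__ == '__main__':
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = WarmupMultiStepLR(optimizer, milestones=[10, 20], gamma=0.1,
                                  warmup_factor=1e-3, warmup_iters=5)
    for it in range(25):
        print(it, optimizer.param_groups[0]['lr'])
        optimizer.step()
        scheduler.step()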
| 2,380
| 35.075758
| 105
|
py
|
OpenFWI
|
OpenFWI-main/train.py
|
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
step = 0
def train_one_epoch(model, criterion, optimizer, lr_scheduler,
dataloader, device, epoch, print_freq, writer):
global step
model.train()
# Logger setup
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
header = 'Epoch: [{}]'.format(epoch)
for data, label in metric_logger.log_every(dataloader, print_freq, header):
start_time = time.time()
optimizer.zero_grad()
data, label = data.to(device), label.to(device)
output = model(data)
loss, loss_g1v, loss_g2v = criterion(output, label)
loss.backward()
optimizer.step()
loss_val = loss.item()
loss_g1v_val = loss_g1v.item()
loss_g2v_val = loss_g2v.item()
batch_size = data.shape[0]
metric_logger.update(loss=loss_val, loss_g1v=loss_g1v_val,
loss_g2v=loss_g2v_val, lr=optimizer.param_groups[0]['lr'])
metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
if writer:
writer.add_scalar('loss', loss_val, step)
writer.add_scalar('loss_g1v', loss_g1v_val, step)
writer.add_scalar('loss_g2v', loss_g2v_val, step)
step += 1
lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
with torch.no_grad():
for data, label in metric_logger.log_every(dataloader, 20, header):
data = data.to(device, non_blocking=True)
label = label.to(device, non_blocking=True)
output = model(data)
loss, loss_g1v, loss_g2v = criterion(output, label)
metric_logger.update(loss=loss.item(),
loss_g1v=loss_g1v.item(),
loss_g2v=loss_g2v.item())
# Gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
if writer:
writer.add_scalar('loss', metric_logger.loss.global_avg, step)
writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
return metric_logger.loss.global_avg
def main(args):
global step
print(args)
print('torch version: ', torch.__version__)
print('torchvision version: ', torchvision.__version__)
utils.mkdir(args.output_path) # create folder to store checkpoints
utils.init_distributed_mode(args) # distributed mode initialization
# Set up tensorboard summary writer
train_writer, val_writer = None, None
if args.tensorboard:
utils.mkdir(args.log_path) # create folder to store tensorboard logs
if (not args.distributed) or (args.rank == 0 and args.local_rank == 0):
train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
# Create dataset and dataloader
print('Loading data')
print('Loading training data')
# Normalize data and label to [-1, 1]
transform_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(T.log_transform(ctx['data_min'], k=args.k), T.log_transform(ctx['data_max'], k=args.k))
])
transform_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.train_anno[-3:] == 'txt':
dataset_train = FWIDataset(
args.train_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_train = torch.load(args.train_anno)
print('Loading validation data')
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_valid = torch.load(args.val_anno)
print('Creating data loaders')
if args.distributed:
train_sampler = DistributedSampler(dataset_train, shuffle=True)
valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
else:
train_sampler = RandomSampler(dataset_train)
valid_sampler = RandomSampler(dataset_valid)
dataloader_train = DataLoader(
dataset_train, batch_size=args.batch_size,
sampler=train_sampler, num_workers=args.workers,
pin_memory=True, drop_last=True, collate_fn=default_collate)
dataloader_valid = DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print('Creating model')
if args.model not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Define loss function
l1loss = nn.L1Loss()
l2loss = nn.MSELoss()
def criterion(pred, gt):
loss_g1v = l1loss(pred, gt)
loss_g2v = l2loss(pred, gt)
loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
return loss, loss_g1v, loss_g2v
# Scale lr according to effective batch size
lr = args.lr * args.world_size
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)
# Convert scheduler to be per iteration instead of per epoch
warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
lr_scheduler = WarmupMultiStepLR(
optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
warmup_iters=warmup_iters, warmup_factor=1e-5)
model_without_ddp = model
if args.distributed:
model = DistributedDataParallel(model, device_ids=[args.local_rank])
model_without_ddp = model.module
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
step = checkpoint['step']
lr_scheduler.milestones = lr_milestones
print('Start training')
start_time = time.time()
best_loss = 10
chp = 1
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, criterion, optimizer, lr_scheduler, dataloader_train,
device, epoch, args.print_freq, train_writer)
loss = evaluate(model, criterion, dataloader_valid, device, val_writer)
checkpoint = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'step': step,
'args': args}
# Save checkpoint per epoch
if loss < best_loss:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'checkpoint.pth'))
print('saving checkpoint at epoch: ', epoch)
chp = epoch
best_loss = loss
# Save checkpoint every epoch block
print('current best loss: ', best_loss)
print('current best epoch: ', chp)
if args.output_path and (epoch + 1) % args.epoch_block == 0:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='FCN Training')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
parser.add_argument('-t', '--train-anno', default='flatfault_b_train_invnet.txt', help='name of train anno')
parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
parser.add_argument('-l', '--log-path', default='Invnet_models', help='path to parent folder to save logs')
parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='inverse model name')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Training related
parser.add_argument('-b', '--batch-size', default=256, type=int)
parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
parser.add_argument('-eb', '--epoch_block', type=int, default=40, help='epochs in a saved block')
parser.add_argument('-nb', '--num_block', type=int, default=3, help='number of saved block')
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('--print-freq', default=50, type=int, help='print frequency')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
# Loss related
parser.add_argument('-g1v', '--lambda_g1v', type=float, default=1.0)
parser.add_argument('-g2v', '--lambda_g2v', type=float, default=1.0)
# Distributed training related
parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
# Tensorboard related
parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
args.train_anno = os.path.join(args.anno_path, args.train_anno)
args.val_anno = os.path.join(args.anno_path, args.val_anno)
args.epochs = args.epoch_block * args.num_block
if args.resume:
args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
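# Hedged example invocation (not part of the original file). The model name below is
# a placeholder; use any key defined in network.model_dict. The remaining values
# follow the argparse defaults above.
#   python train.py -ds flatfault-b -m <ModelName> \
#       -t flatfault_b_train_invnet.txt -v flatfault_b_val_invnet.txt \
#       -o Invnet_models -n fcn_l1loss_ffb --tensorboard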
| 14,469
| 41.558824
| 122
|
py
|
OpenFWI
|
OpenFWI-main/transforms.py
|
# © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import numpy as np
import random
from sklearn.decomposition import PCA
def crop(vid, i, j, h, w):
return vid[..., i:(i + h), j:(j + w)]
def center_crop(vid, output_size):
h, w = vid.shape[-2:]
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
return crop(vid, i, j, th, tw)
def hflip(vid):
return vid.flip(dims=(-1,))
# NOTE: for those functions, which generally expect mini-batches, we keep them
# as non-minibatch so that they are applied as if they were 4d (thus image).
# this way, we only apply the transformation in the spatial domain
def resize(vid, size, interpolation='bilinear'):
# NOTE: using bilinear interpolation because we don't work on minibatches
# at this level
scale = None
if isinstance(size, int):
scale = float(size) / min(vid.shape[-2:])
size = None
return torch.nn.functional.interpolate(
vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False)
def random_resize(vid, size, random_factor, interpolation='bilinear'):
# NOTE: using bilinear interpolation because we don't work on minibatches
# at this level
scale = None
r = 1 + random.random() * (random_factor - 1)
if isinstance(size, int):
scale = float(size) / min(vid.shape[-2:]) * r
size = None
else:
size = tuple([int(elem * r) for elem in list(size)])
return torch.nn.functional.interpolate(
vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False)
def pad(vid, padding, fill=0, padding_mode="constant"):
# NOTE: don't want to pad on the temporal dimension, so leave as non-batch
# (4d) before padding. This works as expected
return torch.nn.functional.pad(vid, padding, value=fill, mode=padding_mode)
def to_normalized_float_tensor(vid):
return vid.permute(3, 0, 1, 2).to(torch.float32) / 255
def normalize(vid, mean, std):
shape = (-1,) + (1,) * (vid.dim() - 1)
mean = torch.as_tensor(mean).reshape(shape)
std = torch.as_tensor(std).reshape(shape)
return (vid - mean) / std
def minmax_normalize(vid, vmin, vmax, scale=2):
vid -= vmin
vid /= (vmax - vmin)
return (vid - 0.5) * 2 if scale == 2 else vid
def minmax_denormalize(vid, vmin, vmax, scale=2):
if scale == 2:
vid = vid / 2 + 0.5
return vid * (vmax - vmin) + vmin
def add_noise(data, snr):
sig_avg_power_db = 10*np.log10(np.mean(data**2))
noise_avg_power_db = sig_avg_power_db - snr
noise_avg_power = 10**(noise_avg_power_db/10)
noise = np.random.normal(0, np.sqrt(noise_avg_power), data.shape)
noisy_data = data + noise
return noisy_data
def log_transform(data, k=1, c=0):
return (np.log1p(np.abs(k * data) + c)) * np.sign(data)
def log_transform_tensor(data, k=1, c=0):
return (torch.log1p(torch.abs(k * data) + c)) * torch.sign(data)
def exp_transform(data, k=1, c=0):
return (np.expm1(np.abs(data)) - c) * np.sign(data) / k
def tonumpy_denormalize(vid, vmin, vmax, exp=True, k=1, c=0, scale=2):
if exp:
vmin = log_transform(vmin, k=k, c=c)
vmax = log_transform(vmax, k=k, c=c)
vid = minmax_denormalize(vid.cpu().numpy(), vmin, vmax, scale)
return exp_transform(vid, k=k, c=c) if exp else vid
# Class interface
class RandomCrop(object):
def __init__(self, size):
self.size = size
@staticmethod
def get_params(vid, output_size):
"""Get parameters for ``crop`` for a random crop.
"""
h, w = vid.shape[-2:]
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, vid):
i, j, h, w = self.get_params(vid, self.size)
return crop(vid, i, j, h, w)
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, vid):
return center_crop(vid, self.size)
class Resize(object):
def __init__(self, size):
self.size = size
def __call__(self, vid):
return resize(vid, self.size)
class RandomResize(object):
def __init__(self, size, random_factor=1.25):
self.size = size
self.factor = random_factor
def __call__(self, vid):
return random_resize(vid, self.size, self.factor)
class ToFloatTensorInZeroOne(object):
def __call__(self, vid):
return to_normalized_float_tensor(vid)
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, vid):
return normalize(vid, self.mean, self.std)
class MinMaxNormalize(object):
def __init__(self, datamin, datamax, scale=2):
self.datamin = datamin
self.datamax = datamax
self.scale = scale
def __call__(self, vid):
return minmax_normalize(vid, self.datamin, self.datamax, self.scale)
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, vid):
if random.random() < self.p:
return hflip(vid)
return vid
class Pad(object):
def __init__(self, padding, fill=0):
self.padding = padding
self.fill = fill
def __call__(self, vid):
return pad(vid, self.padding, self.fill)
class TemporalDownsample(object):
def __init__(self, rate=1):
self.rate = rate
def __call__(self, vid):
return vid[::self.rate]
class AddNoise(object):
def __init__(self, snr=10):
self.snr = snr
def __call__(self, vid):
return add_noise(vid, self.snr)
class PCD(object):
def __init__(self, n_comp=8):
self.pca = PCA(n_components=n_comp)
def __call__(self, data):
data = data.reshape((data.shape[0], -1))
feat_mean = data.mean(axis=0)
data -= np.tile(feat_mean, (data.shape[0], 1))
pc = self.pca.fit_transform(data)
pc = pc.reshape((-1,))
pc = pc[:, np.newaxis, np.newaxis]
return pc
class StackPCD(object):
def __init__(self, n_comp=(32, 8)):
self.primary_pca = PCA(n_components=n_comp[0])
self.secondary_pca = PCA(n_components=n_comp[1])
def __call__(self, data):
data = np.transpose(data, (0, 2, 1))
primary_pc = []
for sample in data:
feat_mean = sample.mean(axis=0)
sample -= np.tile(feat_mean, (sample.shape[0], 1))
primary_pc.append(self.primary_pca.fit_transform(sample))
primary_pc = np.array(primary_pc)
data = primary_pc.reshape((data.shape[0], -1))
feat_mean = data.mean(axis=0)
data -= np.tile(feat_mean, (data.shape[0], 1))
secondary_pc = self.secondary_pca.fit_transform(data)
secondary_pc = secondary_pc.reshape((-1,))
secondary_pc = secondary_pc[:, np.newaxis, np.newaxis]
return secondary_pc
class LogTransform(object):
def __init__(self, k=1, c=0):
self.k = k
self.c = c
def __call__(self, data):
return log_transform(data, k=self.k, c=self.c)
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
# def __init__(self, device):
# self.device = device
def __call__(self, sample):
return torch.from_numpy(sample)
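# Hedged usage sketch (not part of the original file): round-trip of the log/min-max
# normalization used for seismic data. The data range (-61, 120) mirrors the example
# in dataset.py and is illustrative only.
if __name__ == '__main__':
    data = np.random.uniform(-61, 120, size=(5, 1000, 70)).astype('float32')
    vmin, vmax = log_transform(-61, k=1), log_transform(120, k=1)
    normalized = minmax_normalize(log_transform(data, k=1), vmin, vmax)
    restored = tonumpy_denormalize(torch.from_numpy(normalized), -61, 120, exp=True, k=1)
    print('max round-trip error:', np.abs(restored - data).max())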
| 8,236
| 29.394834
| 105
|
py
|
active_grasp-devel
|
active_grasp-devel/setup.py
|
# ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD!
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Fetch values from package.xml.
setup_args = generate_distutils_setup(
packages=["active_grasp"],
package_dir={"": "src"},
)
setup(**setup_args)
| 314
| 21.5
| 61
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/baselines.py
|
import numpy as np
from .policy import SingleViewPolicy, MultiViewPolicy, compute_error
class InitialView(SingleViewPolicy):
def update(self, img, x, q):
self.x_d = x
super().update(img, x, q)
class TopView(SingleViewPolicy):
def activate(self, bbox, view_sphere):
super().activate(bbox, view_sphere)
self.x_d = self.view_sphere.get_view(0.0, 0.0)
self.done = not self.solve_cam_ik(self.q0, self.x_d)
class TopTrajectory(MultiViewPolicy):
def activate(self, bbox, view_sphere):
super().activate(bbox, view_sphere)
self.x_d = self.view_sphere.get_view(0.0, 0.0)
self.done = not self.solve_cam_ik(self.q0, self.x_d)
def update(self, img, x, q):
self.integrate(img, x, q)
linear, _ = compute_error(self.x_d, x)
if np.linalg.norm(linear) < 0.02:
self.done = True
class FixedTrajectory(MultiViewPolicy):
def activate(self, bbox, view_sphere):
pass
def update(self, img, x, q):
pass
| 1,058
| 26.868421
| 75
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/timer.py
|
import time
class Timer:
timers = dict()
def __init__(self, name):
self.name = name
self.timers.setdefault(name, 0)
def __enter__(self):
self.start()
return self
def __exit__(self, *exc_info):
self.stop()
@classmethod
def reset(cls):
cls.timers = dict()
def start(self):
self.tic = time.perf_counter()
def stop(self):
elapsed_time = time.perf_counter() - self.tic
self.timers[self.name] += elapsed_time
# with open(f"{self.name}.txt", "a") as f:
# f.write(f"{elapsed_time}\n")
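# Hedged usage sketch (not part of the original file): elapsed times accumulate per
# name in the class-level `timers` dict, either via the context manager or by
# calling start()/stop() explicitly.
if __name__ == "__main__":
    with Timer("example"):
        time.sleep(0.1)
    print(Timer.timers)  # e.g. {'example': 0.1...}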
| 609
| 19.333333
| 53
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/policy.py
|
import numpy as np
from sensor_msgs.msg import CameraInfo
from pathlib import Path
import rospy
from trac_ik_python.trac_ik import IK
from robot_helpers.ros import tf
from robot_helpers.ros.conversions import *
from vgn.detection import *
from vgn.perception import UniformTSDFVolume
from .timer import Timer
from .rviz import Visualizer
def solve_ik(q0, pose, solver):
x, y, z = pose.translation
qx, qy, qz, qw = pose.rotation.as_quat()
return solver.get_ik(q0, x, y, z, qx, qy, qz, qw)
class Policy:
def __init__(self):
self.load_parameters()
self.init_ik_solver()
self.init_visualizer()
def load_parameters(self):
self.base_frame = rospy.get_param("~base_frame_id")
self.T_grasp_ee = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()
self.cam_frame = rospy.get_param("~camera/frame_id")
self.task_frame = "task"
info_topic = rospy.get_param("~camera/info_topic")
msg = rospy.wait_for_message(info_topic, CameraInfo, rospy.Duration(2.0))
self.intrinsic = from_camera_info_msg(msg)
self.qual_thresh = rospy.get_param("vgn/qual_threshold")
def init_ik_solver(self):
self.q0 = [0.0, -0.79, 0.0, -2.356, 0.0, 1.57, 0.79]
self.cam_ik_solver = IK(self.base_frame, self.cam_frame)
self.ee_ik_solver = IK(self.base_frame, "panda_link8")
def solve_cam_ik(self, q0, view):
return solve_ik(q0, view, self.cam_ik_solver)
def solve_ee_ik(self, q0, pose):
return solve_ik(q0, pose, self.ee_ik_solver)
def init_visualizer(self):
self.vis = Visualizer()
def activate(self, bbox, view_sphere):
self.vis.clear()
self.bbox = bbox
self.view_sphere = view_sphere
self.calibrate_task_frame()
self.vis.bbox(self.base_frame, self.bbox)
self.tsdf = UniformTSDFVolume(0.3, 40)
self.vgn = VGN(Path(rospy.get_param("vgn/model")))
self.views = []
self.best_grasp = None
self.x_d = None
self.done = False
self.info = {}
def calibrate_task_frame(self):
xyz = np.r_[self.bbox.center[:2] - 0.15, self.bbox.min[2] - 0.05]
self.T_base_task = Transform.from_translation(xyz)
self.T_task_base = self.T_base_task.inv()
tf.broadcast(self.T_base_task, self.base_frame, self.task_frame)
rospy.sleep(1.0) # Wait for tf tree to be updated
self.vis.roi(self.task_frame, 0.3)
def update(self, img, x, q):
raise NotImplementedError
def filter_grasps(self, out, q):
grasps, qualities = select_local_maxima(
self.tsdf.voxel_size,
out,
self.qual_thresh,
)
filtered_grasps, filtered_qualities = [], []
for grasp, quality in zip(grasps, qualities):
pose = self.T_base_task * grasp.pose
tip = pose.rotation.apply([0, 0, 0.05]) + pose.translation
if self.bbox.is_inside(tip):
grasp.pose = pose
q_grasp = self.solve_ee_ik(q, pose * self.T_grasp_ee)
if q_grasp is not None:
filtered_grasps.append(grasp)
filtered_qualities.append(quality)
return filtered_grasps, filtered_qualities
def deactivate(self):
self.vis.clear_ig_views()
def select_best_grasp(grasps, qualities):
i = np.argmax(qualities)
return grasps[i], qualities[i]
class SingleViewPolicy(Policy):
def update(self, img, x, q):
linear, _ = compute_error(self.x_d, x)
if np.linalg.norm(linear) < 0.02:
self.views.append(x)
self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
scene_cloud = self.tsdf.get_scene_cloud()
self.vis.scene_cloud(self.task_frame, np.asarray(scene_cloud.points))
map_cloud = self.tsdf.get_map_cloud()
self.vis.map_cloud(
self.task_frame,
np.asarray(map_cloud.points),
np.expand_dims(np.asarray(map_cloud.colors)[:, 0], 1),
)
out = self.vgn.predict(tsdf_grid)
self.vis.quality(self.task_frame, voxel_size, out.qual, 0.5)
grasps, qualities = self.filter_grasps(out, q)
if len(grasps) > 0:
self.best_grasp, quality = select_best_grasp(grasps, qualities)
self.vis.grasp(self.base_frame, self.best_grasp, quality)
self.done = True
class MultiViewPolicy(Policy):
def __init__(self):
super().__init__()
self.T = rospy.get_param("policy/window_size")
def activate(self, bbox, view_sphere):
super().activate(bbox, view_sphere)
self.qual_hist = np.zeros((self.T,) + (40,) * 3, np.float32)
def integrate(self, img, x, q):
self.views.append(x)
self.vis.path(self.base_frame, self.intrinsic, self.views)
with Timer("tsdf_integration"):
self.tsdf.integrate(img, self.intrinsic, x.inv() * self.T_base_task)
scene_cloud = self.tsdf.get_scene_cloud()
self.vis.scene_cloud(self.task_frame, np.asarray(scene_cloud.points))
map_cloud = self.tsdf.get_map_cloud()
self.vis.map_cloud(
self.task_frame,
np.asarray(map_cloud.points),
np.expand_dims(np.asarray(map_cloud.colors)[:, 0], 1),
)
with Timer("grasp_prediction"):
tsdf_grid = self.tsdf.get_grid()
out = self.vgn.predict(tsdf_grid)
self.vis.quality(self.task_frame, self.tsdf.voxel_size, out.qual, 0.9)
t = (len(self.views) - 1) % self.T
self.qual_hist[t, ...] = out.qual
with Timer("grasp_selection"):
grasps, qualities = self.filter_grasps(out, q)
if len(grasps) > 0:
self.best_grasp, quality = select_best_grasp(grasps, qualities)
self.vis.grasp(self.base_frame, self.best_grasp, quality)
else:
self.best_grasp = None
self.vis.clear_grasp()
def compute_error(x_d, x):
linear = x_d.translation - x.translation
angular = (x_d.rotation * x.rotation.inv()).as_rotvec()
return linear, angular
registry = {}
def register(id, cls):
global registry
registry[id] = cls
def make(id, *args, **kwargs):
if id in registry:
return registry[id](*args, **kwargs)
else:
raise ValueError("{} policy does not exist.".format(id))
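# Hedged usage sketch (not part of the original file): policies are registered under a
# string id (see __init__.py) and instantiated by name. DummyPolicy is a placeholder,
# not a policy shipped with this package.
if __name__ == "__main__":
    class DummyPolicy:
        def __init__(self, rate=5):
            self.rate = rate
    register("dummy", DummyPolicy)
    policy = make("dummy", rate=10)
    print(type(policy).__name__, policy.rate)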
| 6,580
| 31.579208
| 88
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/bbox.py
|
import itertools
import numpy as np
import active_grasp.msg
from robot_helpers.ros.conversions import to_point_msg, from_point_msg
class AABBox:
def __init__(self, bbox_min, bbox_max):
self.min = np.asarray(bbox_min)
self.max = np.asarray(bbox_max)
self.center = 0.5 * (self.min + self.max)
self.size = self.max - self.min
@property
def corners(self):
return list(itertools.product(*np.vstack((self.min, self.max)).T))
def is_inside(self, p):
return np.all(p > self.min) and np.all(p < self.max)
def from_bbox_msg(msg):
aabb_min = from_point_msg(msg.min)
aabb_max = from_point_msg(msg.max)
return AABBox(aabb_min, aabb_max)
def to_bbox_msg(bbox):
msg = active_grasp.msg.AABBox()
msg.min = to_point_msg(bbox.min)
msg.max = to_point_msg(bbox.max)
return msg
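# Hedged usage sketch (not part of the original file): is_inside is a strict
# axis-aligned containment test on a 3D point.
if __name__ == "__main__":
    box = AABBox([0.0, 0.0, 0.0], [0.3, 0.3, 0.2])
    print(box.center, box.size)
    print(box.is_inside(np.r_[0.1, 0.1, 0.1]))   # True
    print(box.is_inside(np.r_[0.5, 0.1, 0.1]))   # False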
| 857
| 24.235294
| 74
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/rviz.py
|
import numpy as np
from robot_helpers.ros.rviz import *
from robot_helpers.spatial import Transform
import vgn.rviz
from vgn.utils import *
cm = lambda s: tuple([float(1 - s), float(s), float(0)])
red = [1.0, 0.0, 0.0]
blue = [0, 0.6, 1.0]
grey = [0.9, 0.9, 0.9]
class Visualizer(vgn.rviz.Visualizer):
def clear_ig_views(self):
markers = [Marker(action=Marker.DELETE, ns="ig_views", id=i) for i in range(24)]
self.draw(markers)
def bbox(self, frame, bbox):
pose = Transform.identity()
scale = [0.004, 0.0, 0.0]
color = red
lines = box_lines(bbox.min, bbox.max)
marker = create_line_list_marker(frame, pose, scale, color, lines, "bbox")
self.draw([marker])
def ig_views(self, frame, intrinsic, views, values):
vmin, vmax = min(values), max(values)
scale = [0.002, 0.0, 0.0]
near, far = 0.0, 0.02
markers = []
for i, (view, value) in enumerate(zip(views, values)):
color = cm((value - vmin) / (vmax - vmin))
marker = create_view_marker(
frame,
view,
scale,
color,
intrinsic,
near,
far,
ns="ig_views",
id=i,
)
markers.append(marker)
self.draw(markers)
def path(self, frame, intrinsic, views):
markers = []
points = [p.translation for p in views]
spheres = create_sphere_list_marker(
frame,
Transform.identity(),
np.full(3, 0.008),
blue,
points,
"path",
0,
)
markers.append(spheres)
if len(views) > 1:
lines = create_line_strip_marker(
frame,
Transform.identity(),
[0.002, 0.0, 0.0],
blue,
points,
"path",
1,
)
markers.append(lines)
for i, view in enumerate(views[::4]):
markers.append(
create_view_marker(
frame,
view,
[0.002, 0.0, 0.0],
blue,
intrinsic,
0.0,
0.02,
ns="views",
id=i,
)
)
self.draw(markers)
def point(self, frame, position):
marker = create_sphere_marker(
frame,
Transform.from_translation(position),
np.full(3, 0.01),
[0, 0, 1],
"point",
)
self.draw([marker])
def rays(self, frame, origin, directions, t_max=1.0):
lines = [[origin, origin + t_max * direction] for direction in directions]
marker = create_line_list_marker(
frame,
Transform.identity(),
[0.001, 0.0, 0.0],
grey,
lines,
"rays",
)
self.draw([marker])
def create_view_marker(frame, pose, scale, color, intrinsic, near, far, ns="", id=0):
marker = create_marker(Marker.LINE_LIST, frame, pose, scale, color, ns, id)
x_n = near * intrinsic.width / (2.0 * intrinsic.fx)
y_n = near * intrinsic.height / (2.0 * intrinsic.fy)
z_n = near
x_f = far * intrinsic.width / (2.0 * intrinsic.fx)
y_f = far * intrinsic.height / (2.0 * intrinsic.fy)
z_f = far
points = [
[x_n, y_n, z_n],
[-x_n, y_n, z_n],
[-x_n, y_n, z_n],
[-x_n, -y_n, z_n],
[-x_n, -y_n, z_n],
[x_n, -y_n, z_n],
[x_n, -y_n, z_n],
[x_n, y_n, z_n],
[x_f, y_f, z_f],
[-x_f, y_f, z_f],
[-x_f, y_f, z_f],
[-x_f, -y_f, z_f],
[-x_f, -y_f, z_f],
[x_f, -y_f, z_f],
[x_f, -y_f, z_f],
[x_f, y_f, z_f],
[x_n, y_n, z_n],
[x_f, y_f, z_f],
[-x_n, y_n, z_n],
[-x_f, y_f, z_f],
[-x_n, -y_n, z_n],
[-x_f, -y_f, z_f],
[x_n, -y_n, z_n],
[x_f, -y_f, z_f],
]
marker.points = [to_point_msg(p) for p in points]
return marker
| 4,228
| 26.822368
| 88
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/simulation.py
|
from pathlib import Path
import pybullet as p
import pybullet_data
import rospkg
from active_grasp.bbox import AABBox
from robot_helpers.bullet import *
from robot_helpers.io import load_yaml
from robot_helpers.model import KDLModel
from robot_helpers.spatial import Rotation
from vgn.perception import UniformTSDFVolume
from vgn.utils import find_urdfs, view_on_sphere
from vgn.detection import VGN, select_local_maxima
# import vgn.visualizer as vis
rospack = rospkg.RosPack()
pkg_root = Path(rospack.get_path("active_grasp"))
urdfs_dir = pkg_root / "assets"
class Simulation:
"""Robot is placed s.t. world and base frames are the same"""
def __init__(self, gui, scene_id, vgn_path):
self.configure_physics_engine(gui, 60, 4)
self.configure_visualizer()
self.seed()
self.load_robot()
self.load_vgn(Path(vgn_path))
self.scene = get_scene(scene_id)
def configure_physics_engine(self, gui, rate, sub_step_count):
self.rate = rate
self.dt = 1.0 / self.rate
p.connect(p.GUI if gui else p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setPhysicsEngineParameter(fixedTimeStep=self.dt, numSubSteps=sub_step_count)
p.setGravity(0.0, 0.0, -9.81)
def configure_visualizer(self):
# p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.resetDebugVisualizerCamera(1.2, 30, -30, [0.4, 0.0, 0.2])
def seed(self, seed=None):
self.rng = np.random.default_rng(seed) if seed is not None else np.random
def load_robot(self):
panda_urdf_path = urdfs_dir / "franka/panda_arm_hand.urdf"
self.arm = BtPandaArm(panda_urdf_path)
self.gripper = BtPandaGripper(self.arm)
self.model = KDLModel.from_urdf_file(
panda_urdf_path, self.arm.base_frame, self.arm.ee_frame
)
self.camera = BtCamera(320, 240, 0.96, 0.01, 1.0, self.arm.uid, 11)
def load_vgn(self, model_path):
self.vgn = VGN(model_path)
def reset(self):
valid = False
while not valid:
self.set_arm_configuration([0.0, -1.39, 0.0, -2.36, 0.0, 1.57, 0.79])
self.scene.clear()
q = self.scene.generate(self.rng)
self.set_arm_configuration(q)
uid = self.select_target()
bbox = self.get_target_bbox(uid)
valid = self.check_for_grasps(bbox)
return bbox
def set_arm_configuration(self, q):
for i, q_i in enumerate(q):
p.resetJointState(self.arm.uid, i, q_i, 0)
p.resetJointState(self.arm.uid, 9, 0.04, 0)
p.resetJointState(self.arm.uid, 10, 0.04, 0)
self.gripper.set_desired_width(0.4)
def select_target(self):
_, _, mask = self.camera.get_image()
uids, counts = np.unique(mask, return_counts=True)
mask = np.isin(uids, self.scene.object_uids) # remove ids of the floor, etc
uids, counts = uids[mask], counts[mask]
target_uid = uids[np.argmin(counts)]
p.changeVisualShape(target_uid, -1, rgbaColor=[1, 0, 0, 1])
return target_uid
def get_target_bbox(self, uid):
aabb_min, aabb_max = p.getAABB(uid)
return AABBox(aabb_min, aabb_max)
def check_for_grasps(self, bbox):
origin = Transform.from_translation(self.scene.origin)
origin.translation[2] -= 0.05
center = Transform.from_translation(self.scene.center)
# First, reconstruct the scene from many views
tsdf = UniformTSDFVolume(self.scene.length, 40)
r = 2.0 * self.scene.length
theta = np.pi / 4.0
phis = np.linspace(0.0, 2.0 * np.pi, 5)
for view in [view_on_sphere(center, r, theta, phi) for phi in phis]:
depth_img = self.camera.get_image(view)[1]
tsdf.integrate(depth_img, self.camera.intrinsic, view.inv() * origin)
voxel_size, tsdf_grid = tsdf.voxel_size, tsdf.get_grid()
# Then check whether VGN can find any grasps on the target
out = self.vgn.predict(tsdf_grid)
grasps, qualities = select_local_maxima(voxel_size, out, threshold=0.9)
# vis.scene_cloud(voxel_size, tsdf.get_scene_cloud())
# vis.grasps(grasps, qualities, 0.05)
# vis.show()
for grasp in grasps:
pose = origin * grasp.pose
tip = pose.rotation.apply([0, 0, 0.05]) + pose.translation
if bbox.is_inside(tip):
return True
return False
def step(self):
p.stepSimulation()
class Scene:
def __init__(self):
self.support_urdf = urdfs_dir / "plane/model.urdf"
self.support_uid = -1
self.object_uids = []
def clear(self):
self.remove_support()
self.remove_all_objects()
def generate(self, rng):
raise NotImplementedError
def add_support(self, pos):
self.support_uid = p.loadURDF(str(self.support_urdf), pos, globalScaling=0.3)
def remove_support(self):
p.removeBody(self.support_uid)
def add_object(self, urdf, ori, pos, scale=1.0):
uid = p.loadURDF(str(urdf), pos, ori.as_quat(), globalScaling=scale)
self.object_uids.append(uid)
return uid
def remove_object(self, uid):
p.removeBody(uid)
self.object_uids.remove(uid)
def remove_all_objects(self):
for uid in list(self.object_uids):
self.remove_object(uid)
class YamlScene(Scene):
def __init__(self, config_name):
super().__init__()
self.config_path = pkg_root / "cfg/sim" / config_name
def load_config(self):
self.scene = load_yaml(self.config_path)
self.center = np.asarray(self.scene["center"])
self.length = 0.3
self.origin = self.center - np.r_[0.5 * self.length, 0.5 * self.length, 0.0]
def generate(self, rng):
self.load_config()
self.add_support(self.center)
for object in self.scene["objects"]:
urdf = urdfs_dir / object["object_id"] / "model.urdf"
ori = Rotation.from_euler("xyz", object["rpy"], degrees=True)
pos = self.center + np.asarray(object["xyz"])
scale = object.get("scale", 1)
if randomize := object.get("randomize", False):
angle = rng.uniform(-randomize["rot"], randomize["rot"])
ori = Rotation.from_euler("z", angle, degrees=True) * ori
b = np.asarray(randomize["pos"])
pos += rng.uniform(-b, b)
self.add_object(urdf, ori, pos, scale)
for _ in range(60):
p.stepSimulation()
return self.scene["q"]
class RandomScene(Scene):
def __init__(self):
super().__init__()
self.center = np.r_[0.5, 0.0, 0.2]
self.length = 0.3
self.origin = self.center - np.r_[0.5 * self.length, 0.5 * self.length, 0.0]
self.object_urdfs = find_urdfs(urdfs_dir / "test")
def generate(self, rng, object_count=4, attempts=10):
self.add_support(self.center)
urdfs = rng.choice(self.object_urdfs, object_count)
for urdf in urdfs:
scale = rng.uniform(0.8, 1.0)
uid = self.add_object(urdf, Rotation.identity(), np.zeros(3), scale)
lower, upper = p.getAABB(uid)
z_offset = 0.5 * (upper[2] - lower[2]) + 0.002
state_id = p.saveState()
for _ in range(attempts):
# Try to place and check for collisions
ori = Rotation.from_euler("z", rng.uniform(0, 2 * np.pi))
pos = np.r_[rng.uniform(0.2, 0.8, 2) * self.length, z_offset]
p.resetBasePositionAndOrientation(uid, self.origin + pos, ori.as_quat())
p.stepSimulation()
if not p.getContactPoints(uid):
break
else:
p.restoreState(stateId=state_id)
else:
# No placement found, remove the object
self.remove_object(uid)
q = [0.0, -1.39, 0.0, -2.36, 0.0, 1.57, 0.79]
q = np.asarray(q) + rng.uniform(-0.08, 0.08, 7)  # element-wise jitter (list += array would extend the list)
return q
def get_scene(scene_id):
if scene_id.endswith(".yaml"):
return YamlScene(scene_id)
elif scene_id == "random":
return RandomScene()
else:
raise ValueError("Unknown scene {}.".format(scene_id))
| 8,353
| 35.008621
| 88
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/controller.py
|
from controller_manager_msgs.srv import *
import copy
import cv_bridge
from geometry_msgs.msg import Twist
import numpy as np
import rospy
from sensor_msgs.msg import Image
import trimesh
from .bbox import from_bbox_msg
from .timer import Timer
from active_grasp.srv import Reset, ResetRequest
from robot_helpers.ros import tf
from robot_helpers.ros.conversions import *
from robot_helpers.ros.panda import PandaArmClient, PandaGripperClient
from robot_helpers.ros.moveit import MoveItClient, create_collision_object_from_mesh
from robot_helpers.spatial import Rotation, Transform
from vgn.utils import look_at, cartesian_to_spherical, spherical_to_cartesian
class GraspController:
def __init__(self, policy):
self.policy = policy
self.load_parameters()
self.init_service_proxies()
self.init_robot_connection()
self.init_moveit()
self.init_camera_stream()
def load_parameters(self):
self.base_frame = rospy.get_param("~base_frame_id")
self.T_grasp_ee = Transform.from_list(rospy.get_param("~ee_grasp_offset")).inv()
self.cam_frame = rospy.get_param("~camera/frame_id")
self.depth_topic = rospy.get_param("~camera/depth_topic")
self.min_z_dist = rospy.get_param("~camera/min_z_dist")
self.control_rate = rospy.get_param("~control_rate")
self.linear_vel = rospy.get_param("~linear_vel")
self.policy_rate = rospy.get_param("policy/rate")
def init_service_proxies(self):
self.reset_env = rospy.ServiceProxy("reset", Reset)
self.switch_controller = rospy.ServiceProxy(
"controller_manager/switch_controller", SwitchController
)
def init_robot_connection(self):
self.arm = PandaArmClient()
self.gripper = PandaGripperClient()
topic = rospy.get_param("cartesian_velocity_controller/topic")
self.cartesian_vel_pub = rospy.Publisher(topic, Twist, queue_size=10)
def init_moveit(self):
self.moveit = MoveItClient("panda_arm")
rospy.sleep(1.0) # Wait for connections to be established.
self.moveit.move_group.set_planner_id("RRTstarkConfigDefault")
self.moveit.move_group.set_planning_time(3.0)
def switch_to_cartesian_velocity_control(self):
req = SwitchControllerRequest()
req.start_controllers = ["cartesian_velocity_controller"]
req.stop_controllers = ["position_joint_trajectory_controller"]
req.strictness = 1
self.switch_controller(req)
def switch_to_joint_trajectory_control(self):
req = SwitchControllerRequest()
req.start_controllers = ["position_joint_trajectory_controller"]
req.stop_controllers = ["cartesian_velocity_controller"]
req.strictness = 1
self.switch_controller(req)
def init_camera_stream(self):
self.cv_bridge = cv_bridge.CvBridge()
rospy.Subscriber(self.depth_topic, Image, self.sensor_cb, queue_size=1)
def sensor_cb(self, msg):
self.latest_depth_msg = msg
def run(self):
bbox = self.reset()
self.switch_to_cartesian_velocity_control()
with Timer("search_time"):
grasp = self.search_grasp(bbox)
if grasp:
self.switch_to_joint_trajectory_control()
with Timer("grasp_time"):
res = self.execute_grasp(grasp)
else:
res = "aborted"
return self.collect_info(res)
def reset(self):
Timer.reset()
self.moveit.scene.clear()
res = self.reset_env(ResetRequest())
rospy.sleep(1.0) # Wait for the TF tree to be updated.
return from_bbox_msg(res.bbox)
def search_grasp(self, bbox):
self.view_sphere = ViewHalfSphere(bbox, self.min_z_dist)
self.policy.activate(bbox, self.view_sphere)
timer = rospy.Timer(rospy.Duration(1.0 / self.control_rate), self.send_vel_cmd)
r = rospy.Rate(self.policy_rate)
while not self.policy.done:
img, pose, q = self.get_state()
self.policy.update(img, pose, q)
r.sleep()
rospy.sleep(0.2) # Wait for a zero command to be sent to the robot.
self.policy.deactivate()
timer.shutdown()
return self.policy.best_grasp
def get_state(self):
q, _ = self.arm.get_state()
msg = copy.deepcopy(self.latest_depth_msg)
img = self.cv_bridge.imgmsg_to_cv2(msg).astype(np.float32) * 0.001
pose = tf.lookup(self.base_frame, self.cam_frame, msg.header.stamp)
return img, pose, q
def send_vel_cmd(self, event):
if self.policy.x_d is None or self.policy.done:
cmd = np.zeros(6)
else:
x = tf.lookup(self.base_frame, self.cam_frame)
cmd = self.compute_velocity_cmd(self.policy.x_d, x)
self.cartesian_vel_pub.publish(to_twist_msg(cmd))
def compute_velocity_cmd(self, x_d, x):
r, theta, phi = cartesian_to_spherical(x.translation - self.view_sphere.center)
e_t = x_d.translation - x.translation
e_n = (x.translation - self.view_sphere.center) * (self.view_sphere.r - r) / r
linear = 1.0 * e_t + 6.0 * (r < self.view_sphere.r) * e_n
scale = np.linalg.norm(linear) + 1e-6
linear *= np.clip(scale, 0.0, self.linear_vel) / scale
angular = self.view_sphere.get_view(theta, phi).rotation * x.rotation.inv()
angular = 1.0 * angular.as_rotvec()
return np.r_[linear, angular]
def execute_grasp(self, grasp):
self.create_collision_scene()
T_base_grasp = self.postprocess(grasp.pose)
self.gripper.move(0.08)
T_base_approach = T_base_grasp * Transform.t_[0, 0, -0.06] * self.T_grasp_ee
success, plan = self.moveit.plan(T_base_approach, 0.2, 0.2)
if success:
self.moveit.scene.clear()
self.moveit.execute(plan)
rospy.sleep(0.5) # Wait for the planning scene to be updated
self.moveit.gotoL(T_base_grasp * self.T_grasp_ee)
rospy.sleep(0.5)
self.gripper.grasp()
T_base_retreat = Transform.t_[0, 0, 0.05] * T_base_grasp * self.T_grasp_ee
self.moveit.gotoL(T_base_retreat)
rospy.sleep(1.0) # Wait to see whether the object slides out of the hand
success = self.gripper.read() > 0.002
return "succeeded" if success else "failed"
else:
return "no_motion_plan_found"
def create_collision_scene(self):
# Segment support surface
cloud = self.policy.tsdf.get_scene_cloud()
cloud = cloud.transform(self.policy.T_base_task.as_matrix())
_, inliers = cloud.segment_plane(0.01, 3, 1000)
support_cloud = cloud.select_by_index(inliers)
cloud = cloud.select_by_index(inliers, invert=True)
# o3d.io.write_point_cloud(f"{time.time():.0f}.pcd", cloud)
# Add collision object for the support
self.add_collision_mesh("support", compute_convex_hull(support_cloud))
# Cluster cloud
labels = np.array(cloud.cluster_dbscan(eps=0.01, min_points=8))
# Generate convex collision objects for each segment
self.hulls = []
for label in range(labels.max() + 1):
segment = cloud.select_by_index(np.flatnonzero(labels == label))
try:
hull = compute_convex_hull(segment)
name = f"object_{label}"
self.add_collision_mesh(name, hull)
self.hulls.append(hull)
except Exception:
# Qhull fails in some edge cases
pass
def add_collision_mesh(self, name, mesh):
frame, pose = self.base_frame, Transform.identity()
co = create_collision_object_from_mesh(name, frame, pose, mesh)
self.moveit.scene.add_object(co)
def postprocess(self, T_base_grasp):
rot = T_base_grasp.rotation
if rot.as_matrix()[:, 0][0] < 0: # Ensure that the camera is pointing forward
T_base_grasp.rotation = rot * Rotation.from_euler("z", np.pi)
T_base_grasp *= Transform.t_[0.0, 0.0, 0.01]
return T_base_grasp
def collect_info(self, result):
points = [p.translation for p in self.policy.views]
d = np.sum([np.linalg.norm(p2 - p1) for p1, p2 in zip(points, points[1:])])
info = {
"result": result,
"view_count": len(points),
"distance": d,
}
info.update(self.policy.info)
info.update(Timer.timers)
return info
def compute_convex_hull(cloud):
hull, _ = cloud.compute_convex_hull()
triangles, vertices = np.asarray(hull.triangles), np.asarray(hull.vertices)
return trimesh.base.Trimesh(vertices, triangles)
class ViewHalfSphere:
def __init__(self, bbox, min_z_dist):
self.center = bbox.center
self.r = 0.5 * bbox.size[2] + min_z_dist
def get_view(self, theta, phi):
eye = self.center + spherical_to_cartesian(self.r, theta, phi)
up = np.r_[1.0, 0.0, 0.0]
return look_at(eye, self.center, up)
def sample_view(self):
raise NotImplementedError
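# Hedged usage sketch (not part of the original file): a ViewHalfSphere wraps the
# target bounding box and returns camera poses that look at its center from the
# given spherical angles. The box extents below are placeholders.
if __name__ == "__main__":
    from active_grasp.bbox import AABBox
    bbox = AABBox([0.4, -0.1, 0.1], [0.6, 0.1, 0.25])
    sphere = ViewHalfSphere(bbox, min_z_dist=0.3)
    view = sphere.get_view(theta=np.deg2rad(30.0), phi=0.0)
    print(view.translation)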
| 9,186
| 38.770563
| 88
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/__init__.py
|
from .policy import register
from .baselines import *
from .nbv import NextBestView
register("initial-view", InitialView)
register("top-view", TopView)
register("top-trajectory", TopTrajectory)
register("fixed-trajectory", FixedTrajectory)
register("nbv", NextBestView)
| 271
| 26.2
| 45
|
py
|
active_grasp-devel
|
active_grasp-devel/src/active_grasp/nbv.py
|
import itertools
from numba import jit
import numpy as np
import rospy
from .policy import MultiViewPolicy
from .timer import Timer
@jit(nopython=True)
def get_voxel_at(voxel_size, p):
index = (p / voxel_size).astype(np.int64)
return index if (index >= 0).all() and (index < 40).all() else None
# Note that the jit compilation takes some time the first time raycast is called
@jit(nopython=True)
def raycast(
voxel_size,
tsdf_grid,
ori,
pos,
fx,
fy,
cx,
cy,
u_min,
u_max,
v_min,
v_max,
t_min,
t_max,
t_step,
):
voxel_indices = []
for u in range(u_min, u_max):
for v in range(v_min, v_max):
direction = np.asarray([(u - cx) / fx, (v - cy) / fy, 1.0])
direction = ori @ (direction / np.linalg.norm(direction))
t, tsdf_prev = t_min, -1.0
while t < t_max:
p = pos + t * direction
t += t_step
index = get_voxel_at(voxel_size, p)
if index is not None:
i, j, k = index
tsdf = tsdf_grid[i, j, k]
if tsdf * tsdf_prev < 0 and tsdf_prev > -1: # crossed a surface
break
voxel_indices.append(index)
tsdf_prev = tsdf
return voxel_indices
class NextBestView(MultiViewPolicy):
def __init__(self):
super().__init__()
self.min_z_dist = rospy.get_param("~camera/min_z_dist")
self.max_views = rospy.get_param("nbv_grasp/max_views")
self.min_gain = rospy.get_param("nbv_grasp/min_gain")
self.downsample = rospy.get_param("nbv_grasp/downsample")
self.compile()
def compile(self):
# Trigger the JIT compilation
raycast(
1.0,
np.zeros((40, 40, 40), dtype=np.float32),
np.eye(3),
np.zeros(3),
1.0,
1.0,
1.0,
1.0,
0,
1,
0,
1,
0.0,
1.0,
0.1,
)
def activate(self, bbox, view_sphere):
super().activate(bbox, view_sphere)
def update(self, img, x, q):
if len(self.views) > self.max_views or self.best_grasp_prediction_is_stable():
self.done = True
else:
with Timer("state_update"):
self.integrate(img, x, q)
with Timer("view_generation"):
views = self.generate_views(q)
with Timer("ig_computation"):
gains = [self.ig_fn(v, self.downsample) for v in views]
with Timer("cost_computation"):
costs = [self.cost_fn(v) for v in views]
utilities = gains / np.sum(gains) - costs / np.sum(costs)
self.vis.ig_views(self.base_frame, self.intrinsic, views, utilities)
i = np.argmax(utilities)
nbv, gain = views[i], gains[i]
if gain < self.min_gain and len(self.views) > self.T:
self.done = True
self.x_d = nbv
def best_grasp_prediction_is_stable(self):
if self.best_grasp:
t = (self.T_task_base * self.best_grasp.pose).translation
i, j, k = (t / self.tsdf.voxel_size).astype(int)
qs = self.qual_hist[:, i, j, k]
if np.count_nonzero(qs) == self.T and np.mean(qs) > 0.9:
return True
return False
def generate_views(self, q):
thetas = np.deg2rad([15, 30])
phis = np.arange(8) * np.deg2rad(45)
view_candidates = []
for theta, phi in itertools.product(thetas, phis):
view = self.view_sphere.get_view(theta, phi)
if self.solve_cam_ik(q, view):
view_candidates.append(view)
return view_candidates
def ig_fn(self, view, downsample):
tsdf_grid, voxel_size = self.tsdf.get_grid(), self.tsdf.voxel_size
tsdf_grid = -1.0 + 2.0 * tsdf_grid # Open3D maps tsdf to [0,1]
# Downsample the sensor resolution
fx = self.intrinsic.fx / downsample
fy = self.intrinsic.fy / downsample
cx = self.intrinsic.cx / downsample
cy = self.intrinsic.cy / downsample
# Project bbox onto the image plane to get better bounds
T_cam_base = view.inv()
corners = np.array([T_cam_base.apply(p) for p in self.bbox.corners]).T
u = (fx * corners[0] / corners[2] + cx).round().astype(int)
v = (fy * corners[1] / corners[2] + cy).round().astype(int)
u_min, u_max = u.min(), u.max()
v_min, v_max = v.min(), v.max()
t_min = 0.0 # self.min_z_dist
t_max = corners[2].max() # This bound might be a bit too short
t_step = np.sqrt(3) * voxel_size # Could be replaced with line rasterization
# Cast rays from the camera view (we'll work in the task frame from now on)
view = self.T_task_base * view
ori, pos = view.rotation.as_matrix(), view.translation
voxel_indices = raycast(
voxel_size,
tsdf_grid,
ori,
pos,
fx,
fy,
cx,
cy,
u_min,
u_max,
v_min,
v_max,
t_min,
t_max,
t_step,
)
# Count rear side voxels within the bounding box
indices = np.unique(voxel_indices, axis=0)
bbox_min = self.T_task_base.apply(self.bbox.min) / voxel_size
bbox_max = self.T_task_base.apply(self.bbox.max) / voxel_size
mask = np.array([((i > bbox_min) & (i < bbox_max)).all() for i in indices])
i, j, k = indices[mask].T
tsdfs = tsdf_grid[i, j, k]
ig = np.logical_and(tsdfs > -1.0, tsdfs < 0.0).sum()
return ig
def cost_fn(self, view):
return 1.0
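# Hedged usage sketch (not part of the original file): raycast traverses a TSDF grid
# from a virtual camera and returns the visited voxel indices; ig_fn above then counts
# the still-hidden voxels (-1 < tsdf < 0) inside the target box. All numbers below are
# placeholders chosen only so that the rays pass through the 40^3 volume.
if __name__ == "__main__":
    grid = np.full((40, 40, 40), 0.5, dtype=np.float32)
    indices = raycast(
        0.0075, grid, np.eye(3), np.r_[0.15, 0.15, -0.1],
        200.0, 200.0, 32.0, 24.0, 0, 64, 0, 48, 0.0, 0.5, 0.0075 * np.sqrt(3),
    )
    print(len(indices), "voxels traversed")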
| 5,897
| 30.881081
| 86
|
py
|
active_grasp-devel
|
active_grasp-devel/test/test_sim_scene.py
|
from active_grasp.simulation import Simulation
def main():
gui = True
scene_id = "random"
vgn_path = "../vgn/assets/models/vgn_conv.pth"
sim = Simulation(gui, scene_id, vgn_path)
while True:
sim.reset()
if __name__ == "__main__":
main()
| 273
| 17.266667
| 50
|
py
|
active_grasp-devel
|
active_grasp-devel/test/test_clustering.py
|
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
def main():
cloud_file = "1636465097.pcd"
# eps, min_points = 0.02, 10
eps, min_points = 0.01, 8
cloud = o3d.io.read_point_cloud(cloud_file)
labels = np.array(cloud.cluster_dbscan(eps=eps, min_points=min_points))
max_label = labels.max()
print(f"point cloud has {max_label + 1} clusters")
colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
cloud.colors = o3d.utility.Vector3dVector(colors[:, :3])
o3d.visualization.draw_geometries([cloud])
if __name__ == "__main__":
main()
| 650
| 25.04
| 80
|
py
|
active_grasp-devel
|
active_grasp-devel/scripts/calibrate_roi.py
|
#!/usr/bin/env python3
import numpy as np
import rospy
from robot_helpers.ros import tf
def main():
rospy.init_node("calibrate_roi")
tf.init()
T_base_roi = tf.lookup("panda_link0", "tag_0")
np.savetxt("cfg/hw/T_base_tag.txt", T_base_roi.as_matrix())
if __name__ == "__main__":
main()
| 310
| 16.277778
| 63
|
py
|
active_grasp-devel
|
active_grasp-devel/scripts/hw_node.py
|
#!/usr/bin/env python3
from controller_manager_msgs.srv import *
import geometry_msgs.msg
import numpy as np
import rospy
from active_grasp.bbox import AABBox, to_bbox_msg
from active_grasp.rviz import Visualizer
from active_grasp.srv import *
from robot_helpers.io import load_yaml
from robot_helpers.ros.conversions import to_pose_msg
from robot_helpers.ros.moveit import MoveItClient
from robot_helpers.ros.panda import PandaGripperClient
from robot_helpers.spatial import Transform
class HwNode:
def __init__(self):
self.load_parameters()
self.init_robot_connection()
self.init_visualizer()
self.advertise_services()
rospy.spin()
def load_parameters(self):
self.cfg = rospy.get_param("hw")
self.T_base_roi = Transform.from_matrix(np.loadtxt(self.cfg["roi_calib_file"]))
def init_robot_connection(self):
self.gripper = PandaGripperClient()
self.switch_controller = rospy.ServiceProxy(
"controller_manager/switch_controller", SwitchController
)
self.moveit = MoveItClient("panda_arm")
rospy.Timer(rospy.Duration(1), self.publish_table_co)
def init_visualizer(self):
self.vis = Visualizer()
rospy.Timer(rospy.Duration(1), self.draw_bbox)
def advertise_services(self):
rospy.Service("seed", Seed, self.seed)
rospy.Service("reset", Reset, self.reset)
def seed(self, req):
self.rng = np.random.default_rng(req.seed)
rospy.loginfo(f"Seeded the rng with {req.seed}.")
return SeedResponse()
def reset(self, req):
q0, bbox = self.load_config()
# Move to the initial configuration
self.switch_to_joint_trajectory_controller()
q0 += self.rng.uniform(-0.069, 0.069, 7)
self.moveit.goto(q0, velocity_scaling=0.4)
self.gripper.move(0.08)
return ResetResponse(to_bbox_msg(bbox))
def load_config(self):
scene_config = load_yaml(self.cfg["scene_file"])
q0 = scene_config["q0"]
bbox_min = self.T_base_roi.apply(scene_config["target"]["min"])
bbox_max = self.T_base_roi.apply(scene_config["target"]["max"])
bbox = AABBox(bbox_min, bbox_max)
return q0, bbox
def switch_to_joint_trajectory_controller(self):
req = SwitchControllerRequest()
req.start_controllers = ["position_joint_trajectory_controller"]
req.stop_controllers = ["cartesian_velocity_controller"]
req.strictness = 1
self.switch_controller(req)
def draw_bbox(self, event):
_, bbox = self.load_config()
self.vis.bbox("panda_link0", bbox)
def publish_table_co(self, event):
msg = geometry_msgs.msg.PoseStamped()
msg.header.frame_id = "panda_link0"
msg.pose = to_pose_msg(self.T_base_roi * Transform.t_[0.15, 0.15, 0.005])
self.moveit.scene.add_box("table", msg, size=(0.8, 0.8, 0.01))
def main():
rospy.init_node("hw")
HwNode()
if __name__ == "__main__":
main()
| 3,038
| 30.989474
| 87
|
py
|
active_grasp-devel
|
active_grasp-devel/scripts/run.py
|
#!/usr/bin/env python3
import argparse
from datetime import datetime
import pandas as pd
from pathlib import Path
import rospy
from tqdm import tqdm
from active_grasp.controller import *
from active_grasp.policy import make, registry
from active_grasp.srv import Seed
from robot_helpers.ros import tf
def main():
rospy.init_node("grasp_controller")
tf.init()
parser = create_parser()
args = parser.parse_args()
policy = make(args.policy)
controller = GraspController(policy)
logger = Logger(args)
seed_simulation(args.seed)
rospy.sleep(1.0) # Prevents a rare race condition
for _ in tqdm(range(args.runs), disable=args.wait_for_input):
if args.wait_for_input:
controller.gripper.move(0.08)
controller.switch_to_joint_trajectory_control()
controller.moveit.goto("ready", velocity_scaling=0.4)
i = input("Run policy? [y/n] ")
if i != "y":
exit()
rospy.loginfo("Running policy ...")
info = controller.run()
logger.log_run(info)
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument("policy", type=str, choices=registry.keys())
parser.add_argument("--runs", type=int, default=10)
parser.add_argument("--wait-for-input", action="store_true")
parser.add_argument("--logdir", type=Path, default="logs")
parser.add_argument("--seed", type=int, default=1)
return parser
class Logger:
def __init__(self, args):
args.logdir.mkdir(parents=True, exist_ok=True)
stamp = datetime.now().strftime("%y%m%d-%H%M%S")
name = "{}_policy={},seed={}.csv".format(
stamp,
args.policy,
args.seed,
)
self.path = args.logdir / name
def log_run(self, info):
df = pd.DataFrame.from_records([info])
df.to_csv(self.path, mode="a", header=not self.path.exists(), index=False)
def seed_simulation(seed):
rospy.ServiceProxy("seed", Seed)(seed)
rospy.sleep(1.0)
if __name__ == "__main__":
main()
| 2,090
| 26.513158
| 82
|
py
|
active_grasp-devel
|
active_grasp-devel/scripts/bt_sim_node.py
|
#!/usr/bin/env python3
from actionlib import SimpleActionServer
import control_msgs.msg as control_msgs
from controller_manager_msgs.srv import *
import cv_bridge
from franka_msgs.msg import FrankaState, ErrorRecoveryAction
from franka_gripper.msg import *
from geometry_msgs.msg import Twist
import numpy as np
import rospy
from sensor_msgs.msg import JointState, Image, CameraInfo
from scipy import interpolate
from std_msgs.msg import Header
from threading import Thread
from active_grasp.bbox import to_bbox_msg
from active_grasp.srv import *
from active_grasp.simulation import Simulation
from robot_helpers.ros.conversions import *
from vgn.simulation import apply_noise
class BtSimNode:
def __init__(self):
gui = rospy.get_param("~gui")
scene_id = rospy.get_param("~scene")
vgn_path = rospy.get_param("vgn/model")
self.sim = Simulation(gui, scene_id, vgn_path)
self.init_plugins()
self.advertise_services()
def init_plugins(self):
self.plugins = [
PhysicsPlugin(self.sim),
RobotStatePlugin(self.sim.arm, self.sim.gripper),
MoveActionPlugin(self.sim.gripper),
GraspActionPlugin(self.sim.gripper),
GripperActionPlugin(),
CameraPlugin(self.sim.camera),
MockActionsPlugin(),
]
self.controllers = {
"cartesian_velocity_controller": CartesianVelocityControllerPlugin(
self.sim.arm, self.sim.model
),
"position_joint_trajectory_controller": JointTrajectoryControllerPlugin(
self.sim.arm
),
}
def start_plugins(self):
for plugin in self.plugins + list(self.controllers.values()):
plugin.thread.start()
def activate_plugins(self):
for plugin in self.plugins:
plugin.activate()
def deactivate_plugins(self):
for plugin in self.plugins:
plugin.deactivate()
def deactivate_controllers(self):
for controller in self.controllers.values():
controller.deactivate()
def advertise_services(self):
rospy.Service("seed", Seed, self.seed)
rospy.Service("reset", Reset, self.reset)
rospy.Service(
"/controller_manager/switch_controller",
SwitchController,
self.switch_controller,
)
def seed(self, req):
self.sim.seed(req.seed)
rospy.loginfo(f"Seeded the rng with {req.seed}.")
return SeedResponse()
def reset(self, req):
self.deactivate_plugins()
self.deactivate_controllers()
rospy.sleep(1.0) # TODO replace with a read-write lock
bbox = self.sim.reset()
self.activate_plugins()
return ResetResponse(to_bbox_msg(bbox))
def switch_controller(self, req):
for controller in req.stop_controllers:
self.controllers[controller].deactivate()
for controller in req.start_controllers:
self.controllers[controller].activate()
return SwitchControllerResponse(ok=True)
def run(self):
self.start_plugins()
self.activate_plugins()
rospy.spin()
class Plugin:
"""A plugin that spins at a constant rate in its own thread."""
def __init__(self, rate):
self.rate = rate
self.thread = Thread(target=self.loop, daemon=True)
self.is_running = False
def activate(self):
self.is_running = True
def deactivate(self):
self.is_running = False
def loop(self):
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
if self.is_running:
self.update()
rate.sleep()
def update(self):
raise NotImplementedError
class PhysicsPlugin(Plugin):
def __init__(self, sim):
super().__init__(sim.rate)
self.sim = sim
def update(self):
self.sim.step()
class RobotStatePlugin(Plugin):
def __init__(self, arm, gripper, rate=30):
super().__init__(rate)
self.arm = arm
self.gripper = gripper
self.arm_state_pub = rospy.Publisher(
"/franka_state_controller/franka_states", FrankaState, queue_size=10
)
self.gripper_state_pub = rospy.Publisher(
"/franka_gripper/joint_states", JointState, queue_size=10
)
self.joint_states_pub = rospy.Publisher(
"joint_states", JointState, queue_size=10
)
def update(self):
q, dq = self.arm.get_state()
width = self.gripper.read()
header = Header(stamp=rospy.Time.now())
msg = FrankaState(header=header, q=q, dq=dq)
self.arm_state_pub.publish(msg)
msg = JointState(header=header)
msg.name = ["panda_finger_joint1", "panda_finger_joint2"]
msg.position = [0.5 * width, 0.5 * width]
self.gripper_state_pub.publish(msg)
msg = JointState(header=header)
msg.name = ["panda_joint{}".format(i) for i in range(1, 8)] + [
"panda_finger_joint1",
"panda_finger_joint2",
]
msg.position = np.r_[q, 0.5 * width, 0.5 * width]
self.joint_states_pub.publish(msg)
class CartesianVelocityControllerPlugin(Plugin):
def __init__(self, arm, model, rate=30):
super().__init__(rate)
self.arm = arm
self.model = model
topic = rospy.get_param("cartesian_velocity_controller/topic")
rospy.Subscriber(topic, Twist, self.target_cb)
def target_cb(self, msg):
self.dx_d = from_twist_msg(msg)
def activate(self):
self.dx_d = np.zeros(6)
self.is_running = True
def deactivate(self):
self.dx_d = np.zeros(6)
self.is_running = False
self.arm.set_desired_joint_velocities(np.zeros(7))
def update(self):
q, _ = self.arm.get_state()
J_pinv = np.linalg.pinv(self.model.jacobian(q))
cmd = np.dot(J_pinv, self.dx_d)
self.arm.set_desired_joint_velocities(cmd)
class JointTrajectoryControllerPlugin(Plugin):
def __init__(self, arm, rate=30):
super().__init__(rate)
self.arm = arm
self.dt = 1.0 / self.rate # TODO this might not be reliable
self.init_action_server()
def init_action_server(self):
name = "position_joint_trajectory_controller/follow_joint_trajectory"
self.action_server = SimpleActionServer(
name, control_msgs.FollowJointTrajectoryAction, auto_start=False
)
self.action_server.register_goal_callback(self.action_goal_cb)
self.action_server.start()
def action_goal_cb(self):
goal = self.action_server.accept_new_goal()
self.interpolate_trajectory(goal.trajectory.points)
self.elapsed_time = 0.0
def interpolate_trajectory(self, points):
t, y = np.zeros(len(points)), np.zeros((7, len(points)))
for i, point in enumerate(points):
t[i] = point.time_from_start.to_sec()
y[:, i] = point.positions
self.m = interpolate.interp1d(t, y)
self.duration = t[-1]
def update(self):
if self.action_server.is_active():
self.elapsed_time += self.dt
if self.elapsed_time > self.duration:
self.action_server.set_succeeded()
return
self.arm.set_desired_joint_positions(self.m(self.elapsed_time))
class MoveActionPlugin(Plugin):
def __init__(self, gripper, rate=10):
super().__init__(rate)
self.gripper = gripper
self.dt = 1.0 / self.rate
self.init_action_server()
def init_action_server(self):
name = "/franka_gripper/move"
self.action_server = SimpleActionServer(name, MoveAction, auto_start=False)
self.action_server.register_goal_callback(self.action_goal_cb)
self.action_server.start()
def action_goal_cb(self):
self.elapsed_time = 0.0
goal = self.action_server.accept_new_goal()
self.gripper.set_desired_width(goal.width)
def update(self):
if self.action_server.is_active():
self.elapsed_time += self.dt
if self.elapsed_time > 1.0:
self.action_server.set_succeeded()
class GraspActionPlugin(Plugin):
def __init__(self, gripper, rate=10):
super().__init__(rate)
self.gripper = gripper
self.dt = 1.0 / self.rate
self.force = rospy.get_param("~gripper_force")
self.init_action_server()
def init_action_server(self):
name = "/franka_gripper/grasp"
self.action_server = SimpleActionServer(name, GraspAction, auto_start=False)
self.action_server.register_goal_callback(self.action_goal_cb)
self.action_server.start()
def action_goal_cb(self):
self.elapsed_time = 0.0
goal = self.action_server.accept_new_goal()
self.gripper.set_desired_speed(-0.1, force=self.force)
def update(self):
if self.action_server.is_active():
self.elapsed_time += self.dt
if self.elapsed_time > 1.0:
self.action_server.set_succeeded()
class GripperActionPlugin(Plugin):
"""Empty action server to make MoveIt happy"""
def __init__(self, rate=1):
super().__init__(rate)
self.init_action_server()
def init_action_server(self):
name = "/franka_gripper/gripper_action"
self.action_server = SimpleActionServer(
name, control_msgs.GripperCommandAction, auto_start=False
)
self.action_server.register_goal_callback(self.action_goal_cb)
self.action_server.start()
def action_goal_cb(self):
self.action_server.accept_new_goal()
def update(self):
if self.action_server.is_active():
self.action_server.set_succeeded()
class CameraPlugin(Plugin):
def __init__(self, camera, name="camera", rate=5):
super().__init__(rate)
self.camera = camera
self.name = name
self.cam_noise = rospy.get_param("~cam_noise", False)
self.cv_bridge = cv_bridge.CvBridge()
self.init_publishers()
def init_publishers(self):
topic = self.name + "/depth/camera_info"
self.info_pub = rospy.Publisher(topic, CameraInfo, queue_size=10)
topic = self.name + "/depth/image_rect_raw"
self.depth_pub = rospy.Publisher(topic, Image, queue_size=10)
def update(self):
stamp = rospy.Time.now()
msg = to_camera_info_msg(self.camera.intrinsic)
msg.header.frame_id = self.name + "_optical_frame"
msg.header.stamp = stamp
self.info_pub.publish(msg)
_, depth, _ = self.camera.get_image()
if self.cam_noise:
depth = apply_noise(depth)
msg = self.cv_bridge.cv2_to_imgmsg((1000 * depth).astype(np.uint16))
msg.header.stamp = stamp
self.depth_pub.publish(msg)
class MockActionsPlugin(Plugin):
def __init__(self):
super().__init__(1)
self.init_recovery_action_server()
self.init_homing_action_server()
def init_homing_action_server(self):
self.homing_as = SimpleActionServer(
"/franka_gripper/homing", HomingAction, auto_start=False
)
self.homing_as.register_goal_callback(self.action_goal_cb)
self.homing_as.start()
def init_recovery_action_server(self):
self.recovery_as = SimpleActionServer(
"/franka_control/error_recovery", ErrorRecoveryAction, auto_start=False
)
self.recovery_as.register_goal_callback(self.action_goal_cb)
self.recovery_as.start()
def action_goal_cb(self):
pass
def update(self):
pass
def main():
rospy.init_node("bt_sim")
server = BtSimNode()
server.run()
if __name__ == "__main__":
main()
| 11,903
| 30.326316
| 84
|
py
|
hurricast
|
hurricast-master/utils/data_processing.py
|
from __future__ import print_function
import pandas as pd
import math
import torch
import numpy as np
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
dtype = torch.float
device = torch.device("cpu")
#keep only specific columns
def select_data(data):
return data[['SID', 'NUMBER', 'ISO_TIME', 'LAT', 'LON', 'WMO_WIND', 'WMO_PRES', 'DIST2LAND', 'STORM_SPEED']]#, 'STORM_DIR', 'BASIN', 'NATURE']]
#convert columns to numeric values
#and interpolate missing values
def numeric_data(data):
for i in ['LAT', 'LON', 'WMO_WIND', 'WMO_PRES', 'DIST2LAND', 'STORM_SPEED']:
data[i]=pd.to_numeric(data[i],errors='coerce').astype('float64')
data[i]=data[i].interpolate(method='linear')
return data
#to have one-hot encoding of basin and nature of the storm
def add_one_hot(data, df0):
basin = pd.get_dummies(data['BASIN'],prefix='basin')
basin.drop(columns=['basin_ '], inplace = True)
nature = pd.get_dummies(data['NATURE'],prefix='nature')
nature.drop('nature_ ', axis=1, inplace = True)
frames = [df0, basin, nature]
df0 = pd.concat(frames, axis = 1)
print("Basin and Nature of the storm are now added and one-hot.")
return df0
#compute the maximum wind change over the last X hours
def get_max_change(data, time, i):
t = time//3
try:
val = max(data['WMO_WIND'][i-t:i])-min(data['WMO_WIND'][i-t:i])
except:
val = np.nan  # e.g. empty window at the start of the series
return val
#please specify a multiple of 3h for the time
def get_max_wind_change(data, time):
df = data
df['max_wind_change']=[get_max_change(data, time, i) for i in range(len(data))]
return df
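# Hypothetical usage (not part of the original pipeline): with 3-hourly records, a 12 h
# window corresponds to the last 4 timesteps, e.g.
#   df = get_max_wind_change(df0, 12)
# which matches the max_wind_change=12 default used by prepare_data further below.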
#to use in the future: computes the wind category
def sust_wind_to_cat_one_hot(wind):
# maximum sustained wind in kt (knot)
if wind<=33: cat='TD' # <=33
elif wind<=63.: cat='TS'
elif wind <=82.: cat='H1'
elif wind <=95.: cat='H2'
elif wind <=112.: cat='H3'
elif wind <=136.: cat='H4'
elif wind > 136. : cat='H5'
else: cat = 'nan'
return cat
def sust_wind_to_cat_val(wind):
# maximum sustained wind in kt (knot)
if wind<=33: cat= 0 # <=33
elif wind<=63.: cat=1
elif wind <=82.: cat=2
elif wind <=95.: cat=3
elif wind <=112.: cat=4
elif wind <=136.: cat=5
elif wind > 136. : cat=6
else: cat = 0
return cat
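# Illustrative mapping (values chosen as an example only): a 70 kt sustained wind falls in
# the 64-82 kt range, so sust_wind_to_cat_one_hot(70) returns 'H1' and
# sust_wind_to_cat_val(70) returns 2.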
def add_storm_category_one_hot(data):
df = pd.DataFrame()
df['storm_category'] = [sust_wind_to_cat_one_hot(data['WMO_WIND'][i]) for i in range(len(data))]
storm_cat = pd.get_dummies(df['storm_category'],prefix='storm_category')
#storm_cat
storm_cat.drop('storm_category_nan', axis=1, inplace=True)
frames = [data, storm_cat]
df0 = pd.concat(frames, axis = 1)
#df0.drop('storm_category', axis=1)
print("Storm category is now added and one-hot.")
return df0
def add_storm_category_val(data):
df = pd.DataFrame()
df['storm_category'] = [sust_wind_to_cat_val(data['WMO_WIND'][i]) for i in range(len(data))]
frames = [data, df]
df0 = pd.concat(frames, axis = 1)
#df0.drop('storm_category', axis=1)
return df0
def sort_storm(data, min_wind, min_steps = 5, max_steps = 120):
'''function to create a dictionary of per-storm matrices
arguments:
data: the dataframe to cut into individual storms
min_wind: the minimum wind speed required to keep the data
'''
#get unique storm_id:
SID=pd.unique(data['SID']).tolist()
#remove empty SID
#if not dropna: SID.remove(' ')
#create empty dictionary
dict0={}
ind = 0
for i in range(len(SID)):
#get data of a particular SID
M = data.loc[data['SID'] == SID[i]]
#cut off using min wind speed
#TODO : cut everything before, ie look for the right date
try:
t = M.index[M['WMO_WIND']>= min_wind][0]
t0 = M.index[0]
except:
t = 0
N = M.loc[M['WMO_WIND'] >= min_wind]
#save matrix in dict0
if N.shape[0] > min_steps:
ind+=1
dict0.update({ind:M.iloc[t-t0:max_steps+t-t0]})
print("The dictionary of storms has been created.")
return dict0
#Geographical difference features: i.e. feature_1(t) = feature(t)-feature(0)
# features: LAT, LON, DIST2LAND
def geo_diff(dict0):
dict1={}
#loop over each dataframe
for i in dict0:
df=dict0[i]
#reset index
df.reset_index(inplace=True, drop=True)
#calculate difference from t=0
df['LAT_1']= df['LAT'] - df['LAT'][0]
df['LON_1']= df['LON'] - df['LON'][0]
df['DIST2LAND_1']= df['DIST2LAND'] - df['DIST2LAND'][0]
#substitute back to the dictionary
dict1[i]=df
return dict1
#instead of padding with 0, pad with latest values in loop
def pad_traj(dict0, max_steps, nan = False):
dict1={}
for t in dict0:
num_steps = dict0[t].shape[0]
steps2add = max_steps - num_steps
if steps2add > 0:
if nan:
dict1[t] = pd.concat([dict0[t], pd.DataFrame([[np.nan] * dict0[t].shape[1]]*steps2add, columns=dict0[t].columns)], ignore_index=True)
else:
dict1[t] = pd.concat([dict0[t], pd.DataFrame([[0] * dict0[t].shape[1]]*steps2add, columns=dict0[t].columns)], ignore_index=True)
#In fact it happens to be easier to make the change afterwards with repad
#dict1[t] = pd.concat([dict0[t], pd.DataFrame([dict0[t].tail(1)]*steps2add, columns=dict0[t].columns)], ignore_index=True)
else:
dict1[t] = dict0[t][:max_steps]
print("The trajectories have now been padded.")
return dict1
def get_distance_km(lon1, lat1, lon2, lat2):
'''
Using haversine formula (https://www.movable-type.co.uk/scripts/latlong.html)
'''
R=6371e3 # meters (earth's radius)
phi_1=math.radians(lat1)
phi_2 = math.radians(lat2)
delta_phi=math.radians(lat2-lat1)
delta_lambda=math.radians(lon2-lon1)
a=np.power(math.sin(delta_phi/2),2) + math.cos(phi_1)*math.cos(phi_2)\
* np.power(math.sin(delta_lambda/2),2)
c= 2 * math.atan2(math.sqrt(a),math.sqrt(1-a))
return R*c/1000.
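# Quick sanity check for the haversine implementation above (hypothetical call, not part of
# the original code): one degree of latitude spans roughly 111 km, so
#   get_distance_km(0, 0, 0, 1)
# should return approximately 111.2 (km).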
#compute the displacement from t=0
def add_displacement_distance(dict0):
dict1={}
#loop over each dataframe
for i in dict0:
df=dict0[i]
#reset index
df.reset_index(inplace=True, drop=True)
#calculate difference from t=0
df['DISPLACEMENT'] = 0
for j in range(1,len(df)):
d = get_distance_km(df['LON'][j-1], df['LAT'][j-1], df['LON'][j], df['LAT'][j])
if d > 500: d=0  # ignore implausibly large jumps (likely data gaps)
df['DISPLACEMENT'][j] = d
dict1[i]=df
return dict1
def add_displacement_lat_lon2(dict0):
dict1={}
#loop over each dataframe
for i in dict0:
df=dict0[i]
#reset index
df.reset_index(inplace=True, drop=True)
lst_lat = [0]
lst_lon = [0]
for j in range(1,len(df)):
d_lat = df['LAT'][j] - df['LAT'][j-1]
d_lon = df['LON'][j] - df['LON'][j-1]
lst_lat.append(d_lat)
lst_lon.append(d_lon)
df['DISPLACEMENT_LAT'] = lst_lat
df['DISPLACEMENT_LON'] = lst_lon
dict1[i]=df
return dict1
#function to calculate tensor shape
#input: dictionary of storm data
def tensor_shape(dict0):
#number of storms
num_storms=len(dict0) - 1
#number of features
num_features=dict0[next(iter(dict0))].shape[1]
#to compute min and max number of steps
t_max = 0 #initialise
t_min = 1000
t_hist = []
for i in dict0:
t0 = dict0[i].shape[0]
t_hist.append(t0)
if t0 > t_max:
t_max = t0
if t0 < t_min:
t_min = t0
print("There are %s storms with %s features, and maximum number of steps is %s and minimum is %s." %(num_storms,num_features,t_max, t_min))
return num_storms, num_features, t_max, t_min, t_hist
#create a tensor
def create_tensor(data, number_of_storms):
tensor = data[1]
for i in range(2,number_of_storms,1):
tensor=np.dstack((tensor, data[i]))
#return list of features
p_list = data[1].columns.tolist()
print("The tensor has now been created.")
return tensor, p_list
def repad(t):
for i in range(t.shape[0]):
if t[i][2][-1] == 0:
ind = np.argmin(t[i][2])
for j in range(ind,t.shape[2]):
t[i,:,j]=t[i,:,ind-1]
return t
def prepare_data(path = "/data/ibtracs.last3years.list.v04r00.csv", max_wind_change = 12, min_wind = 50, min_steps = 15, max_steps = 120, secondary = False, one_hot=False, dropna = False):
data = pd.read_csv(path)
#select interesting columns
df0 = select_data(data)
#transform data from String to numeric
df0 = numeric_data(df0)
#if dropna: df0 = df0.dropna()
#add one_hot columns:
if one_hot:
#add one-hot storm category
#df0 = add_storm_category_val(df0)
df0 = add_storm_category_one_hot(df0)
#transform basin and nature of the storm into one-hot vector
df0 = add_one_hot(data, df0)
if secondary:
#add the max-wind-change column
df0 = get_max_wind_change(df0, max_wind_change)
#get a dict with the storms with a windspeed greater than the threshold
storms = sort_storm(df0, min_wind, min_steps)
#pad the trajectories to a fixed length
d = pad_traj(storms, max_steps)
#print(d)
if secondary:
#d = add_displacement_distance(d)
d = add_displacement_lat_lon2(d)
#print the shape of the tensor
m, n, t_max, t_min, t_hist = tensor_shape(d)
#create the tensor
t, p_list = create_tensor(d, m)
#delete id and number of the storms
t2 = torch.Tensor(t[:,3:,:].astype('float64'))
#match feature list
p_list = p_list[3:]
#transpose time and sample
t3 = torch.transpose(t2,0,2)
#replace 0 by latest values in the tensor
t3 = repad(t3)
return t3, p_list
def prepare_data2(path = "./data/ibtracs.last3years.list.v04r00.csv", max_wind_change = 12, min_wind = 50, min_steps = 15, max_steps = 120, secondary = False, one_hot=False, dropna = False):
data = pd.read_csv(path)
#select interesting columns
df0 = select_data(data)
#transform data from String to numeric
df0 = numeric_data(df0)
#if dropna: df0 = df0.dropna()
#add one_hot columns:
if one_hot:
#add one-hot storm category
#df0 = add_storm_category_val(df0)
df0 = add_storm_category_one_hot(df0)
#transform basin and nature of the storm into one-hot vector
df0 = add_one_hot(data, df0)
if secondary:
#add the max-wind-change column
df0 = get_max_wind_change(df0, max_wind_change)
#get a dict with the storms with a windspeed greater than the threshold
storms = sort_storm(df0, min_wind, min_steps)
#pad the trajectories to a fixed length
d = pad_traj(storms, max_steps)
#print(d)
if secondary:
#d = add_displacement_distance(d)
d = add_displacement_lat_lon2(d)
#print the shape of the tensor
m, n, t_max, t_min, t_hist = tensor_shape(d)
#create the tensor
t, p_list = create_tensor(d, m)
return t[:,2:5,:]
def prepare_tabular_data_vision(path="./data/ibtracs.last3years.list.v04r00.csv", min_wind=50, min_steps=15,
max_steps=120, get_displacement=True):
data = pd.read_csv(path)
# select interesting columns
df0 = select_data(data)
# transform data from String to numeric
df0 = numeric_data(df0)
df0 = df0[['SID', 'ISO_TIME', 'LAT', 'LON', 'WMO_WIND', 'WMO_PRES']]
# get a dict with the storms with a windspeed and number of timesteps greater than a threshold
storms = sort_storm(df0, min_wind, min_steps)
# pad the trajectories to a fixed length
d = pad_traj(storms, max_steps)
# print(d)
if get_displacement:
d = add_displacement_lat_lon2(d)
# print the shape of the tensor
m, n, t_max, t_min, t_hist = tensor_shape(d)
# create the tensor
t, p_list = create_tensor(d, m)
#put t in format storm * timestep * features
e = t.transpose((2, 0, 1))
for tt in e:
try:
tt[0] = datetime.strptime(tt[0], "%Y-%m-%d %H:%M:%S")
except:
pass
return e[:, :, 1:], d
| 12,412
| 30.585242
| 190
|
py
|
hurricast
|
hurricast-master/utils/__init__.py
|
from . import data_processing
from . import utils_vision_data
| 62
| 20
| 31
|
py
|
hurricast
|
hurricast-master/utils/utils_vision_data.py
|
import cdsapi
import numpy as np
import netCDF4
import matplotlib.pyplot as plt
from datetime import datetime
from utils.data_processing import *
import os
import warnings; warnings.simplefilter('ignore')
#All the following functions are used for processing vision data from ERA5
def process_netcdf(filepath, param):
'''
input: netcdf filepath and the specific corresponding parameter in str format (eg. 'z', 'u', 'v'...)
'''
nc = netCDF4.Dataset(filepath, mode='r')
nc.variables.keys()
lat = nc.variables['latitude'][:]
lon = nc.variables['longitude'][:]
time_var = nc.variables['time']
dtime = netCDF4.num2date(time_var[:],time_var.units)
grid = nc.variables[param][:]
#convert to np.array and drop the leading singleton (time) dimension
grid = np.array(grid).reshape(grid.shape[1],grid.shape[2], grid.shape[3])
return grid
def get_storms(extraction = False, min_wind = 30, min_steps= 20, max_steps=60, path = "ibtracs.since1980.list.v04r00.csv"):
'''
returns an array of elements of type [datetime, lat, lon]
set extraction to True if used for downloading data and False if used to convert netcdf files to tensor
'''
data = prepare_data2(path = path, min_wind = min_wind, min_steps= min_steps, max_steps=max_steps, one_hot = False, secondary = False)
e = data.transpose((2,0,1))
d = e.reshape(e.shape[0]*e.shape[1],3)
for t in d:
try:
t[0] = datetime.strptime(t[0], "%Y-%m-%d %H:%M:%S")
except:
pass
if extraction :
f = d.reshape(e.shape[0],e.shape[1],3)
return f
return d
def get_timestep_vision(time, lat, lon):
'''
given a datetime and a latitude/longitude, returns the processed array obtained after download
'''
filepath = get_filename(['700', '500', '225'], ['geopotential', 'u_component_of_wind', 'v_component_of_wind'], time, lat, lon)
u, v, z = process_netcdf(filepath, 'u'), process_netcdf(filepath, 'v'), process_netcdf(filepath, 'z')
return np.array([u, v, z])
def get_storm_vision(storm, epsilon = 0):
'''
given a storm (list of timesteps with time and lat/lon), returns the vision array
epsilon is a parameter used in case the downloaded grid does not have the correct size
'''
l = np.zeros((len(storm), 3, 3, 25, 25))
bad_shapes = []
times, lati, long = [], [], []
for i in range(len(storm)):
time, lat, lon = storm[i]
try :
l[i]=get_timestep_vision(time, lat, lon)
except:
try :
b = get_timestep_vision(time, lat, lon)
print(b.shape)
print(time, lat, lon)
get_data(['700', '500', '225'], ['geopotential', 'u_component_of_wind', 'v_component_of_wind'], time, lat, lon, grid_size = 25, force = True, epsilon = epsilon)
times.append(time)
lati.append(lat)
long.append(lon)
bad_shapes.append(b)
except:
pass
return l
def extract_vision(data, epsilon):
'''
processes all the data to get the vision array
'''
vision = []
for storm in data:
vision.append(get_storm_vision(storm, epsilon))
return np.array(vision)
def get_filename(pressure, params, time, lat, lon):
'''
returns filename to save the netcdf file
'''
params_str = '_'.join(map(str, params))
pressure_str = '_'.join(map(str, pressure))
year, month, day, hour = str(time.year), str(time.month), str(time.day), str(time.hour)
return 'data_era/'+params_str+'/eradata_'+pressure_str+'hPa'+'_'+year+'_'+month+'_'+day+'_'+hour+'_'+'coord'+'_'+str(lat)+'_'+str(lon)+'.nc'
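# Illustrative file name produced by get_filename (hypothetical arguments):
#   get_filename(['700', '500', '225'],
#                ['geopotential', 'u_component_of_wind', 'v_component_of_wind'],
#                datetime(2000, 1, 1, 6), 25.0, -80.0)
# -> 'data_era/geopotential_u_component_of_wind_v_component_of_wind/
#     eradata_700_500_225hPa_2000_1_1_6_coord_25.0_-80.0.nc'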
def get_area(lat, lon, grid_size, e = 0.008):
'''
input: center of the storm (lat and lon), the grid_size, and a small error margin e
output: the bounds of a centered square grid of size grid_size degrees
'''
val = grid_size // 2
return [lat + val + e, lon - val, lat - val - e, lon + val]
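# Example (hypothetical storm center): for a 25-degree window centred on lat=25, lon=-80,
# get_area(25, -80, 25) returns [37.008, -92, 12.992, -68], i.e. the
# [North, West, South, East] bounds expected by the CDS API request in get_data below.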
def get_data(pressure_level, params, time, lat, lon, grid_size=25, degbypix=1.0, force=False, epsilon=0.008):
'''
pressure_level is the pressure level at which we wish to get the data.
params must be in a format such as 'geopotential', 'u_component_of_wind' or 'v_component_of_wind'
grid_size should be odd
'''
if not os.path.exists(get_filename(pressure_level, params, time, lat, lon)) or force:
c = cdsapi.Client()
year, month, day, hour = str(time.year), str(time.month), str(time.day), str(time.hour)
c.retrieve('reanalysis-era5-pressure-levels', {
'variable': params,
'pressure_level': pressure_level,
'product_type': 'reanalysis',
'year': year,
'month': month,
'day': day,
'area': get_area(lat, lon, grid_size, epsilon), # North, West, South, East. Default: global
'grid': [degbypix, degbypix],
# Latitude/longitude grid: east-west (longitude) and north-south resolution (latitude). Default: 0.25 x 0.25
'time': hour,
'format': 'netcdf' # Supported format: grib and netcdf. Default: grib
}, get_filename(pressure_level, params, time, lat, lon))
else:
print("Already downloaded", get_filename(pressure_level, params, time, lat, lon))
def download_all2(data):
i = 0
for storm in data:
for t in storm:
time, lat, lon = t[0], t[1], t[2]
try:
get_data(['700', '500', '225'], ['geopotential', 'u_component_of_wind', 'v_component_of_wind'], time, lat, lon, grid_size = 25)
except:
print("False request.")
i+=1
print("Storm ", i, " completed.")
print("Download complete.")
| 5,841
| 37.183007
| 176
|
py
|
hurricast
|
hurricast-master/sophie_code/ModuleReanalysisData.py
|
from ftplib import FTP
from netCDF4 import Dataset
from netCDF4 import num2date, date2num
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
import math
import csv
import calendar
from ecmwfapi import ECMWFDataServer
#from Sophie_modules import ModuleStormReader as Msr
#from Sophie_modules import MyModuleFileFolder as MMff
types = {'vwnd': 'vwnd.sig995',
'uwnd': 'uwnd.sig995',
'slp': 'slp', # Sea level pressure
'rhum': 'rhum.sig995', # Relative humidity
'pres': 'pres.sfc', # pressure (surface)
'pr_wtr': 'pr_wtr.eatm', # Precipitable water
'pottmp': 'pottmp.sig995', # Potential temperature
'omega': 'omega.sig995', # vertical velocity
'lftx4': 'lftx4.sfc', # Best (4-layer) lifted index
'lftx': 'lftx.sfc', # Surface lifted index
'tair': 'air.sig995', # air temperature at sigma 995
'land': 'land' # land sea mark (only one file)
}
types_legend={'vwnd': 'v wind sig995',
'uwnd': 'u wind sig995',
'slp': ' Sea level pressure',
'rhum': 'Relative humidity sig995',
'pres': 'Pressure (surface)',
'pr_wtr': 'Precipitable water',
'pottmp': 'Potential temperature sig995',
'omega': 'Vertical velocity (omega) sig995',
'lftx4': 'Best (4-layer) lifted index',
'lftx': 'Surface lifted index',
'tair': 'Air temperature at sigma 995',
'land': 'land sea mark (only one file)'
}
types_variable={}
for key in types.keys():
types_variable[key]=key
types_variable['tair']='air'
### params for interim database
params={
}
def get_type_name(type):
type_name=types[type]
return type_name
def list_to_datetime(list):
if len(list)==5:
date_t=datetime(list[0],list[1],list[2],list[3], list[4])
if len(list)==4:
date_t=datetime(list[0],list[1],list[2],list[3])
else:
date_t=datetime(list[0],list[1],list[2])
return date_t
def load_ftp_reanalysis(type_data='slp', year=2000,
foldersaving='/home/sgiffard/Documents/StormProject/DataStorm/data_reanalysis/ftp_data/'):
'''
load the reanalysis data (netCDF files) from the ftp.cdc.noaa.gov website by ftp connection.
:param type_data: the type of data to load ('vwnd', 'uwnd', 'slp', 'rhum', 'pres', 'pr_wtr', 'pottmp', 'omega',
'lftx4', 'lftx', 'tair', 'land')
:param year: the year of the data to load (1949 to current)
:param foldersaving:
:return:
'''
if not type_data in types.keys():
print('load_ftp_reanalysis Error: no '+type_data+' in the ftp surface folder.')
print('possible entries are: ')
print(types.keys() )
raise IOError
type_file=types[type_data]
ftp = FTP('ftp.cdc.noaa.gov')
ftp.login()
ftp.cwd('/Projects/Datasets/ncep.reanalysis/surface/')
if type_data == 'land': # no year in file name
ftp.retrbinary('RETR ' + type_file + '.nc',
open(foldersaving + type_file + '.nc', 'wb').write)
else:
ftp.retrbinary('RETR '+type_file+'.'+str(year)+'.nc',
open(foldersaving+type_file+'.'+str(year)+'.nc', 'wb').write)
ftp.quit()
def open_netCDF_file(nc_filename):
#nc_filename = folder_files + 'slp.2011.nc'
rootgrp = Dataset(nc_filename, "r+", format="NETCDF4")
return rootgrp
def get_cropped_nefCDF_data(rootgrp, type_data='slp',
_datetime=datetime(2000,1,1,6), center=[-95,25], size=10):
"""
:param rootgrp:
:param type_data:
:param _datetime:
:param center: longitude from 0 to 357.5, latitude from -90 to 90
:param size:
:return: grid of the crop wanted
"""
nlats = len(rootgrp.dimensions["lat"])
nlons = len(rootgrp.dimensions["lon"])
# range longitudes (indices):
if center[0]<0:
center[0]=center[0]+360
#min_lon = center[0] - size/2
#max_lon = center[0] + size/2
#range_lon = list(map(int, range(math.ceil(min_lon), math.ceil(max_lon)))) #
#range_ilon=list(map(int, range(math.ceil(min_lon/2.5),math.ceil(max_lon/2.5)))) #+1
# test only size window and not size in degrees
#min_lon = center[0]/2.5 - size/2
#max_lon = center[0]/2.5 + size/2
approx_center0=math.ceil(center[0]/2.5)
min_lon=approx_center0-int((size-1)/2)
max_lon=approx_center0+int((size-1)/2)
range_ilon = list(map(int, range(min_lon,max_lon+1)) )
range_ilon_new=[]
for ilon in range_ilon:
if ilon<0:
range_ilon_new.append(ilon+nlons)
elif ilon>=nlons:
range_ilon_new.append(ilon-nlons)
else:
range_ilon_new.append(ilon)
# range latitudes (indices):
#min_lat=180-(center[1]+90+size/2)
#max_lat =180- (center[1]+90 - size/2)
#range_ilat=list(map(int, range(math.ceil(min_lat/2.5),math.ceil(max_lat/2.5))))
#idem1
#min_lat=180-(center[1]+90+size/2)
#max_lat =180- (center[1]+90 - size/2)
center_i1=math.ceil((180-center[1]+90)/2.5)
min_lat=center_i1-int((size-1)/2)
max_lat=center_i1+int((size-1)/2)
range_ilat=list(map(int, range(min_lat, max_lat+1)))
range_ilat_new=[]
for ilat in range_ilat:
if ilat<0:
range_ilat_new.append(ilat+nlats)
elif ilat>nlats:
range_ilat_new.append(ilat-nlats)
else:
range_ilat_new.append(ilat)
#i_lon=int(center[0]/2.5)
#i_lat=int((center[1]+90)/2.5)
if type_data == 'land':
# final grid
grid=[]
longs=[]
lats = []
for i_lat, i in zip(reversed(range_ilat_new), range(len(range_ilon_new))): # reversed because latitudes are filled from +90 to -90!
grid.append([])
lats.append(rootgrp['lat'][i_lat])
longs = []
for i_lon in range_ilon_new:
grid[i].append(rootgrp[types_variable[type_data]][0][i_lat][i_lon])
longs.append(rootgrp['lon'][i_lon])
else:
# date (in hours since 1800 usually --> one value every 6 hours)
times = rootgrp['time']
time_i = date2num(_datetime, units=times.units)\
-date2num(datetime(_datetime.year,1,1,0), units=times.units)
time_i=int(time_i/6)
# final grid
grid=[]
longs=[]
lats = []
for i_lat, i in zip(reversed(range_ilat_new), range(len(range_ilon_new))): # reversed because latitudes are filled from +90 to -90!
grid.append([])
lats.append(rootgrp['lat'][i_lat])
longs = []
for i_lon in range_ilon_new:
grid[i].append(rootgrp[types_variable[type_data]][time_i][i_lat][i_lon])
longs.append(rootgrp['lon'][i_lon])
return grid,longs,lats
def get_cropped_nefCDF_data_interim(rootgrp, params_netCDF=['u10'],
_datetime=datetime(2000,1,1,6), center=[-95,25], size=20,
params_shortnames=None, levels=None, flag_save_lonlat=False):
'''
Get cropped data around a center from ERA interim data, several parameters can be acquired at the same time.
:param rootgrp: netCDF file corresponding to the date wanted.
:param params_netCDF: names of the parameters to get, as they appear in the netCDF (rootgrp) file
:param _datetime:
:param center: center of the grid (location of the storm)
:param size: spatial length of the window: number of points (and not number of degrees)
:return: grid = dict(param_netCDF : sizexsize), longs, lats
'''
# correct shortnames (the netCDF names are sometimes only numbers...)
if not params_shortnames or len(params_shortnames) != len(params_netCDF) :
params_shortnames=params_netCDF
nlats = len(rootgrp.dimensions["latitude"])
nlons = len(rootgrp.dimensions["longitude"])
size_grid_lat=rootgrp['latitude'][1]-rootgrp['latitude'][0]
size_grid_lon=rootgrp['longitude'][1]-rootgrp['longitude'][0]
# range longitudes (indices):
if center[0]<0: center[0]=center[0]+360
# size points and not size in degrees
approx_center0=round(center[0]/size_grid_lon)
min_lon=approx_center0-int((size-1)/2)
max_lon=approx_center0+int((size-1)/2)
range_ilon = list(map(int, range(int(min_lon),int(max_lon+1) )) )
range_ilon_new=[]
for ilon in range_ilon:
if ilon<0: range_ilon_new.append(ilon+nlons)
elif ilon>=nlons: range_ilon_new.append(ilon-nlons)
else: range_ilon_new.append(ilon)
# range latitudes (indices):
center_i1=round((center[1]+90)/size_grid_lat)
min_lat=center_i1-int((size-1)/2)
max_lat=center_i1+int((size-1)/2)
range_ilat=list(map(int, range(int(min_lat)-1, int(max_lat) ))) # after it will be in reverse!
range_ilat_new=[]
for ilat in range_ilat:
if ilat<0: range_ilat_new.append(ilat+nlats)
elif ilat>nlats: range_ilat_new.append(ilat-nlats)
else: range_ilat_new.append(ilat)
# date (in hours since ...depend! --> one value every 6 hours)
times = rootgrp['time']
time_i = date2num(_datetime, units=times.units)-times[0]
time_i=int(time_i/6)
# final grid
grid={}; longs=[]; lats = []
for param,param_name in zip(params_netCDF,params_shortnames):
grid[param_name]=[]
if levels is None:
for i_lat, i in zip(reversed(range_ilat_new), range(len(range_ilon_new))): # reversed because latitudes are filled from +90 to -90!
grid[param_name].append([])
if flag_save_lonlat:
lats.append(rootgrp['latitude'][i_lat])
longs = []
for i_lon in range_ilon_new:
grid[param_name][i].append(rootgrp[param][time_i][i_lat][i_lon])
if flag_save_lonlat:
longs.append(rootgrp['longitude'][i_lon])
else:
i_levs=[]
for level,i_lev in zip(rootgrp['level'],range(len(rootgrp['level']))):
i_levs.append(i_lev)
for i_lev,i_lev_final in zip(i_levs,range(len(i_levs))):
grid[param_name].append([])
for i_lat, i in zip(reversed(range_ilat_new), range(len(range_ilon_new))): # reversed because latitudes are filled from +90 to -90!
#grid[param_name][i_lev_final].append([])
if flag_save_lonlat:
lats.append(rootgrp['latitude'][i_lat])
#longs = []
# for i_lon in range_ilon_new:
#grid[param_name][i_lev_final][i].append(rootgrp[param][time_i][i_lev][i_lat][i_lon])
# if flag_save_lonlat:
# longs.append(rootgrp['longitude'][i_lon])
grid[param_name][i_lev_final].append(rootgrp[param][time_i][i_lev][i_lat][range_ilon_new])
if flag_save_lonlat:
longs= rootgrp['longitude'][range_ilon_new]
return grid,longs,lats
def plot_grid_image(grid,longs=None,lats=None,type_data='slp', _datetime=None, verbose=None, fileSaving=None,
vmin=None, vmax=None, title_add=None):
'''
plot the grid given, if there are the long/lats, the axis are set accordingly.
'''
grid = np.array(grid)
if longs is not None: x_lims = [longs[0], longs[-1]]
else: x_lims = [0, len(grid[0])]
if lats is not None: y_lims = [lats[0], lats[-1]]
else: y_lims = [0, len(grid)]
plt.figure()
if not vmin: vmin=np.min(grid)
if not vmax: vmax=np.max(grid)
plt.imshow(grid, extent=[x_lims[0], x_lims[1], y_lims[0], y_lims[1]],
vmin=vmin, vmax=vmax,
interpolation='nearest', origin='lower', cmap='seismic') # cmap='hot',
#title = types_legend[type_data]
title=type_data
if _datetime: title=title+' '+str(_datetime)
if title_add: title=title+' '+str(title_add)
plt.title(title)#, loc='left'
cax = plt.axes([0.825, 0.1, 0.075, 0.80])
plt.colorbar(cax=cax)
if fileSaving: plt.savefig(fileSaving)
if verbose: plt.show()
def get_windows_from_tracks(year_init=1958, year_end=2017, types_data=['slp'], instants=[0,1],
size_crop=10, folder_data='/home/sgiffard/Documents/StormProject/DataStorm/data_reanalysis/ftp_data/',
pkl_inputfile='/home/sgiffard/Documents/StormProject/DataStorm/2018_02_04_processed_pickle/tracks_1860-01-01_after.pkl'):
'''
get all the windows (or grids) around the storm centers for desired data, time instants and window size
:param year_init:
:param year_end:
:param types_data: list of data wanted ( as pressure, temp, wind... see on top for possible nefCDF possibilities)
:param instants: list of time instants desired (one every 6 h)
:param size_crop: size of the window in lon/lat degrees (note: the resulting grid is smaller since the step is 2.5 deg)
:return: grids of values around the current center of the storm at the instants wanted
= dict{ stormid: list(nbinstants x types_data x size_crop/2.5 x size_crop/2.5) }
'''
list_tracks=Msr.load_list_tracks_from_pkl(pkl_inputfile)
tracks_year={}
for year in range(year_init, year_end+1):
tracks_year[year]=[]
for track in list_tracks:
if track.dates[0][0] in range(year_init, year_end+1):
tracks_year[track.dates[0][0]].append(track)
grids={}
for year,tracks in tracks_year.items():
for track in tracks:
grids[track.stormid]=[]
i=-1
for instant in instants:
if instant >= track.Ninstants:
continue
grids[track.stormid].append([])
i=i+1
for type_data in types_data:
type_name = get_type_name(type_data)
if type_name == 'land':
nc_filename = type_name + '.nc'
else:
nc_filename = type_name + '.' + str(year) + '.nc'
rootgrp = open_netCDF_file(folder_data + nc_filename)
center=[track.longitudes[instant],track.latitudes[instant]]
grid, longs, lats=get_cropped_nefCDF_data(rootgrp, type_data=type_data,
_datetime=list_to_datetime(track.dates[instant]), center=center, size=size_crop)
grids[track.stormid][i].append(grid)
rootgrp.close()
return grids
def get_windows_from_track(track,instants,types, size_crop=10,
folder_data='/home/sgiffard/Documents/StormProject/DataStorm/data_reanalysis/ftp_data/'):
'''
single track of a storm
:return: grids corresponding to track
'''
year=track.dates[0][0]
grids=[]
for type in types:
type_name = get_type_name(type)
if type_name == 'land':
nc_filename = type_name + '.nc'
else:
nc_filename = type_name + '.' + str(year) + '.nc'
rootgrp = open_netCDF_file(folder_data + nc_filename)
for instant in instants:
center = [track.longitudes[instant], track.latitudes[instant]]
grid, longs, lats=get_cropped_nefCDF_data(rootgrp, type_data=type,
_datetime=list_to_datetime(track.dates[instant]), center=center, size=size_crop)
grids.append(grid)
return grids
def get_windows_from_track_interim(track,instants,types, size_crop=10,
folder_data='/home/sgiffard/Documents/StormProject/DataStorm/ERA_interim/grid_1/sfc/sst_mont_pres_uvb_pv_crwc_sp/',
levtype='sfc' , folderLUT='/home/sgiffard/Documents/StormProject/DataStorm/ERA_interim/',
levels=None, history=0):
'''
single track of a storm
:return: grids corresponding to track
'''
units = 'hours since 1970-01-01 00:00:00 UTC'
if history:
for instant in instants:
numdate=date2num(list_to_datetime(track.dates[instant]),units)
new_datetime=num2date(numdate-6*history, units)
track.dates[instant]=[new_datetime.year,new_datetime.month,new_datetime.day,new_datetime.hour]
year=track.dates[0][0]
month=track.dates[0][1]
grids=[]
rootgrp = open_netCDF_file(folder_data + "interim_daily_%04d%02d.nc" % (year, month))
for instant in instants:
if track.dates[instant][1] != month:
year = track.dates[instant][0]
month = track.dates[instant][1]
rootgrp = open_netCDF_file(folder_data + "interim_daily_%04d%02d.nc" % (year, month))
center = [track.longitudes[instant], track.latitudes[instant]]
a,b,c,netCDFnames=open_LUT_list_params(folderLUT+'list_params_nums_'+levtype+'.txt')
types_netCDF=[netCDFnames[t] for t in types]
grid, longs, lats=get_cropped_nefCDF_data_interim(rootgrp, params_netCDF=types_netCDF, _datetime=list_to_datetime(track.dates[instant]),
center=center, size=size_crop, params_shortnames=types, levels=levels)
grids.append(grid)
return grids
def get_distance_km(lon1, lat1, lon2, lat2):
'''
Using haversine formula (https://www.movable-type.co.uk/scripts/latlong.html)
'''
R=6371e3 # meters (earth's radius)
phi_1=math.radians(lat1)
phi_2 = math.radians(lat2)
delta_phi=math.radians(lat2-lat1)
delta_lambda=math.radians(lon2-lon1)
a=np.power(math.sin(delta_phi/2),2) + math.cos(phi_1)*math.cos(phi_2)\
* np.power(math.sin(delta_lambda/2),2)
c= 2 * math.atan2(math.sqrt(a),math.sqrt(1-a))
return R*c/1000.
def get_longlat_from_offsets(lon, lat, dkm_lon, dkm_lat):
'''
:param lon: initial longitude
:param lat: initial latitude
:param dkm_lon: east-west offset in meters
:param dkm_lat: north-south offset in meters
:return: lon_final, lat_final
'''
# Earth’s radius, sphere
R = 6378137
# Coordinate offsets in radians
dLat = dkm_lat / R
dLon = dkm_lon / (R * math.cos(math.radians(lat)))
# OffsetPosition, decimal degrees
latO = lat + dLat * 180 / math.pi
lonO = lon + dLon * 180 / math.pi
return lonO, latO
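# Rough check (hypothetical values, offsets are in meters since R is given in meters):
#   get_longlat_from_offsets(0, 0, 0, 1000)
# returns approximately (0.0, 0.009), since 1000 m corresponds to about 0.009 degrees of latitude.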
##### ERA interim data ########
###############################
def load_ECMWF_server():
server = ECMWFDataServer()
return server
def interim_request(server, requestDates, requestParams, levtype, size_grid, targetfile, levelist=''):
"""
An ERA interim request for analysis pressure level data.
Change the keywords below to adapt it to your needs.
(eg to add or to remove levels, parameters, times etc)
Request cost per day is 112 fields, 14.2326 Mbytes
:param server: loaded server
:param requestDates:
:param requestParams:
:param levtype: sfc, ml (model level), pl (pressure level), pt, pv
:param size_grid: if 1: 1 degree x 1 degree (0.75 is the smallest computed, but 0.25 is the smallest available)
:param targetfile:
:param levelist:
:return:
"""
if not levelist:
levelist="0"
server.retrieve({
"class": "ei",
"stream": "oper",
"type": "an", # an= analysis, fc= forcast , 4v...
"dataset": "interim", # interim= ERA-interim dataset
"date": requestDates,
"expver": "1",
"levtype": levtype,
"levelist": "/".join(map(str,levelist)),#"100/500/700/750/850/925/1000",
"param": "/".join(requestParams),
"target": targetfile,
"time": "00/06/12/18",
"format" : "netcdf",
"grid": str(size_grid)+"/"+str(size_grid)
})
def retrieve_interim(yearStart=2000, yearEnd=2001, monthStart=1, monthEnd=12, list_params='',
levtype='sfc', size_grid=1,
targetfolder='/home/sgiffard/Documents/StormProject/DataStorm/ERA_interim/', levelist=''):
'''
A function to demonstrate how to iterate efficiently over several years and months etc
for a particular interim_request.
:param yearStart:
:param yearEnd:
:param monthStart:
:param monthEnd:
:param list_params: list of shortnames of the parameters wanted (get them from the LUT files
:param levtype: sfc, ml (model level), pl (pressure level), pt, pv
:param size_grid: if 1: 1 degree x 1 degree (0.75 is the smallest computed, but 0.25 is the smallest available)
:param targetfolder: folder to store results and where are LUT files
:param levelist: for pressure level or model level
:return:
'''
curr_targetfolder=targetfolder+'grid_'+str(size_grid)+'/'+levtype+'/'+'_'.join(list_params)+'/'
MMff.MakeDir(curr_targetfolder)
server=load_ECMWF_server()
requestParams_ids=get_params_ids_interim(list_params, levtype,
folderLUT=targetfolder)
for year in list(range(yearStart, yearEnd + 1)):
for month in list(range(monthStart, monthEnd + 1)):
startDate = '%04d%02d%02d' % (year, month, 1)
numberOfDays = calendar.monthrange(year, month)[1]
lastDate = '%04d%02d%02d' % (year, month, numberOfDays)
targetfile = curr_targetfolder+"interim_daily_%04d%02d.nc" % (year, month)
requestDates = (startDate + "/TO/" + lastDate)
interim_request(server, requestDates, requestParams_ids, levtype, size_grid, targetfile, levelist)
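# Hypothetical usage (the available shortnames depend on the list_params_nums_*.txt LUT
# files in the target folder): download one year of a surface field on a 1-degree grid, e.g.
#   retrieve_interim(yearStart=2000, yearEnd=2000, list_params=['sp'], levtype='sfc', size_grid=1)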
def open_LUT_list_params(LUTfilename):
'''
The look-up table (LUT) files map the parameter names to the numbers used in the netCDF files.
:param LUTfilename: total path of the .txt (ex: path_to/list_params_nums_ml.txt)
:return: shortnames (list) plus three dicts mapping each shortname to its id, full name, and netCDF variable name
'''
shortnames=[]; ids={}; names={}; names_netCDF={}
with open(LUTfilename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
shortnames.append(row['shortname'][1:])
ids[row['shortname'][1:]]=row['id']
names[row['shortname'][1:]]=row['name'][1:]
names_netCDF[row['shortname'][1:]]=row['CDFname'][1:]
return shortnames, ids, names, names_netCDF
def open_level_list(levtype='ml', folderLUT='/home/sgiffard/Documents/StormProject/DataStorm/ERA_interim/'):
filename=folderLUT+'list_levels_'+levtype+'.txt'
with open(filename, newline='') as file:
level_list=file.read().splitlines()
return level_list
def get_params_ids_interim(list_params,levtype,
folderLUT='/home/sgiffard/Documents/StormProject/DataStorm/ERA_interim/'):
'''
Get the numbers of the parameters wanted to collect in the ERA interim database, using the LUT files.
:param list_params:
:param levtype:
:param folderpath:
:return:
'''
LUTfilename=folderLUT+'list_params_nums_'+str(levtype)+'.txt'
shortnames, ids, names, namesCDF = open_LUT_list_params(LUTfilename)
params_ids=[]
for param in list_params:
params_ids.append(ids[param])
return params_ids
def get_center_value_Xdata(X):
'''
keep only the central value of the images,
:param X: array size nb_samples x nb_params x size_crop x size_crop (size_crop is longitude/latitude)
:return: X_center, array size nb_samples x nb_params
'''
size_crop=len(X[0][0])
ncenter=int((size_crop-1)/2)
print('center:'+str(ncenter), flush=True)
Xcenters=np.zeros([len(X),len(X[0])])
for i,x in enumerate(X):
Xcenters[i]=[xparam[ncenter][ncenter] for xparam in x]
return Xcenters
def get_mean_value_Xdata(X):
'''
keep only the mean value of each image,
:param X: array size nb_samples x nb_params x size_crop x size_crop (size_crop is longitude/latitude)
:return: Xmeans, array size nb_samples x nb_params
'''
Xmeans=np.zeros([len(X),len(X[0])])
for i,x in enumerate(X):
Xmeans[i]=[np.mean(xparam) for xparam in x]
return Xmeans
def get_deriv_values_Xdata(X,dist_diff=2, flag_center=False):
'''
get the 4 derivatives of the parameters in +-longitude, +-latitude wrt the center.
:param X: array size nb_samples x nb_params x size_crop x size_crop (size_crop is longitude/latitude)
flag_center: if True, adds the center value at the end.
:return: Xderiv, array size nb_samples x nb_params x 4
'''
size_crop=len(X[0][0])
ncenter=int((size_crop-1)/2)
if dist_diff>ncenter:
print('Warning! distance to center for the derivative is too large. setting it to '+str(ncenter), flush=True)
dist_diff=ncenter
if flag_center:
Xderiv = np.zeros([len(X), len(X[0]), 5])
else: Xderiv = np.zeros([len(X), len(X[0]), 4])
for i,x in enumerate(X):
for p,xparam in enumerate(x):
Xderiv[i][p][0] = xparam[ncenter][ncenter] - xparam[int(ncenter+dist_diff)][ncenter]
Xderiv[i][p][1] = xparam[ncenter][ncenter] - xparam[int(ncenter - dist_diff)][ncenter]
Xderiv[i][p][2] = xparam[ncenter][ncenter] - xparam[ncenter][int(ncenter + dist_diff)]
Xderiv[i][p][3] = xparam[ncenter][ncenter] - xparam[ncenter][int(ncenter - dist_diff)]
if flag_center:
Xderiv[i][p][4] = xparam[ncenter][ncenter]
return Xderiv
def crop_grids(X,crop_final=11):
size_crop=len(X[0][0])
if crop_final>size_crop:
print('desired crop is larger than initial.')
return X
elif crop_final==size_crop:
return X
ncenter=int((size_crop-1)/2)
nmin=int(ncenter-(crop_final-1)/2)
print(nmin)
nmax=int(ncenter+(crop_final-1)/2+1)
print(nmax)
Xsmall=np.zeros([len(X),len(X[0]),crop_final,crop_final])
for i,x in enumerate(X):
Xsmall[i]=[np.array(xparam)[nmin:nmax,nmin:nmax] for xparam in x]
#Xsmall=Xsmall.tolist()
return Xsmall
| 26,288
| 39.074695
| 149
|
py
|
hurricast
|
hurricast-master/sophie_code/ModuleStormReader.py
|
## Module to read and process data from
## https://ghrc.nsstc.nasa.gov/services/storms website
import requests
from xml.etree import ElementTree
import re
import pickle
from http.client import RemoteDisconnected
import csv
import os
import pandas as pd
import numpy as np
from netCDF4 import num2date, date2num
from datetime import datetime, timedelta
from Sophie_modules import ModuleReanalysisData as Mre
class Track:
def __init__(self):
self.dates = []
self.categories=[]
self.latitudes=[]
self.longitudes=[]
self.windspeeds=[]
self.pressures=[]
self.stormid = 0
self.Ninstants=0
self.month=None
self.maxcategory=0 # out of 8 : 8 is maximum
def import_from_raw_track(self, raw_track, stormid):
self.stormid=stormid
self.Ninstants=len(raw_track)
for data_t in raw_track:
date_t=list(map(int, re.findall('\d+',data_t['date'])))
self.dates.append(date_t)
self.categories.append(get_num_cat(data_t['category']))
self.latitudes.append( float(data_t['latitude']) )
self.longitudes.append(float(data_t['longitude']))
self.pressures.append(float(data_t['pressure']))
self.windspeeds.append(float(data_t['windspeed']))
self.month=self.dates[0][1]
self.maxcategory=max(self.categories)
def import_from_raw_track_IBTRACKS(self,rootgrp, id_in_list, time_steps):
self.stormid=b''.join(rootgrp['storm_sn'][id_in_list]).decode("utf-8")
self.Ninstants=len(time_steps)
for t in time_steps:
dtime = num2date(rootgrp['time_wmo'][id_in_list][t], units=rootgrp['time_wmo'].units)
self.dates.append([dtime.year,dtime.month,dtime.day,dtime.hour])
wind=rootgrp['wind_wmo'][id_in_list][t]
self.windspeeds.append(wind)
self.categories.append(get_num_cat(sust_wind_to_cat(wind )) ) #1.12*
self.latitudes.append(rootgrp['lat_wmo'][id_in_list][t])
self.longitudes.append(rootgrp['lon_wmo'][id_in_list][t])
self.pressures.append(rootgrp['pres_wmo'][id_in_list][t])
self.month = self.dates[0][1]
self.maxcategory = max(self.categories)
if self.maxcategory>5: print('cat>5!!: '+str(self.maxcategory))
class Track_IBTRACKS_full(Track):
def __init__(self):
Track.__init__(self)
self.name = ''
self.basin=[]
self.dist2land=[]
self.nature=[] #Storm nature
# key: 0 = TS - Tropical
# 1 = SS - Subtropical
# 2 = ET - Extratropical
# 3 = DS - Disturbance
# 4 = MX - Mix of conflicting reports
# 5 = NR - Not Reported
# 6 = MM - Missing
# 7 = - Missing
#(storm, time) Minimum Central Pressure
# basin: Based on present location
# key: 0 = NA - North Atlantic
# 1 = SA - South Atlantic
# 2 = WP - West Pacific
# 3 = EP - East Pacific
# 4 = SP - South Pacific
# 5 = NI - North Indian
# 6 = SI - South Indian
# 7 = AS - Arabian Sea
# 8 = BB - Bay of Bengal
# 9 = EA - Eastern Australia
# 10 = WA - Western Australia
# 11 = CP - Central Pacific
# 12 = CS - Carribbean Sea
# 13 = GM - Gulf of Mexico
# 14 = MM - Missing
def import_from_raw_track_IBTRACKS_full(self,rootgrp, id_in_list, time_steps):
Track.import_from_raw_track_IBTRACKS(self,rootgrp,id_in_list, time_steps)
self.name=b''.join(rootgrp['name'][id_in_list]).decode("utf-8")
for t in time_steps:
self.basin.append(rootgrp['basin'][id_in_list][t])
self.dist2land.append(rootgrp['dist2land'][id_in_list][t])
self.nature.append(rootgrp['nature_wmo'][id_in_list][t])
def get_num_cat(raw_category):
'''
:param raw_category: storm category
:return: numerical category in [0,7]
'''
if raw_category in ['LP', 'WV', 'DB']:
cat=0
elif raw_category in ['SD', 'TD', 'ED']:
cat=1
elif raw_category in ['SS','TS','ES']:
cat=2
elif raw_category[0] == 'H':
cat=int(raw_category[1])+2
else:
print('No category found. cat=-1')
cat=-1
return cat
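# Example (illustrative values): get_num_cat('TD') returns 1 and get_num_cat('H3') returns 5,
# so the numerical scale runs from 0 (low/wave/disturbance) up to 7 for an 'H5' hurricane.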
def sust_wind_to_cat(wind):
# maximum sustained wind in kt (knot)
if wind<=33: cat='TD' # <=33
elif wind<=63.: cat='TS'
elif wind <=82.: cat='H1'
elif wind <=95.: cat='H2'
elif wind <=112.: cat='H3'
elif wind <=136.: cat='H4'
else: cat='H5'
return cat
def get_num_basin(basin):
'''
:param basin : string of the basin type 'AT', 'EP' or 'CP'
:return: numerical basin {'AT':0, 'EP':1, 'CP':2}
'''
dict_basin={'AT':0, 'EP':1, 'CP':2}
return dict_basin[basin]
def get_num_basin2(basin):
'''
:param basin : string of the basin type 'AT', 'EP' or 'CP'
:return: numerical basin - same as IBtracks {'AT':0, 'EP':3, 'CP':12}
'''
dict_basin={'AT':0, 'EP':3, 'CP':12}
return dict_basin[basin]
def get_disp_long_lat(stormid, t0, t, list_tracks=None, storm=None):
if not storm:
if not list_tracks:
list_tracks=load_list_tracks_from_pkl()
for track in list_tracks:
if track.stormid == stormid:
storm=track
break
if storm.Ninstants<t+1:
return None
lo=storm.longitudes[t]-storm.longitudes[t0]
la=storm.latitudes[t]-storm.latitudes[t0]
if lo>100:
lo=lo-360
if lo<-100:
lo=lo+360
return [lo,la]
def list_storm_request(date_init=None,date_end=None,basin=None,mincat=None,maxcat=None, flag_onlyids=False):
'''
get the list of storms (with relevant info) matching the given date, basin, or category constraints
:param date_init: 'yyyy-mm-dd'
:param date_end: 'yyyy-mm-dd'
:param basin: 'AT', 'EP' or 'CP' : atlantic (tot 1757), eastern pacific(tot 1018) or central pacific(tot 77)
:param mincat: L - Unknown, disturbance, wave, or low pressure
D - Tropical, subtropical, or extratropical depression
S - Tropical, subtropical, or extratropical storm
1 - Category-1 hurricane
2, 3, 4, 5 (idem)
:param maxcat: idem
:param flag_onlyids: if the output is only a list of storm ids, set to True. Default is False
:return: dict: storms{stormid:stormparams} OR list of ids
'''
list_attr=locals()
print(list_attr)
string_tot='https://ghrc.nsstc.nasa.gov/services/storms/search.pl?'
lut_args= dict(date_init='from', date_end='thru', basin='basin', mincat='mincategory', maxcat='maxcategory')
for key,value in list_attr.items():
if value and key in lut_args.keys():
string_tot = string_tot+lut_args[key]+'='+str(value)+'&'
string_tot = string_tot[:-1] # erase last '&'
print(string_tot)
try:
r = requests.get(string_tot, headers={'Connection':'close'})
except:
print('Error in the request, maybe wrong parameters.')
raise
tree = ElementTree.fromstring(r.content)
if flag_onlyids:
storms = []
if tree.getchildren()[0].tag=='Error':
print('Warning: No storm found with these parameters!')
else:
print(str(len(tree.getchildren()) ) + ' storms found.')
for child in tree.iter('Storm'):
storms.append( int(child.attrib['stormid']) )
else:
storms = {}
        if tree[0].tag == 'Error':
            print('Warning: No storm found with these parameters!')
        else:
            print(str(len(tree)) + ' storms found.')
for child in tree.iter('Storm'):
storms[int(child.attrib['stormid'])] = child.attrib
return storms
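# Illustrative usage sketch (hypothetical, not from the original code): querying the GHRC storm
# service with the parameters documented above; the values are made up and the call goes online.
def _example_list_atlantic_hurricanes():  # hypothetical helper, for illustration only
    return list_storm_request(date_init='2005-01-01', date_end='2005-12-31',
                              basin='AT', mincat='1', flag_onlyids=True)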
def get_storm_track(stormid=2017011):
'''
Get the track of the storm
:param stormid: id of the storm
:return: track= list of dicts, each element is one time point and its associated values
'''
while True:
try:
r = requests.get('https://ghrc.nsstc.nasa.gov/services/storms/track.pl?stormid='+str(stormid),
headers={'Connection':'close'})
tree = ElementTree.fromstring(r.content)
            if tree[0].tag == 'Error':
print('Warning: No track found with this storm id!')
raise IOError
raw_track = []
for child in tree.iter('Track'):
raw_track.append(child.attrib)
break
        except (RemoteDisconnected, ConnectionError):
print(str(stormid)+ ' connection error! trying again...')
except:
print('Error in the track request, maybe wrong parameters:')
print('current request is: '+'https://ghrc.nsstc.nasa.gov/services/storms/track.pl?stormid='+str(stormid))
raw_track=[]
return raw_track
def save_all_storm_tracks(namefilesaving, date_init='1860-01-01'):
list_stormids=list_storm_request(date_init=date_init, flag_onlyids=True)
list_tracks=[]
for stormid in list_stormids:
raw_track=get_storm_track(stormid)
if raw_track:
track=Track()
track.import_from_raw_track(raw_track, stormid)
list_tracks.append(track)
with open(namefilesaving, 'wb') as pickle_file:
pickle.dump(list_tracks, pickle_file)
def get_6h_step_storm(times_storm):
times_storm=times_storm.compressed()
time0=times_storm[0]
store_times=[0]
for time1,id_t in zip(times_storm[1:],range(1,len(times_storm))):
h_diff=24*(time1-time0)
if h_diff==6:
time0=time1
store_times.append(id_t)
elif h_diff<6:
pass
elif h_diff>6:
return 0
return store_times
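# Illustrative usage sketch (hypothetical, not from the original code): times are in days, so
# consecutive 6-hourly records are 0.25 days apart; numpy is assumed to be imported as np above.
def _example_6h_steps():  # hypothetical helper, for illustration only
    times = np.ma.masked_invalid([0.0, 0.25, 0.5, 0.75, np.nan])
    return get_6h_step_storm(times)  # -> [0, 1, 2, 3]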
def save_all_storm_tracks_IBTRACKS(namefiledata, namefilesaving, date_init=1860, flag_full_data=False):
rootgrp = Mre.open_netCDF_file(namefiledata)
list_tracks = []
for s in range(len(rootgrp['storm_sn'])):
dtime = num2date(rootgrp['time_wmo'][s][0], units=rootgrp['time_wmo'].units)
if dtime.year < date_init:
continue
time_steps=get_6h_step_storm(rootgrp['time_wmo'][s])
if time_steps==0:
continue
if not flag_full_data:
track=Track()
track.import_from_raw_track_IBTRACKS(rootgrp,s,time_steps)
else:
track=Track_IBTRACKS_full()
track.import_from_raw_track_IBTRACKS_full(rootgrp,s,time_steps)
list_tracks.append(track)
with open(namefilesaving, 'wb') as pickle_file:
pickle.dump(list_tracks, pickle_file)
def load_list_tracks_from_pkl(pkl_inputfile='/home/sgiffard/Documents/StormProject/DataStorm/2018_02_04_processed_pickle/tracks_1860-01-01_after.pkl'):
file=open(pkl_inputfile, 'rb')
list_tracks=pickle.load(file)
file.close()
return list_tracks
def get_all_tracks_from_period(year_init=1958, year_end=2017):
list_tracks=load_list_tracks_from_pkl()
tracks=[]
for track in list_tracks:
if track.dates[0][0] in range(year_init, year_end+1):
tracks.append(track)
return tracks
def write_csv_data_from_pkl(pkl_inputfile, csv_outputfile, fields, windowt=8, thresh=4, flag_increase_data=True, namefilebasin=None):
file=open(pkl_inputfile, 'rb')
list_tracks=pickle.load(file)
file.close()
name_cols=['stormid','delay_tsteps', 'intenseStorm']
for field in fields:
if field in ['month', 'maxcategory']:
name_cols.append(field)
        elif field == 'basin' and namefilebasin and os.path.isfile(namefilebasin):
name_cols.append(field)
basins=dict(np.array(pd.read_csv(namefilebasin,header=None)))
else:
name_cols.extend([field+str(i) for i in range(windowt)])
with open(csv_outputfile, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(name_cols)
smalltracks=0
maxcat_tracks=0
Nintense=0
for track in list_tracks:
if track.Ninstants<windowt+2:
print('Track '+str(track.stormid)+' too small, only '+str(track.Ninstants)+' instants.')
smalltracks=smalltracks+1
continue
if thresh < max(track.categories[0:windowt])+1:
print('Track '+str(track.stormid)+' threshold category ('+str(thresh)+') already reached.')
maxcat_tracks=maxcat_tracks+1
continue
# augment size of database by sliding the initial window.
if flag_increase_data:
range_windows=range(min(int((track.Ninstants-windowt)/2),6))
else:
range_windows=[0]
for delay in range_windows:
if thresh < max(track.categories[0:windowt+delay*2])+1:
continue
else:
tarray=[track.stormid, delay*2]
if track.maxcategory>thresh:
tarray.append(1)
Nintense=Nintense+1
else:
tarray.append(0)
for field in fields:
if field in ['month', 'maxcategory']:
tarray.append(getattr(track,field))
                        elif field == 'basin' and namefilebasin and os.path.isfile(namefilebasin):
tarray.append(get_num_basin(basins[track.stormid]))
else:
tarray.extend(getattr(track,field)[delay*2:windowt+delay*2])
spamwriter.writerow(tarray)
print('Number of too small tracks: '+str(smalltracks))
print('Number of threshold category already reached: '+ str(maxcat_tracks))
print('Number of intense (positive) storms: '+str(Nintense))
def get_sliding_tracks_from_pkl(windowi=8, instant_f=8, year_init=1958, year_end=2017, flag_windspeed=True, flag_abs_degrees=False):
'''
get X and y for learning storm direction from only tracking data.
:param windowi: nb of time points to use in training
:param instant_f: time point target
:param year_init:
:param year_end:
    :param flag_windspeed: whether to include wind speeds as input features
:return: X, y, group
X=matrix of features(nb_samplesxnb_features)
y= matrix of target, delta long/lat at instant_f (nb_samplesx2),
            group=list of storm ids, used for choosing train/test splits
'''
print('ModuleStormReader.get_sliding_tracks_from_pkl function...')
list_tracks=get_all_tracks_from_period(year_init, year_end)
X=[]; y=[]; group=[]
Nsmalltracks = 0
for track in list_tracks:
if track.Ninstants<instant_f+1:
Nsmalltracks=Nsmalltracks+1
continue
# augment size of database by sliding the initial window. (here delay = 1 time step)
range_windows = range(track.Ninstants - instant_f)
for delay in range_windows:
x_i=[]
for i in range(delay, delay+windowi-1):
if flag_windspeed:
x_i.append(track.windspeeds[i])
if flag_abs_degrees:
x_i.append(track.longitudes[i])
x_i.append(track.latitudes[i])
x_i.extend( get_disp_long_lat(track.stormid,i,i+1, storm=track) )
if flag_windspeed:
x_i.append(track.windspeeds[delay+windowi-1])
if flag_abs_degrees:
x_i.append(track.longitudes[delay+windowi-1])
x_i.append(track.latitudes[delay+windowi-1])
y_i=get_disp_long_lat(track.stormid,windowi-1+delay,instant_f+delay, storm=track)
X.append(x_i)
y.append(y_i)
group.append(track.stormid)
print('X and y created.')
print(' Nb of samples:'+ str(len(X)))
print(' Nb of storms used: '+str(len(list_tracks)))
print(' Nb of too small tracks: '+str(Nsmalltracks))
return X,y,group
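# Illustrative usage sketch (hypothetical, not from the original code): the `group` list of storm
# ids is meant for grouped train/test splitting so that a storm never appears in both sets;
# scikit-learn's GroupShuffleSplit (assumed available) is one way to do that.
def _example_grouped_split(X, y, group):  # hypothetical helper, for illustration only
    from sklearn.model_selection import GroupShuffleSplit
    splitter = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
    train_idx, test_idx = next(splitter.split(X, y, groups=group))
    return train_idx, test_idx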
def load_1D_data_from_IBTRACS(list_stormids,names_1Ddata,
file_IBtracks='/home/sgiffard/Documents/StormProject/DataStorm/storm_IBTrACS/tracks_1979_after_full_data_cat_nature.pkl',
file_tracksold='/home/sgiffard/Documents/StormProject/DataStorm/2018_02_04_processed_pickle/tracks_1860-01-01_after.pkl',
filecorr_stormids='/home/sgiffard/Documents/StormProject/DataStorm/storm_IBTrACS/correspondances_stormids.txt',
namefilebasin='/home/sgiffard/Documents/StormProject/DataStorm/2018_02_04_processed_pickle/basins_idstorms.csv'):
with open(filecorr_stormids, 'r') as f:
LUT_to_IBTRACS = {}
for line in f:
p = line.split()
LUT_to_IBTRACS[p[0]] = p[1]
list_IBtracks= load_list_tracks_from_pkl(file_IBtracks)
dict_IBtracks={}
for track in list_IBtracks:
dict_IBtracks[track.stormid]=track
list_oldtracks = load_list_tracks_from_pkl(file_tracksold)
dict_oldtracks={}
for track in list_oldtracks:
dict_oldtracks[str(track.stormid)]=track
basins_old=dict(np.array(pd.read_csv(namefilebasin,header=None)))
list_1Ddatatot=[]
for id in list_stormids:
flag_old=False
if str(id) in LUT_to_IBTRACS.keys():
oldid=LUT_to_IBTRACS[str(id)]
else:
oldid=None
if str(id) not in dict_IBtracks.keys():
if str(id) in LUT_to_IBTRACS.keys():
newid=LUT_to_IBTRACS[str(id)]
else:
flag_old=True
newid=id
else: newid=id
if flag_old: track=dict_oldtracks[str(newid)]
else:
track=dict_IBtracks[str(newid)]
if oldid:
trackold = dict_oldtracks[oldid]
for t in range(track.Ninstants):
if oldid:
t_f = None
for t2 in range(trackold.Ninstants):
if track.dates[t] == trackold.dates[t2]:
t_f=t2
                if t_f is None:  # no matching timestamp found in the old track
                    continue
else:
t_f=t
list_1Ddata=[id, t_f]
for name in names_1Ddata:
if name =='windspeed':
list_1Ddata.append(track.windspeeds[t])
elif name =='Jday_predictor': # static (at t=0)
dtime = datetime(track.dates[0][0], track.dates[0][1], track.dates[0][2], track.dates[0][3])
Jnum=date2num(dtime, "days since 1900-01-01", "standard")
if str(id).find('S')>0: # pacific
season_peak=date2num(datetime(track.dates[0][0],1,1),"days since 1900-01-01",
"standard")+238
else: # atlantic
season_peak = date2num(datetime(track.dates[0][0], 1, 1), "days since 1900-01-01",
"standard") + 253
Rd=25 # days providing the best fit, according to demaria 2005 p. 535.
list_1Ddata.append(np.exp(-np.power((Jnum-season_peak)/Rd,2)))
elif name == 'hemisphere':
if str(id).find('S') > 0: # pacific
list_1Ddata.append(0)
else:
list_1Ddata.append(1)
elif name =='latitude':
list_1Ddata.append(track.latitudes[t])
elif name =='longitude':
list_1Ddata.append(track.longitudes[t])
elif name =='initial_max_wind': #static
list_1Ddata.append(track.windspeeds[0])
elif name =='max_wind_change_12h':
if t==0:
list_1Ddata.append(0)
elif t==1:
list_1Ddata.append(track.windspeeds[t] - track.windspeeds[t-1])
else:
list_1Ddata.append(track.windspeeds[t]-track.windspeeds[t-2])
elif name=='basin':
if flag_old:
list_1Ddata.append(get_num_basin2(basins_old[track.stormid]))
else: list_1Ddata.append(track.basin[t])
elif name =='dist2land':
if flag_old:
list_1Ddata.append(None)
else: list_1Ddata.append(track.dist2land[t])
elif name =='nature':
if flag_old:
list_1Ddata.append(None)
else: list_1Ddata.append(track.nature[t])
list_1Ddatatot.append(list_1Ddata)
return list_1Ddatatot
def load_1D_data_from_IBTRACS_simple(names_1Ddata,
file_IBtracks='/home/sgiffard/Documents/StormProject/DataStorm/storm_IBTrACS/tracks_1979_after_full_data_cat_nature.pkl'):
list_IBtracks= load_list_tracks_from_pkl(file_IBtracks)
list_1Ddatatot=[]
for track in list_IBtracks:
for t in range(track.Ninstants):
list_1Ddata=[track.stormid, t]
for name in names_1Ddata:
if name =='windspeed':
list_1Ddata.append(track.windspeeds[t])
elif name =='Jday_predictor': # static (at t=0)
dtime = datetime(track.dates[0][0], track.dates[0][1], track.dates[0][2], track.dates[0][3])
Jnum=date2num(dtime, "days since 1900-01-01", "standard")
if str(track.stormid).find('S')>0: # pacific
season_peak=date2num(datetime(track.dates[0][0],1,1),"days since 1900-01-01",
"standard")+238
else: # atlantic
season_peak = date2num(datetime(track.dates[0][0], 1, 1), "days since 1900-01-01",
"standard") + 253
Rd=25 # days providing the best fit, according to demaria 2005 p. 535.
list_1Ddata.append(np.exp(-np.power((Jnum-season_peak)/Rd,2)))
elif name == 'hemisphere':
if str(track.stormid).find('S') > 0: # pacific
list_1Ddata.append(0)
else:
list_1Ddata.append(1)
elif name =='latitude':
list_1Ddata.append(track.latitudes[t])
elif name =='longitude':
list_1Ddata.append(track.longitudes[t])
elif name =='initial_max_wind': #static
list_1Ddata.append(track.windspeeds[0])
elif name =='max_wind_change_12h':
if t==0:
list_1Ddata.append(0)
elif t==1:
list_1Ddata.append(track.windspeeds[t] - track.windspeeds[t-1])
else:
list_1Ddata.append(track.windspeeds[t]-track.windspeeds[t-2])
elif name=='basin':
list_1Ddata.append(track.basin[t])
elif name =='dist2land':
list_1Ddata.append(track.dist2land[t])
elif name =='nature':
list_1Ddata.append(track.nature[t])
list_1Ddatatot.append(list_1Ddata)
return list_1Ddatatot
| 23,847
| 38.353135
| 152
|
py
|
news-tls
|
news-tls-master/setup.py
|
from setuptools import setup
setup(
name='news_tls',
packages=['news_tls'],
)
| 88
| 10.125
| 28
|
py
|
news-tls
|
news-tls-master/experiments/evaluate.py
|
import argparse
from pathlib import Path
from tilse.data.timelines import Timeline as TilseTimeline
from tilse.data.timelines import GroundTruth as TilseGroundTruth
from tilse.evaluation import rouge
from news_tls import utils, data, datewise, clust, summarizers
from pprint import pprint
def get_scores(metric_desc, pred_tl, groundtruth, evaluator):
if metric_desc == "concat":
return evaluator.evaluate_concat(pred_tl, groundtruth)
elif metric_desc == "agreement":
return evaluator.evaluate_agreement(pred_tl, groundtruth)
elif metric_desc == "align_date_costs":
return evaluator.evaluate_align_date_costs(pred_tl, groundtruth)
elif metric_desc == "align_date_content_costs":
return evaluator.evaluate_align_date_content_costs(
pred_tl, groundtruth)
elif metric_desc == "align_date_content_costs_many_to_one":
return evaluator.evaluate_align_date_content_costs_many_to_one(
pred_tl, groundtruth)
def zero_scores():
return {'f_score': 0., 'precision': 0., 'recall': 0.}
def evaluate_dates(pred, ground_truth):
pred_dates = pred.get_dates()
ref_dates = ground_truth.get_dates()
shared = pred_dates.intersection(ref_dates)
n_shared = len(shared)
n_pred = len(pred_dates)
n_ref = len(ref_dates)
    prec = n_shared / n_pred if n_pred else 0.
    rec = n_shared / n_ref if n_ref else 0.
if prec + rec == 0:
f_score = 0
else:
f_score = 2 * prec * rec / (prec + rec)
return {
'precision': prec,
'recall': rec,
'f_score': f_score,
}
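# Worked example (illustrative): with 3 predicted dates, 4 reference dates and 2 shared dates,
# precision = 2/3, recall = 2/4 = 0.5 and f_score = 2 * (2/3) * 0.5 / (2/3 + 0.5) ~= 0.571.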
def get_average_results(tmp_results):
rouge_1 = zero_scores()
rouge_2 = zero_scores()
date_prf = zero_scores()
for rouge_res, date_res, _ in tmp_results:
metrics = [m for m in date_res.keys() if m != 'f_score']
for m in metrics:
rouge_1[m] += rouge_res['rouge_1'][m]
rouge_2[m] += rouge_res['rouge_2'][m]
date_prf[m] += date_res[m]
n = len(tmp_results)
for result in [rouge_1, rouge_2, date_prf]:
for k in ['precision', 'recall']:
result[k] /= n
prec = result['precision']
rec = result['recall']
if prec + rec == 0:
result['f_score'] = 0.
else:
result['f_score'] = (2 * prec * rec) / (prec + rec)
return rouge_1, rouge_2, date_prf
def evaluate(tls_model, dataset, result_path, trunc_timelines=False, time_span_extension=0):
results = []
metric = 'align_date_content_costs_many_to_one'
evaluator = rouge.TimelineRougeEvaluator(measures=["rouge_1", "rouge_2"])
n_topics = len(dataset.collections)
for i, collection in enumerate(dataset.collections):
ref_timelines = [TilseTimeline(tl.date_to_summaries)
for tl in collection.timelines]
topic = collection.name
n_ref = len(ref_timelines)
if trunc_timelines:
ref_timelines = data.truncate_timelines(ref_timelines, collection)
for j, ref_timeline in enumerate(ref_timelines):
print(f'topic {i+1}/{n_topics}: {topic}, ref timeline {j+1}/{n_ref}')
tls_model.load(ignored_topics=[collection.name])
ref_dates = sorted(ref_timeline.dates_to_summaries)
start, end = data.get_input_time_span(ref_dates, time_span_extension)
collection.start = start
collection.end = end
#utils.plot_date_stats(collection, ref_dates)
l = len(ref_dates)
k = data.get_average_summary_length(ref_timeline)
pred_timeline_ = tls_model.predict(
collection,
max_dates=l,
max_summary_sents=k,
ref_tl=ref_timeline # only oracles need this
)
# print('*** PREDICTED ***')
# utils.print_tl(pred_timeline_)
print('timeline done')
pred_timeline = TilseTimeline(pred_timeline_.date_to_summaries)
sys_len = len(pred_timeline.get_dates())
ground_truth = TilseGroundTruth([ref_timeline])
rouge_scores = get_scores(
metric, pred_timeline, ground_truth, evaluator)
date_scores = evaluate_dates(pred_timeline, ground_truth)
print('sys-len:', sys_len, 'gold-len:', l, 'gold-k:', k)
print('Alignment-based ROUGE:')
pprint(rouge_scores)
print('Date selection:')
pprint(date_scores)
print('-' * 100)
results.append((rouge_scores, date_scores, pred_timeline_.to_dict()))
avg_results = get_average_results(results)
print('Average results:')
pprint(avg_results)
output = {
'average': avg_results,
'results': results,
}
utils.write_json(output, result_path)
def main(args):
dataset_path = Path(args.dataset)
if not dataset_path.exists():
raise FileNotFoundError(f'Dataset not found: {args.dataset}')
dataset = data.Dataset(dataset_path)
dataset_name = dataset_path.name
if args.method == 'datewise':
resources = Path(args.resources)
models_path = resources / 'supervised_date_ranker.{}.pkl'.format(
dataset_name
)
# load regression models for date ranking
key_to_model = utils.load_pkl(models_path)
date_ranker = datewise.SupervisedDateRanker(method='regression')
sent_collector = datewise.PM_Mean_SentenceCollector(
clip_sents=5, pub_end=2)
summarizer = summarizers.CentroidOpt()
system = datewise.DatewiseTimelineGenerator(
date_ranker=date_ranker,
summarizer=summarizer,
sent_collector=sent_collector,
key_to_model = key_to_model
)
elif args.method == 'clust':
cluster_ranker = clust.ClusterDateMentionCountRanker()
clusterer = clust.TemporalMarkovClusterer()
summarizer = summarizers.CentroidOpt()
system = clust.ClusteringTimelineGenerator(
cluster_ranker=cluster_ranker,
clusterer=clusterer,
summarizer=summarizer,
clip_sents=5,
unique_dates=True,
)
else:
raise ValueError(f'Method not found: {args.method}')
if dataset_name == 'entities':
evaluate(system, dataset, args.output, trunc_timelines=True, time_span_extension=7)
else:
evaluate(system, dataset, args.output, trunc_timelines=False, time_span_extension=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True)
parser.add_argument('--method', required=True)
parser.add_argument('--resources', default=None,
help='model resources for tested method')
parser.add_argument('--output', default=None)
main(parser.parse_args())
| 6,868
| 33.00495
| 92
|
py
|
news-tls
|
news-tls-master/experiments/run_without_eval.py
|
import argparse
from pathlib import Path
from news_tls import utils, data, datewise, clust, summarizers
from pprint import pprint
def run(tls_model, dataset, outpath):
n_topics = len(dataset.collections)
outputs = []
for i, collection in enumerate(dataset.collections):
topic = collection.name
times = [a.time for a in collection.articles()]
# setting start, end, L, K manually instead of from ground-truth
collection.start = min(times)
collection.end = max(times)
l = 8 # timeline length (dates)
k = 1 # number of sentences in each summary
timeline = tls_model.predict(
collection,
max_dates=l,
max_summary_sents=k,
)
print('*** TIMELINE ***')
utils.print_tl(timeline)
outputs.append(timeline.to_dict())
if outpath:
utils.write_json(outputs, outpath)
def main(args):
dataset_path = Path(args.dataset)
if not dataset_path.exists():
raise FileNotFoundError(f'Dataset not found: {args.dataset}')
dataset = data.Dataset(dataset_path)
dataset_name = dataset_path.name
if args.method == 'datewise':
# load regression models for date ranking
key_to_model = utils.load_pkl(args.model)
models = list(key_to_model.values())
date_ranker = datewise.SupervisedDateRanker(method='regression')
        # there are multiple models (for cross-validation);
        # we just use an arbitrary one here, the first model
date_ranker.model = models[0]
sent_collector = datewise.PM_Mean_SentenceCollector(
clip_sents=2, pub_end=2)
summarizer = summarizers.CentroidOpt()
system = datewise.DatewiseTimelineGenerator(
date_ranker=date_ranker,
summarizer=summarizer,
sent_collector=sent_collector,
key_to_model = key_to_model
)
elif args.method == 'clust':
cluster_ranker = clust.ClusterDateMentionCountRanker()
clusterer = clust.TemporalMarkovClusterer()
summarizer = summarizers.CentroidOpt()
system = clust.ClusteringTimelineGenerator(
cluster_ranker=cluster_ranker,
clusterer=clusterer,
summarizer=summarizer,
clip_sents=2,
unique_dates=True,
)
else:
raise ValueError(f'Method not found: {args.method}')
run(system, dataset, args.output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True)
parser.add_argument('--method', required=True)
parser.add_argument('--model', default=None,
help='model for date ranker')
parser.add_argument('--output', default=None)
main(parser.parse_args())
| 2,798
| 30.1
| 72
|
py
|
news-tls
|
news-tls-master/preprocessing/preprocess_heideltime.py
|
import os
import argparse
import arrow
import pathlib
import subprocess
import collections
import shutil
from news_tls import utils
def write_input_articles(articles, out_dir):
utils.force_mkdir(out_dir)
date_to_articles = collections.defaultdict(list)
for a in articles:
date = arrow.get(a['time']).datetime.date()
date_to_articles[date].append(a)
for date in sorted(date_to_articles):
utils.force_mkdir(out_dir / str(date))
date_articles = date_to_articles[date]
for a in date_articles:
fpath = out_dir / str(date) / '{}.txt'.format(a['id'])
with open(fpath, 'w') as f:
f.write(a['text'])
def delete_input_articles(articles, out_dir):
date_to_articles = collections.defaultdict(list)
for a in articles:
date = arrow.get(a['time']).datetime.date()
date_to_articles[date].append(a)
for date in sorted(date_to_articles):
date_articles = date_to_articles[date]
for a in date_articles:
fpath = out_dir / str(date) / '{}.txt'.format(a['id'])
if os.path.exists(fpath):
os.remove(fpath)
def heideltime_preprocess(dataset_dir, heideltime_path):
apply_heideltime = heideltime_path / 'apply-heideltime.jar'
heideltime_config = heideltime_path / 'config.props'
for topic in os.listdir(dataset_dir):
print('TOPIC:', topic)
articles = utils.read_jsonl_gz(dataset_dir / topic / 'articles.tokenized.jsonl.gz')
out_dir = dataset_dir / topic / 'time_annotated'
utils.force_mkdir(out_dir)
write_input_articles(articles, out_dir)
subprocess.run([
'java',
'-jar',
str(apply_heideltime),
str(heideltime_config),
str(out_dir),
'txt'
])
delete_input_articles(articles, out_dir)
def main(args):
dataset_dir = pathlib.Path(args.dataset)
heideltime_path = pathlib.Path(args.heideltime)
if not dataset_dir.exists():
raise FileNotFoundError('dataset not found')
if not heideltime_path.exists():
raise FileNotFoundError('heideltime not found')
heideltime_preprocess(dataset_dir, heideltime_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--heideltime', required=True,
help='location of heideltime software')
parser.add_argument('--dataset', required=True, help='dataset directory')
main(parser.parse_args())
| 2,542
| 30.012195
| 91
|
py
|
news-tls
|
news-tls-master/preprocessing/preprocess_tokenize.py
|
import os
import argparse
import pathlib
import spacy
from news_tls import utils
def tokenize_dataset(root, spacy_model):
nlp = spacy.load(spacy_model)
for topic in sorted(os.listdir(root)):
print('TOPIC:', topic)
if os.path.exists(root / topic / 'articles.jsonl.gz'):
articles = list(utils.read_jsonl_gz(root / topic / 'articles.jsonl.gz'))
elif os.path.exists(root / topic / 'articles.jsonl'):
articles = list(utils.read_jsonl(root / topic / 'articles.jsonl'))
else:
continue
jsonl_out_path = root / topic / 'articles.tokenized.jsonl'
out_batch = []
for i, a in enumerate(articles):
tokenized_doc = ''
doc = nlp(a['text'])
for sent in doc.sents:
tokens = [tok.text for tok in sent if not tok.text.isspace()]
tokenized_doc += ' '.join(tokens) + '\n'
a['text'] = tokenized_doc.strip()
out_batch.append(a)
if i % 100 == 0:
utils.write_jsonl(out_batch, jsonl_out_path, override=False)
out_batch = []
print(i)
utils.write_jsonl(out_batch, jsonl_out_path, override=False)
gz_out_path = root / topic / 'articles.tokenized.jsonl.gz'
utils.gzip_file(jsonl_out_path, gz_out_path, delete_old=True)
def main(args):
dataset_dir = pathlib.Path(args.dataset)
if not dataset_dir.exists():
raise FileNotFoundError('dataset not found')
tokenize_dataset(dataset_dir, args.spacy_model)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='dataset directory')
parser.add_argument('--spacy-model', default='en_core_web_sm')
main(parser.parse_args())
| 1,818
| 31.482143
| 84
|
py
|
news-tls
|
news-tls-master/preprocessing/preprocess_spacy.py
|
import os
import pathlib
import argparse
import arrow
import spacy
import datetime
import collections
import codecs
from xml.etree import ElementTree
from news_tls import utils
from news_tls.data import Token, Sentence, Article
from pprint import pprint
def extract_time_tag_value(time_tag):
value = [(None, None)]
if 'type' not in time_tag.attrib:
return value
elif time_tag.attrib['type'] == 'DATE':
formats = ['%Y-%m-%d', '%Y-%m', '%Y']
elif time_tag.attrib['type'] == 'TIME':
formats = ['%Y-%m-%dT%H:%M', '%Y-%m-%dTMO', '%Y-%m-%dTEV',
'%Y-%m-%dTNI', '%Y-%m-%dTAF']
else:
return value
for format in formats:
try:
time = datetime.datetime.strptime(
time_tag.attrib['value'], format)
value = [(time, format)]
except:
pass
return value
def parse_timeml_doc(raw):
# cleanup heideltime bugs
replace_pairs = [
("T24", "T12"),
(")TMO", "TMO"),
(")TAF", "TAF"),
(")TEV", "TEV"),
(")TNI", "TNI"),
]
for old, new in replace_pairs:
raw = raw.replace(old, new)
tokens = []
time_values = []
try:
root = ElementTree.fromstring(raw)
except ElementTree.ParseError as e:
return None, None
tokens.extend(root.text.split())
time_values.extend([(None, None)] * len(tokens))
for time_tag in root:
if time_tag.text is None:
continue
split_text = time_tag.text.split()
tokens.extend(split_text)
value = extract_time_tag_value(time_tag)
time_values.extend(value * len(split_text))
split_tail = time_tag.tail.split()
tokens.extend(split_tail)
time_values.extend([(None, None)] * len(split_tail))
return tokens, time_values
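# Illustrative usage sketch (hypothetical, not from the original code): a minimal hand-written
# TimeML-style snippet with one TIMEX3 tag, the kind of markup parsed by the function above.
def _example_parse_timeml():  # hypothetical helper, for illustration only
    raw = ('<TimeML>Protests began on '
           '<TIMEX3 type="DATE" value="2011-03-15">15 March</TIMEX3> in the city .</TimeML>')
    return parse_timeml_doc(raw)  # -> (tokens, per-token (time, format) tuples)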
def read_articles(articles, tmp_dir):
date_to_articles = collections.defaultdict(list)
for a in articles:
date = arrow.get(a['time']).date()
date_to_articles[date].append(a)
for date in sorted(date_to_articles):
date_articles = date_to_articles[date]
for a in date_articles:
fpath = tmp_dir / str(date) / '{}.txt.timeml'.format(a['id'])
if os.path.exists(fpath):
with codecs.open(fpath, 'r', encoding='utf-8') as f:
raw = f.read()
yield a, raw
def preprocess_title(title, pub_time, nlp):
doc = nlp(title)
token_objects = []
for token in doc:
token_object = Token(
token.orth_,
token.lemma_,
token.tag_,
token.ent_type_,
token.ent_iob_,
token.dep_,
token.head.i,
None,
None,
)
token_objects.append(token_object)
title_object = Sentence(title, token_objects, pub_time, None, None)
return title_object
def preprocess_article(old_article, timeml_raw, nlp):
tokens, time_values = parse_timeml_doc(timeml_raw)
if tokens is None:
return None
doc = spacy.tokens.Doc(nlp.vocab, words=tokens)
nlp.tagger(doc)
nlp.entity(doc)
nlp.parser(doc)
token_objects = []
for token in doc:
token_object = Token(
token.orth_,
token.lemma_,
token.tag_,
token.ent_type_,
token.ent_iob_,
token.dep_,
token.head.i,
time_values[token.i][0],
time_values[token.i][1],
)
token_objects.append(token_object)
sentence_objects = []
for sent in doc.sents:
sent_tokens = token_objects[sent.start:sent.end]
times = [tok.time for tok in sent_tokens if tok.time]
if times:
time = times[0]
else:
time = None
pub_time = arrow.get(old_article['time'])
sent_object = Sentence(str(sent), sent_tokens, pub_time, time, None)
sentence_objects.append(sent_object)
raw_title = old_article.get('title')
if raw_title:
title_object = preprocess_title(raw_title, pub_time, nlp)
else:
title_object = None
new_article = Article(
title=raw_title,
text=old_article['text'],
time=old_article['time'],
id=old_article.get('id'),
sentences=sentence_objects,
title_sentence=title_object
)
return new_article
def preprocess_dataset(root, nlp):
for topic in sorted(os.listdir(root)):
print('TOPIC:', topic)
article_path = root / topic / 'articles.tokenized.jsonl.gz'
articles = utils.read_jsonl_gz(article_path)
h_output_dir = root / topic / 'time_annotated'
out_path = root / topic / 'articles.preprocessed.jsonl'
out_batch = []
i = 0
for old_a, timeml_raw in read_articles(articles, h_output_dir):
a = preprocess_article(old_a, timeml_raw, nlp)
if a:
out_batch.append(a.to_dict())
else:
date = arrow.get(old_a['time']).date()
print('cannot process:', date, old_a['id'])
if i % 100 == 0:
print('writing batch,', i, 'articles done')
if i == 0:
utils.write_jsonl(out_batch, out_path, override=True)
else:
utils.write_jsonl(out_batch, out_path, override=False)
out_batch = []
i += 1
utils.write_jsonl(out_batch, out_path, override=False)
gz_path = str(out_path) + '.gz'
utils.gzip_file(inpath=out_path, outpath=gz_path, delete_old=True)
def main(args):
dataset_dir = pathlib.Path(args.dataset)
if not dataset_dir.exists():
raise FileNotFoundError('dataset not found')
nlp = spacy.load(args.spacy_model)
preprocess_dataset(dataset_dir, nlp)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True, help='dataset directory')
parser.add_argument('--spacy-model', default='en_core_web_sm')
main(parser.parse_args())
| 6,135
| 27.539535
| 77
|
py
|
news-tls
|
news-tls-master/news_tls/datewise.py
|
import random
import datetime
import collections
import numpy as np
from scipy import sparse
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from news_tls import data, utils, summarizers
random.seed(42)
class DatewiseTimelineGenerator():
def __init__(self,
date_ranker=None,
summarizer=None,
sent_collector=None,
clip_sents=5,
pub_end=2,
key_to_model=None):
self.date_ranker = date_ranker or MentionCountDateRanker()
self.sent_collector = sent_collector or PM_Mean_SentenceCollector(
clip_sents, pub_end)
self.summarizer = summarizer or summarizers.CentroidOpt()
self.key_to_model = key_to_model
def predict(self,
collection,
max_dates=10,
max_summary_sents=1,
ref_tl=None,
input_titles=False,
output_titles=False,
output_body_sents=True):
print('vectorizer...')
vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)
vectorizer.fit([s.raw for a in collection.articles() for s in a.sentences])
print('date ranking...')
ranked_dates = self.date_ranker.rank_dates(collection)
start = collection.start.date()
end = collection.end.date()
ranked_dates = [d for d in ranked_dates if start <= d <= end]
print('candidates & summarization...')
dates_with_sents = self.sent_collector.collect_sents(
ranked_dates,
collection,
vectorizer,
include_titles=input_titles,
)
def sent_filter(sent):
"""
Returns True if sentence is allowed to be in a summary.
"""
lower = sent.raw.lower()
if not any([kw in lower for kw in collection.keywords]):
return False
elif not output_titles and sent.is_title:
return False
elif not output_body_sents and not sent.is_sent:
return False
else:
return True
timeline = []
l = 0
for i, (d, d_sents) in enumerate(dates_with_sents):
if l >= max_dates:
break
summary = self.summarizer.summarize(
d_sents,
k=max_summary_sents,
vectorizer=vectorizer,
filter=sent_filter
)
if summary:
time = datetime.datetime(d.year, d.month, d.day)
timeline.append((time, summary))
l += 1
timeline.sort(key=lambda x: x[0])
return data.Timeline(timeline)
def load(self, ignored_topics):
key = ' '.join(sorted(ignored_topics))
if self.key_to_model:
self.date_ranker.model = self.key_to_model[key]
################################ DATE RANKING ##################################
class DateRanker:
def rank_dates(self, collection, date_buckets):
raise NotImplementedError
class RandomDateRanker(DateRanker):
def rank_dates(self, collection):
dates = [a.time.date() for a in collection.articles()]
random.shuffle(dates)
return dates
class MentionCountDateRanker(DateRanker):
def rank_dates(self, collection):
date_to_count = collections.defaultdict(int)
for a in collection.articles():
for s in a.sentences:
d = s.get_date()
if d:
date_to_count[d] += 1
ranked = sorted(date_to_count.items(), key=lambda x: x[1], reverse=True)
return [d for d, _ in ranked]
class PubCountDateRanker(DateRanker):
def rank_dates(self, collection):
dates = [a.time.date() for a in collection.articles()]
counts = collections.Counter(dates)
ranked = sorted(counts.items(), key=lambda x: x[1], reverse=True)
return [d for d, _ in ranked]
class SupervisedDateRanker(DateRanker):
def __init__(self, model=None, method='classification'):
self.model = model
self.method = method
if method not in ['classification', 'regression']:
raise ValueError('method must be classification or regression')
def rank_dates(self, collection):
dates, X = self.extract_features(collection)
X = normalize(X, norm='l2', axis=0)
if self.method == 'classification':
Y = [y[1] for y in self.model['model'].predict_proba(X)]
else:
Y = self.model['model'].predict(X)
scored = sorted(zip(dates, Y), key=lambda x: x[1], reverse=True)
ranked = [x[0] for x in scored]
# for d, score in scored[:16]:
# print(d, score)
return ranked
def extract_features(self, collection):
date_to_stats = self.extract_date_statistics(collection)
dates = sorted(date_to_stats)
X = []
for d in dates:
feats = [
date_to_stats[d]['sents_total'],
date_to_stats[d]['sents_before'],
date_to_stats[d]['sents_after'],
date_to_stats[d]['docs_total'],
date_to_stats[d]['docs_before'],
date_to_stats[d]['docs_after'],
date_to_stats[d]['docs_published'],
]
X.append(np.array(feats))
X = np.array(X)
return dates, X
def extract_date_statistics(self, collection):
default = lambda: {
'sents_total': 0,
'sents_same_day': 0,
'sents_before': 0,
'sents_after': 0,
'docs_total': 0,
'docs_same_day': 0,
'docs_before': 0,
'docs_after': 0,
'docs_published': 0
}
date_to_feats = collections.defaultdict(default)
for a in collection.articles():
pub_date = a.time.date()
mentioned_dates = []
for s in a.sentences:
if s.time and s.time_level == 'd':
d = s.time.date()
date_to_feats[d]['sents_total'] += 1
if d < pub_date:
date_to_feats[d]['sents_before'] += 1
elif d > pub_date:
date_to_feats[d]['sents_after'] += 1
else:
date_to_feats[d]['sents_same_day'] += 1
mentioned_dates.append(d)
for d in sorted(set(mentioned_dates)):
date_to_feats[d]['docs_total'] += 1
if d < pub_date:
date_to_feats[d]['docs_before'] += 1
elif d > pub_date:
date_to_feats[d]['docs_after'] += 1
else:
date_to_feats[d]['docs_same_day'] += 1
return date_to_feats
############################## CANDIDATE SELECTION #############################
class M_SentenceCollector:
def collect_sents(self, ranked_dates, collection, vectorizer, include_titles):
date_to_ment = collections.defaultdict(list)
for a in collection.articles():
for s in a.sentences:
ment_date = s.get_date()
if ment_date:
date_to_ment[ment_date].append(s)
for d in ranked_dates:
if d in date_to_ment:
d_sents = date_to_ment[d]
if d_sents:
yield (d, d_sents)
class P_SentenceCollector:
def __init__(self, clip_sents=5, pub_end=2):
self.clip_sents = clip_sents
self.pub_end = pub_end
def collect_sents(self, ranked_dates, collection, vectorizer, include_titles):
date_to_pub = collections.defaultdict(list)
for a in collection.articles():
pub_date = a.time.date()
if include_titles:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
if a.title_sentence:
date_to_pub[pub_date2].append(a.title_sentence)
for s in a.sentences[:self.clip_sents]:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
date_to_pub[pub_date2].append(s)
for d in ranked_dates:
if d in date_to_pub:
d_sents = date_to_pub[d]
if d_sents:
yield (d, d_sents)
class PM_All_SentenceCollector:
def __init__(self, clip_sents=5, pub_end=2):
self.clip_sents = clip_sents
self.pub_end = pub_end
def collect_sents(self, ranked_dates, collection, vectorizer, include_titles):
date_to_sents = collections.defaultdict(list)
for a in collection.articles():
pub_date = a.time.date()
if include_titles:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
if a.title_sentence:
date_to_sents[pub_date2].append(a.title_sentence)
for j, s in enumerate(a.sentences):
ment_date = s.get_date()
if ment_date:
date_to_sents[ment_date].append(s)
elif j <= self.clip_sents:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
date_to_sents[pub_date2].append(s)
for d in ranked_dates:
if d in date_to_sents:
d_sents = date_to_sents[d]
if d_sents:
yield (d, d_sents)
class PM_Mean_SentenceCollector:
def __init__(self, clip_sents=5, pub_end=2):
self.clip_sents = clip_sents
self.pub_end = pub_end
def collect_sents(self, ranked_dates, collection, vectorizer, include_titles):
date_to_pub, date_to_ment = self._first_pass(
collection, include_titles)
for d, sents in self._second_pass(
ranked_dates, date_to_pub, date_to_ment, vectorizer):
yield d, sents
def _first_pass(self, collection, include_titles):
date_to_ment = collections.defaultdict(list)
date_to_pub = collections.defaultdict(list)
for a in collection.articles():
pub_date = a.time.date()
if include_titles:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
if a.title_sentence:
date_to_pub[pub_date2].append(a.title_sentence)
for j, s in enumerate(a.sentences):
ment_date = s.get_date()
if ment_date:
date_to_ment[ment_date].append(s)
elif j <= self.clip_sents:
for k in range(self.pub_end):
pub_date2 = pub_date - datetime.timedelta(days=k)
date_to_pub[pub_date2].append(s)
return date_to_pub, date_to_ment
def _second_pass(self, ranked_dates, date_to_pub, date_to_ment, vectorizer):
for d in ranked_dates:
ment_sents = date_to_ment[d]
pub_sents = date_to_pub[d]
selected_sents = []
if len(ment_sents) > 0 and len(pub_sents) > 0:
X_ment = vectorizer.transform([s.raw for s in ment_sents])
X_pub = vectorizer.transform([s.raw for s in pub_sents])
C_ment = sparse.csr_matrix(X_ment.sum(0))
C_pub = sparse.csr_matrix(X_pub.sum(0))
ment_weight = 1 / len(ment_sents)
pub_weight = 1 / len(pub_sents)
C_mean = (ment_weight * C_ment + pub_weight * C_pub)
_, indices = C_mean.nonzero()
C_date = sparse.lil_matrix(C_ment.shape)
for i in indices:
v_pub = C_pub[0, i]
v_ment = C_ment[0, i]
if v_pub == 0 or v_ment == 0:
C_date[0, i] = 0
else:
C_date[0, i] = pub_weight * v_pub + ment_weight * v_ment
ment_sims = cosine_similarity(C_date, X_ment)[0]
pub_sims = cosine_similarity(C_date, X_pub)[0]
all_sims = np.concatenate([ment_sims, pub_sims])
                sims_sorted = sorted(all_sims, reverse=True)
                cut = detect_knee_point(sims_sorted)
                thresh = sims_sorted[cut]
for s, sim in zip(ment_sents, ment_sims):
if sim > 0 and sim > thresh:
selected_sents.append(s)
for s, sim in zip(pub_sents, pub_sims):
if sim > 0 and sim > thresh:
selected_sents.append(s)
if len(selected_sents) == 0:
selected_sents = ment_sents + pub_sents
elif len(ment_sents) > 0:
selected_sents = ment_sents
elif len(pub_sents) > 0:
selected_sents = pub_sents
yield d, selected_sents
def detect_knee_point(values):
"""
From:
https://stackoverflow.com/questions/2018178/finding-the-best-trade-off-point-on-a-curve
"""
# get coordinates of all the points
n_points = len(values)
all_coords = np.vstack((range(n_points), values)).T
# get the first point
first_point = all_coords[0]
# get vector between first and last point - this is the line
line_vec = all_coords[-1] - all_coords[0]
line_vec_norm = line_vec / np.sqrt(np.sum(line_vec ** 2))
vec_from_first = all_coords - first_point
scalar_prod = np.sum(
vec_from_first * np.tile(line_vec_norm, (n_points, 1)), axis=1)
vec_from_first_parallel = np.outer(scalar_prod, line_vec_norm)
vec_to_line = vec_from_first - vec_from_first_parallel
# distance to line is the norm of vec_to_line
dist_to_line = np.sqrt(np.sum(vec_to_line ** 2, axis=1))
# knee/elbow is the point with max distance value
best_idx = np.argmax(dist_to_line)
return best_idx
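# Illustrative usage sketch (hypothetical, not from the original code): the knee of a descending
# similarity curve is what the sentence collector above uses as an adaptive threshold.
def _example_knee_threshold():  # hypothetical helper, for illustration only
    sims = [0.92, 0.90, 0.88, 0.35, 0.10, 0.05]  # already sorted in descending order
    cut = detect_knee_point(sims)
    return sims[cut]  # similarity value at the detected elbow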
| 14,342
| 36.351563
| 91
|
py
|
news-tls
|
news-tls-master/news_tls/explore_dataset.py
|
import os
import argparse
import collections
from news_tls import utils
from news_tls.data import (Dataset,
truncate_timelines,
get_input_time_span,
get_average_summary_length)
from tilse.data.timelines import Timeline as TilseTimeline
from tilse.data.timelines import GroundTruth as TilseGroundTruth
from pprint import pprint
def explore_dataset(dataset, trunc_timelines, time_span_extension):
for collection in dataset.collections:
print('topic:', collection.name)
ref_timelines = [TilseTimeline(tl.date_to_summaries)
for tl in collection.timelines]
if trunc_timelines:
ref_timelines = truncate_timelines(ref_timelines, collection)
# each collection/topic can have multiple reference timelines
for i, ref_timeline in enumerate(ref_timelines):
ref_dates = sorted(ref_timeline.dates_to_summaries)
# depending on the reference timeline, we set the time range in
# article collection differently
start, end = get_input_time_span(ref_dates, time_span_extension)
collection.start = start
collection.end = end
#utils.plot_date_stats(collection, ref_dates)
l = len(ref_dates)
k = get_average_summary_length(ref_timeline)
print(f'timeline:{i}, k:{k}, l:{l}')
print()
def main(args):
dataset_name = os.path.basename(args.dataset)
dataset = Dataset(args.dataset)
# these are settings we only apply to our new dataset (entities) but not to
# crisis/t17 to keep these comparable to previous work
if dataset_name == 'entities':
explore_dataset(dataset, trunc_timelines=True, time_span_extension=7)
else:
explore_dataset(dataset, trunc_timelines=False, time_span_extension=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True)
main(parser.parse_args())
| 2,056
| 33.283333
| 79
|
py
|
news-tls
|
news-tls-master/news_tls/utils.py
|
import pickle
import json
import numpy as np
import gzip
import io
import datetime
import codecs
import tarfile
import pandas
import shutil
import os
import collections
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
def force_mkdir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
def dict_to_dense_vector(d, key_to_idx):
x = np.zeros(len(key_to_idx))
for key, i in key_to_idx.items():
x[i] = d[key]
return x
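# Illustrative usage sketch (hypothetical, not from the original code): keys are mapped to fixed
# positions, so dicts sharing a schema become aligned dense vectors.
def _example_dense_vector():  # hypothetical helper, for illustration only
    key_to_idx = {'docs_total': 0, 'sents_total': 1}
    return dict_to_dense_vector({'docs_total': 3, 'sents_total': 7}, key_to_idx)  # array([3., 7.])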
def read_file(path):
with codecs.open(path, 'r', encoding='utf-8', errors='ignore') as f:
text = f.read()
return text
def write_file(s, path):
with open(path, 'w') as f:
f.write(s)
def read_json(path):
text = read_file(path)
return json.loads(text)
def read_jsonl(path):
with open(path) as f:
for line in f:
yield json.loads(line)
def write_jsonl(items, path, batch_size=100, override=True):
if override:
with open(path, 'w'):
pass
batch = []
for i, x in enumerate(items):
if i > 0 and i % batch_size == 0:
with open(path, 'a') as f:
output = '\n'.join(batch) + '\n'
f.write(output)
batch = []
raw = json.dumps(x)
batch.append(raw)
if batch:
with open(path, 'a') as f:
output = '\n'.join(batch) + '\n'
f.write(output)
def write_json(obj, path):
with open(path, 'w') as f:
json.dump(obj, f)
def load_pkl(path):
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
def dump_pkl(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def write_gzip(text, path):
with gzip.open(path, 'wb') as output:
with io.TextIOWrapper(output, encoding='utf-8') as enc:
enc.write(text)
def read_gzip(path):
with gzip.open(path, 'rb') as input_file:
with io.TextIOWrapper(input_file) as dec:
content = dec.read()
return content
def read_jsonl_gz(path):
with gzip.open(path, 'rb') as input_file:
with io.TextIOWrapper(input_file) as dec:
for line in dec:
yield json.loads(line)
def read_tar_gz(path):
contents = []
with tarfile.open(path, 'r:gz') as tar:
for member in tar.getmembers():
f = tar.extractfile(member)
content = f.read()
contents.append(content)
return contents
def read_json_tar_gz(path):
contents = read_tar_gz(path)
raw_data = contents[0]
return json.loads(raw_data, strict=False)
def get_date_range(start, end):
diff = end - start
date_range = []
for n in range(diff.days + 1):
t = start + datetime.timedelta(days=n)
date_range.append(t)
return date_range
def days_between(t1, t2):
return abs((t1 - t2).days)
def any_in(items, target_list):
return any([item in target_list for item in items])
def csr_item_generator(M):
"""Generates tuples (i,j,x) of sparse matrix."""
for row in range(len(M.indptr) - 1):
i,j = M.indptr[row], M.indptr[row + 1]
for k in range(i,j):
yield (row, M.indices[k], M.data[k])
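# Illustrative usage sketch (hypothetical, not from the original code): iterating the non-zero
# entries of a small CSR matrix; scipy is assumed available (it is used elsewhere in the package).
def _example_csr_items():  # hypothetical helper, for illustration only
    from scipy import sparse
    M = sparse.csr_matrix([[0., 2.], [3., 0.]])
    return list(csr_item_generator(M))  # -> [(0, 1, 2.0), (1, 0, 3.0)]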
def max_normalize_matrix(A):
try:
max_ = max(A.data)
for i, j, x in csr_item_generator(A):
A[i, j] = x / max_
except:
pass
return A
def gzip_file(inpath, outpath, delete_old=False):
with open(inpath, 'rb') as infile:
with gzip.open(outpath, 'wb') as outfile:
outfile.writelines(infile)
if delete_old:
os.remove(inpath)
def normalise(X, method='standard'):
if method == 'max':
return X / X.max(0)
elif method == 'minmax':
return MinMaxScaler().fit_transform(X)
elif method == 'standard':
return StandardScaler().fit_transform(X)
elif method == 'robust':
return RobustScaler().fit_transform(X)
else:
raise ValueError('normalisation method not known: {}'.format(method))
def normalize_vectors(vector_batches, mode='standard'):
if mode == 'max':
normalize = lambda X: X / X.max(0)
elif mode == 'minmax':
normalize = lambda X: MinMaxScaler().fit_transform(X)
elif mode == 'standard':
normalize = lambda X: StandardScaler().fit_transform(X)
elif mode == 'robust':
normalize = lambda X: RobustScaler().fit_transform(X)
else:
normalize = lambda X: X
norm_vectors = []
for vectors in vector_batches:
X = np.array(vectors)
X_norm = normalize(X)
norm_vectors += list(X_norm)
return norm_vectors
def strip_to_date(t):
return datetime.datetime(t.year, t.month, t.day)
def print_tl(tl):
for t, sents in tl.items:
print('[{}]'.format(t.date()))
for s in sents:
print(' '.join(s.split()))
print('---')
| 4,944
| 22.660287
| 77
|
py
|
news-tls
|
news-tls-master/news_tls/data.py
|
import os
import pathlib
import arrow
import datetime
import string
from spacy.lang.en.stop_words import STOP_WORDS
from collections import defaultdict
from tilse.data.timelines import Timeline as TilseTimeline
from news_tls import utils
from pprint import pprint
PUNCT_SET = set(string.punctuation)
def load_dataset(path):
dataset = Dataset(path)
return dataset
def load_article(article_dict):
sentences = [load_sentence(x) for x in article_dict['sentences']]
if article_dict.get('title_sentence'):
title_sentence = load_sentence(article_dict['title_sentence'])
title_sentence.is_title = True
else:
title_sentence = None
fix_dependency_heads(sentences)
time = arrow.get(article_dict['time']).datetime
time = time.replace(tzinfo=None)
return Article(
article_dict['title'],
article_dict['text'],
time,
article_dict['id'],
sentences,
title_sentence,
)
def load_sentence(sent_dict):
tokens = load_tokens(sent_dict['tokens'])
pub_time = utils.strip_to_date(arrow.get(sent_dict['pub_time']))
time = Sentence.get_time(tokens)
time_level = None
if time:
time = arrow.get(time)
time_format = Sentence.get_time_format(tokens)
time_level = None
if 'd' in time_format:
time = datetime.datetime(time.year, time.month, time.day)
time_level = 'd'
elif ('m' in time_format) or ('y' in time_format):
if 'm' in time_format:
start, end = time.span('month')
time_level = 'm'
else:
start, end = time.span('year')
time_level = 'y'
start = datetime.datetime(start.year, start.month, start.day)
end = datetime.datetime(end.year, end.month, end.day)
time = (start, end)
return Sentence(
sent_dict['raw'],
tokens,
pub_time,
time,
time_level
)
def load_tokens(tokens_dict):
token_dicts = decompress_dict_list(tokens_dict)
tokens = []
for token_ in token_dicts:
token = Token(
token_['raw'],
token_['lemma'],
token_['pos'],
token_['ner_type'],
token_['ner_iob'],
token_['dep'],
token_['head'],
token_['time'],
token_['time_format']
)
tokens.append(token)
return tokens
def fix_dependency_heads(sentences):
"""
Change from document to sentence-level head indices.
"""
i = 0
for s in sentences:
for tok in s.tokens:
tok.head -= i
i += len(s.tokens)
class Token:
def __init__(self, raw, lemma, pos, ner_type, ner_iob, dep, head, time,
time_format):
self.raw = raw
self.lemma = lemma
self.pos = pos
self.ner_type = ner_type
self.ner_iob = ner_iob
self.dep = dep
self.head = head
self.time = time
self.time_format = time_format
def to_dict(self):
time = self.time.isoformat() if self.time else None
return {
'raw': self.raw,
'lemma': self.lemma,
'pos': self.pos,
'ner_type': self.ner_type,
'ner_iob': self.ner_iob,
'dep': self.dep,
'head': self.head,
'time': time,
'time_format': self.time_format
}
class Sentence:
def __init__(self, raw, tokens, pub_time, time, time_level, is_title=False):
self.raw = raw
self.tokens = tokens
self.pub_time = pub_time
self.time = time
self.time_level = time_level
self.is_title = is_title
@staticmethod
def get_time(tokens):
for token in tokens:
if token.time:
return token.time
return None
@staticmethod
def get_time_format(tokens):
for token in tokens:
if token.time_format:
return token.time_format
return None
def get_date(self):
if self.time_level == 'd':
return self.time.date()
else:
return None
def clean_tokens(self):
tokens = [tok.raw.lower() for tok in self.tokens]
tokens = [tok for tok in tokens if
(tok not in STOP_WORDS and tok not in PUNCT_SET)]
return tokens
def _group_entity(self, entity_tokens):
surface_form = ' '.join([tok_.raw for tok_ in entity_tokens])
type = entity_tokens[-1].ner_type
return surface_form, type
def get_entities(self, return_other=False):
entities = []
tmp_entity = []
other = []
for tok in self.tokens:
if tok.ner_iob == 'B':
if len(tmp_entity) > 0:
e = self._group_entity(tmp_entity)
entities.append(e)
tmp_entity = [tok]
elif tok.ner_iob == 'I':
tmp_entity.append(tok)
else:
other.append(tok)
if len(tmp_entity) > 0:
e = self._group_entity(tmp_entity)
entities.append(e)
if return_other:
return entities, other
else:
return entities
def to_dict(self):
if self.time:
time = self.time.isoformat()
else:
time = None
tokens = [tok.to_dict() for tok in self.tokens]
tokens = compress_dict_list(tokens)
return {
'raw': self.raw,
'tokens': tokens,
'time': time,
'pub_time': self.pub_time.isoformat(),
}
class Article:
'''
Stores information about a news article.
'''
def __init__(self,
title,
text,
time,
id,
sentences=None,
title_sentence=None,
vector=None):
self.title = title
self.text = text
self.time = time
self.id = id
self.sentences = sentences
self.title_sentence = title_sentence
self.vector = vector
def to_dict(self):
title_sent_dict = None
if self.title_sentence:
title_sent_dict = self.title_sentence.to_dict()
return {
'title': self.title,
'text': self.text,
'time': str(self.time),
'id': self.id,
'sentences': [s.to_dict() for s in self.sentences],
'title_sentence': title_sent_dict,
'vector': self.vector
}
class Dataset:
def __init__(self, path):
self.path = pathlib.Path(path)
self.topics = self._get_topics()
self.collections = self._load_collections()
def _get_topics(self):
return sorted(os.listdir(self.path))
def _load_collections(self):
collections = []
for topic in self.topics:
topic_path = self.path / topic
c = ArticleCollection(topic_path)
collections.append(c)
return collections
class ArticleCollection:
def __init__(self, path, start=None, end=None):
self.name = os.path.basename(path)
self.path = pathlib.Path(path)
self.keywords = utils.read_json(self.path / 'keywords.json')
self.timelines = self._load_timelines()
self.start = start
self.end = end
def _load_timelines(self):
timelines = []
path = self.path / 'timelines.jsonl'
if not path.exists():
return []
for raw_tl in utils.read_jsonl(path):
if raw_tl:
tl_items = []
for t, s in raw_tl:
t = self.normalise_time(arrow.get(t))
tl_items.append((t, s))
tl = Timeline(tl_items)
timelines.append(tl)
return timelines
def articles(self):
path1 = self.path / 'articles.preprocessed.jsonl'
path2 = self.path / 'articles.preprocessed.jsonl.gz'
if path1.exists():
articles = utils.read_jsonl(path1)
else:
articles = utils.read_jsonl_gz(path2)
for a_ in articles:
a = load_article(a_)
t = self.normalise_time(a.time)
if self.start and t < self.start:
continue
if self.end and t > self.end:
break
yield a
def time_batches(self):
articles = utils.read_jsonl_gz(self.path / 'articles.preprocessed.jsonl.gz')
time = None
batch = []
for a_ in articles:
a = load_article(a_)
a_time = self.normalise_time(a.time)
if self.start and a_time < self.start:
continue
if self.end and a_time > self.end:
break
if time and a_time > time:
yield time, batch
time = a_time
batch = [a]
else:
batch.append(a)
time = a_time
yield time, batch
def times(self):
articles = utils.read_jsonl(self.path / 'articles.preprocessed.jsonl')
times = []
for a in articles:
t = arrow.get(a['time']).datetime
t = t.replace(tzinfo=None)
times.append(t)
return times
def normalise_time(self, t):
return datetime.datetime(t.year, t.month, t.day)
class Timeline:
def __init__(self, items):
self.items = sorted(items, key=lambda x: x[0])
self.time_to_summaries = dict((t, s) for t, s in items)
self.date_to_summaries = dict((t.date(), s) for t, s in items)
self.times = sorted(self.time_to_summaries)
def __getitem__(self, item):
return self.time_to_summaries[item]
def __len__(self):
return len(self.items)
def __str__(self):
lines = []
for t, summary in self.items:
lines.append('[{}]'.format(t.date()))
for sent in summary:
lines.append(sent)
lines.append('-'*50)
return '\n'.join(lines)
def to_dict(self):
items = [(str(t), s) for (t, s) in self.items]
return items
def compress_dict_list(dicts):
keys = sorted(dicts[0].keys())
data = []
for d in dicts:
values = [d[k] for k in keys]
data.append(values)
return {
'keys': keys,
'data': data
}
def decompress_dict_list(x):
dicts = []
keys = x['keys']
for values in x['data']:
d = dict(zip(keys, values))
dicts.append(d)
return dicts
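# Illustrative usage sketch (hypothetical, not from the original code): compress_dict_list stores
# the shared keys once and the values as rows; decompress_dict_list restores the original dicts.
def _example_token_roundtrip():  # hypothetical helper, for illustration only
    dicts = [{'raw': 'Storm', 'pos': 'NOUN'}, {'raw': 'hits', 'pos': 'VERB'}]
    packed = compress_dict_list(dicts)            # {'keys': ['pos', 'raw'], 'data': [...]}
    return decompress_dict_list(packed) == dicts  # -> True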
def truncate_timelines(ref_timelines_, collection):
input_dates = [t.date() for t, _ in collection.time_batches()]
input_date_set = set(input_dates)
input_start = min(input_dates)
input_end = max(input_dates)
ref_timelines = []
for tl in ref_timelines_:
dates_to_summaries = tl.dates_to_summaries
new_dates_to_summaries = {}
for d, s in dates_to_summaries.items():
if d >= input_start and d <= input_end:
window_start = d + datetime.timedelta(days=-2)
window_end = d + datetime.timedelta(days=+2)
window = utils.get_date_range(window_start, window_end)
if any([d2 in input_date_set for d2 in window]):
new_dates_to_summaries[d] = s
        tl = TilseTimeline(new_dates_to_summaries)
ref_timelines.append(tl)
return ref_timelines
def get_average_summary_length(ref_tl):
lens = []
for date, summary in ref_tl.dates_to_summaries.items():
lens.append(len(summary))
k = sum(lens) / len(lens)
return round(k)
def get_input_time_span(ref_dates, extension):
ref_start = utils.strip_to_date(min(ref_dates))
ref_end = utils.strip_to_date(max(ref_dates))
input_start = ref_start - datetime.timedelta(days=extension)
input_end = ref_end + datetime.timedelta(days=extension)
return input_start, input_end
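# Illustrative usage sketch (hypothetical, not from the original code): extending the reference
# date span by a few days on each side, as the evaluation scripts do for the `entities` dataset.
def _example_time_span():  # hypothetical helper, for illustration only
    ref_dates = [datetime.datetime(2011, 3, 10), datetime.datetime(2011, 3, 20)]
    return get_input_time_span(ref_dates, extension=7)  # -> (2011-03-03 00:00, 2011-03-27 00:00)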
| 12,124
| 27.462441
| 84
|
py
|
news-tls
|
news-tls-master/news_tls/clust.py
|
import numpy as np
import datetime
import itertools
import random
import collections
import markov_clustering as mc
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
from typing import List
from news_tls import utils, data, summarizers
class ClusteringTimelineGenerator():
def __init__(self,
clusterer=None,
cluster_ranker=None,
summarizer=None,
clip_sents=5,
key_to_model=None,
unique_dates=True):
self.clusterer = clusterer or TemporalMarkovClusterer()
self.cluster_ranker = cluster_ranker or ClusterDateMentionCountRanker()
self.summarizer = summarizer or summarizers.CentroidOpt()
self.key_to_model = key_to_model
self.unique_dates = unique_dates
self.clip_sents = clip_sents
def predict(self,
collection,
max_dates=10,
max_summary_sents=1,
ref_tl=None,
input_titles=False,
output_titles=False,
output_body_sents=True):
print('clustering articles...')
doc_vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
clusters = self.clusterer.cluster(collection, doc_vectorizer)
print('assigning cluster times...')
for c in clusters:
c.time = c.most_mentioned_time()
if c.time is None:
c.time = c.earliest_pub_time()
print('ranking clusters...')
ranked_clusters = self.cluster_ranker.rank(clusters, collection)
print('vectorizing sentences...')
raw_sents = [s.raw for a in collection.articles() for s in
a.sentences[:self.clip_sents]]
vectorizer = TfidfVectorizer(lowercase=True, stop_words='english')
vectorizer.fit(raw_sents)
def sent_filter(sent):
"""
Returns True if sentence is allowed to be in a summary.
"""
lower = sent.raw.lower()
if not any([kw in lower for kw in collection.keywords]):
return False
elif not output_titles and sent.is_title:
return False
elif not output_body_sents and not sent.is_sent:
return False
else:
return True
print('summarization...')
sys_l = 0
sys_m = 0
ref_m = max_dates * max_summary_sents
date_to_summary = collections.defaultdict(list)
for c in ranked_clusters:
date = c.time.date()
c_sents = self._select_sents_from_cluster(c)
#print("C", date, len(c_sents), "M", sys_m, "L", sys_l)
summary = self.summarizer.summarize(
c_sents,
k=max_summary_sents,
vectorizer=vectorizer,
filter=sent_filter
)
if summary:
if self.unique_dates and date in date_to_summary:
continue
date_to_summary[date] += summary
sys_m += len(summary)
if self.unique_dates:
sys_l += 1
if sys_m >= ref_m or sys_l >= max_dates:
break
timeline = []
for d, summary in date_to_summary.items():
t = datetime.datetime(d.year, d.month, d.day)
timeline.append((t, summary))
timeline.sort(key=lambda x: x[0])
return data.Timeline(timeline)
def _select_sents_from_cluster(self, cluster):
sents = []
for a in cluster.articles:
pub_d = a.time.date()
for s in a.sentences[:self.clip_sents]:
sents.append(s)
return sents
def load(self, ignored_topics):
pass
################################# CLUSTERING ###################################
class Cluster:
def __init__(self, articles, vectors, centroid, time=None, id=None):
self.articles = sorted(articles, key=lambda x: x.time)
self.centroid = centroid
self.id = id
self.vectors = vectors
self.time = time
def __len__(self):
return len(self.articles)
def pub_times(self):
return [a.time for a in self.articles]
def earliest_pub_time(self):
return min(self.pub_times())
def most_mentioned_time(self):
mentioned_times = []
for a in self.articles:
for s in a.sentences:
if s.time and s.time_level == 'd':
mentioned_times.append(s.time)
if mentioned_times:
return collections.Counter(mentioned_times).most_common()[0][0]
else:
return None
def update_centroid(self):
X = sparse.vstack(self.vectors)
self.centroid = sparse.csr_matrix.mean(X, axis=0)
class Clusterer():
def cluster(self, collection, vectorizer) -> List[Cluster]:
raise NotImplementedError
class OnlineClusterer(Clusterer):
def __init__(self, max_days=1, min_sim=0.5):
self.max_days = max_days
self.min_sim = min_sim
def cluster(self, collection, vectorizer) -> List[Cluster]:
# build article vectors
        texts = ['{} {}'.format(a.title, a.text) for a in collection.articles()]
try:
X = vectorizer.transform(texts)
except:
X = vectorizer.fit_transform(texts)
id_to_vector = {}
for a, x in zip(collection.articles(), X):
id_to_vector[a.id] = x
online_clusters = []
for t, articles in collection.time_batches():
for a in articles:
# calculate similarity between article and all clusters
x = id_to_vector[a.id]
cluster_sims = []
for c in online_clusters:
if utils.days_between(c.time, t) <= self.max_days:
centroid = c.centroid
sim = cosine_similarity(centroid, x)[0, 0]
cluster_sims.append(sim)
else:
cluster_sims.append(0)
# assign article to most similar cluster (if over threshold)
cluster_found = False
if len(online_clusters) > 0:
i = np.argmax(cluster_sims)
if cluster_sims[i] >= self.min_sim:
c = online_clusters[i]
c.vectors.append(x)
c.articles.append(a)
c.update_centroid()
c.time = t
online_clusters[i] = c
cluster_found = True
# initialize new cluster if no cluster was similar enough
if not cluster_found:
new_cluster = Cluster([a], [x], x, t)
online_clusters.append(new_cluster)
clusters = []
for c in online_clusters:
            cluster = Cluster(c.articles, c.vectors, c.centroid, c.time)
clusters.append(cluster)
return clusters
class TemporalMarkovClusterer(Clusterer):
def __init__(self, max_days=1):
self.max_days = max_days
def cluster(self, collection, vectorizer) -> List[Cluster]:
articles = list(collection.articles())
texts = ['{} {}'.format(a.title, a.text) for a in articles]
try:
X = vectorizer.transform(texts)
except:
X = vectorizer.fit_transform(texts)
times = [a.time for a in articles]
print('temporal graph...')
S = self.temporal_graph(X, times)
#print('S shape:', S.shape)
print('run markov clustering...')
result = mc.run_mcl(S)
print('done')
idx_clusters = mc.get_clusters(result)
idx_clusters.sort(key=lambda c: len(c), reverse=True)
print(f'times: {len(set(times))} articles: {len(articles)} '
f'clusters: {len(idx_clusters)}')
clusters = []
for c in idx_clusters:
c_vectors = [X[i] for i in c]
c_articles = [articles[i] for i in c]
Xc = sparse.vstack(c_vectors)
centroid = sparse.csr_matrix(Xc.mean(axis=0))
cluster = Cluster(c_articles, c_vectors, centroid=centroid)
clusters.append(cluster)
return clusters
def temporal_graph(self, X, times):
times = [utils.strip_to_date(t) for t in times]
time_to_ixs = collections.defaultdict(list)
for i in range(len(times)):
time_to_ixs[times[i]].append(i)
n_items = X.shape[0]
S = sparse.lil_matrix((n_items, n_items))
start, end = min(times), max(times)
total_days = (end - start).days + 1
for n in range(total_days + 1):
t = start + datetime.timedelta(days=n)
window_size = min(self.max_days + 1, total_days + 1 - n)
window = [t + datetime.timedelta(days=k) for k in range(window_size)]
if n == 0 or len(window) == 1:
indices = [i for t in window for i in time_to_ixs[t]]
if len(indices) == 0:
continue
X_n = sparse.vstack([X[i] for i in indices])
S_n = cosine_similarity(X_n)
n_items = len(indices)
for i_x, i_n in zip(indices, range(n_items)):
                    for j_x, j_n in zip(indices[i_n + 1:], range(i_n + 1, n_items)):
S[i_x, j_x] = S_n[i_n, j_n]
else:
# prev is actually prev + new
prev_indices = [i for t in window for i in time_to_ixs[t]]
new_indices = time_to_ixs[window[-1]]
if len(new_indices) == 0:
continue
X_prev = sparse.vstack([X[i] for i in prev_indices])
X_new = sparse.vstack([X[i] for i in new_indices])
S_n = cosine_similarity(X_prev, X_new)
n_prev, n_new = len(prev_indices), len(new_indices)
for i_x, i_n in zip(prev_indices, range(n_prev)):
for j_x, j_n in zip(new_indices, range(n_new)):
S[i_x, j_x] = S_n[i_n, j_n]
return sparse.csr_matrix(S)
############################### CLUSTER RANKING ################################
class ClusterRanker:
def rank(self, clusters, collection, vectorizer):
raise NotImplementedError
class ClusterSizeRanker(ClusterRanker):
def rank(self, clusters, collection=None, vectorizer=None):
return sorted(clusters, key=len, reverse=True)
class ClusterDateMentionCountRanker(ClusterRanker):
def rank(self, clusters, collection=None, vectorizer=None):
date_to_count = collections.defaultdict(int)
for a in collection.articles():
for s in a.sentences:
d = s.get_date()
if d:
date_to_count[d] += 1
clusters = sorted(clusters, reverse=True, key=len)
def get_count(c):
t = c.most_mentioned_time()
if t:
return date_to_count[t.date()]
else:
return 0
clusters = sorted(clusters, reverse=True, key=get_count)
        return clusters
#
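# --- Added usage sketch (not part of the original module) ---
# A minimal, self-contained run of Markov clustering on a toy similarity
# matrix, mirroring the mc.run_mcl / mc.get_clusters calls used by
# TemporalMarkovClusterer.cluster(); the 4x4 matrix below is made up.
if __name__ == '__main__':
    _S = np.array([
        [1.0, 0.9, 0.1, 0.0],
        [0.9, 1.0, 0.0, 0.1],
        [0.1, 0.0, 1.0, 0.8],
        [0.0, 0.1, 0.8, 1.0],
    ])
    _result = mc.run_mcl(sparse.csr_matrix(_S))
    # typically yields two clusters, e.g. [(0, 1), (2, 3)]
    print(mc.get_clusters(_result))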
| 11,459
| 31.464589
| 81
|
py
|
news-tls
|
news-tls-master/news_tls/summarizers.py
|
import collections
import numpy as np
import networkx as nx
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from sklearn.cluster import MiniBatchKMeans
class Summarizer:
    def summarize(self, sents, k, vectorizer, filter=None):
raise NotImplementedError
class TextRank(Summarizer):
def __init__(self, max_sim=0.9999):
self.name = 'TextRank Summarizer'
self.max_sim = max_sim
def score_sentences(self, X):
S = cosine_similarity(X)
nodes = list(range(S.shape[0]))
graph = nx.Graph()
graph.add_nodes_from(nodes)
for i in range(S.shape[0]):
for j in range(S.shape[0]):
graph.add_edge(nodes[i], nodes[j], weight=S[i, j])
pagerank = nx.pagerank(graph, weight='weight')
scores = [pagerank[i] for i in nodes]
return scores
def summarize(self, sents, k, vectorizer, filter=None):
raw_sents = [s.raw for s in sents]
try:
X = vectorizer.transform(raw_sents)
except:
return None
scores = self.score_sentences(X)
indices = list(range(len(sents)))
ranked = sorted(zip(indices, scores), key=lambda x: x[1], reverse=True)
summary_sents = []
summary_vectors = []
for i, _ in ranked:
if len(summary_sents) >= k:
break
new_x = X[i]
s = sents[i]
is_redundant = False
for x in summary_vectors:
if cosine_similarity(new_x, x)[0, 0] > self.max_sim:
is_redundant = True
break
if filter and not filter(s):
continue
elif is_redundant:
continue
else:
summary_sents.append(sents[i])
summary_vectors.append(new_x)
summary = [s.raw for s in summary_sents]
return summary
class CentroidRank(Summarizer):
def __init__(self, max_sim=0.9999):
self.name = 'Sentence-Centroid Summarizer'
self.max_sim = max_sim
def score_sentences(self, X):
Xsum = sparse.csr_matrix(X.sum(0))
centroid = normalize(Xsum)
scores = cosine_similarity(X, centroid)
return scores
def summarize(self, sents, k, vectorizer, filter=None):
raw_sents = [s.raw for s in sents]
try:
X = vectorizer.transform(raw_sents)
for i, s in enumerate(sents):
s.vector = X[i]
except:
return None
scores = self.score_sentences(X)
indices = list(range(len(sents)))
ranked = sorted(zip(indices, scores), key=lambda x: x[1], reverse=True)
summary_sents = []
summary_vectors = []
for i, _ in ranked:
if len(summary_sents) >= k:
break
new_x = X[i]
s = sents[i]
is_redundant = False
for x in summary_vectors:
if cosine_similarity(new_x, x)[0, 0] > self.max_sim:
is_redundant = True
break
if filter and not filter(s):
continue
elif is_redundant:
continue
else:
summary_sents.append(sents[i])
summary_vectors.append(new_x)
summary = [s.raw for s in summary_sents]
return summary
class CentroidOpt(Summarizer):
def __init__(self, max_sim=0.9999):
self.name = 'Summary-Centroid Summarizer'
self.max_sim = max_sim
def optimise(self, centroid, X, sents, k, filter):
remaining = set(range(len(sents)))
selected = []
while len(remaining) > 0 and len(selected) < k:
if len(selected) > 0:
summary_vector = sparse.vstack([X[i] for i in selected])
summary_vector = sparse.csr_matrix(summary_vector.sum(0))
i_to_score = {}
for i in remaining:
if len(selected) > 0:
new_x = X[i]
new_summary_vector = sparse.vstack([new_x, summary_vector])
new_summary_vector = normalize(new_summary_vector.sum(0))
else:
new_summary_vector = X[i]
score = cosine_similarity(new_summary_vector, centroid)[0, 0]
i_to_score[i] = score
ranked = sorted(i_to_score.items(), key=lambda x: x[1], reverse=True)
for i, score in ranked:
s = sents[i]
remaining.remove(i)
if filter and not filter(s):
continue
elif self.is_redundant(i, selected, X):
continue
else:
selected.append(i)
break
return selected
def is_redundant(self, new_i, selected, X):
summary_vectors = [X[i] for i in selected]
new_x = X[new_i]
for x in summary_vectors:
            if cosine_similarity(new_x, x)[0, 0] > self.max_sim:
return True
return False
def summarize(self, sents, k, vectorizer, filter=None):
raw_sents = [s.raw for s in sents]
try:
X = vectorizer.transform(raw_sents)
except:
return None
X = sparse.csr_matrix(X)
Xsum = sparse.csr_matrix(X.sum(0))
centroid = normalize(Xsum)
selected = self.optimise(centroid, X, sents, k, filter)
summary = [sents[i].raw for i in selected]
return summary
class SubmodularSummarizer(Summarizer):
"""
Selects a combination of sentences as a summary by greedily optimising
a submodular function.
The function models the coverage and diversity of the sentence combination.
"""
def __init__(self, a=5, div_weight=6, cluster_factor=0.2):
self.name = 'Submodular Summarizer'
self.a = a
self.div_weight = div_weight
self.cluster_factor = cluster_factor
def cluster_sentences(self, X):
n = X.shape[0]
n_clusters = round(self.cluster_factor * n)
if n_clusters <= 1 or n <= 2:
return dict((i, 1) for i in range(n))
clusterer = MiniBatchKMeans(n_clusters=n_clusters)
labels = clusterer.fit_predict(X)
i_to_label = dict((i, l) for i, l in enumerate(labels))
return i_to_label
def compute_summary_coverage(self,
alpha,
summary_indices,
sent_coverages,
pairwise_sims):
cov = 0
for i, i_generic_cov in enumerate(sent_coverages):
i_summary_cov = sum([pairwise_sims[i, j] for j in summary_indices])
i_cov = min(i_summary_cov, alpha * i_generic_cov)
cov += i_cov
return cov
def compute_summary_diversity(self,
summary_indices,
ix_to_label,
avg_sent_sims):
cluster_to_ixs = collections.defaultdict(list)
for i in summary_indices:
l = ix_to_label[i]
cluster_to_ixs[l].append(i)
div = 0
for l, l_indices in cluster_to_ixs.items():
cluster_score = sum([avg_sent_sims[i] for i in l_indices])
cluster_score = np.sqrt(cluster_score)
div += cluster_score
return div
def optimise(self,
sents,
k,
filter,
ix_to_label,
pairwise_sims,
sent_coverages,
avg_sent_sims):
alpha = self.a / len(sents)
remaining = set(range(len(sents)))
selected = []
while len(remaining) > 0 and len(selected) < k:
i_to_score = {}
for i in remaining:
summary_indices = selected + [i]
cov = self.compute_summary_coverage(
alpha, summary_indices, sent_coverages, pairwise_sims)
div = self.compute_summary_diversity(
summary_indices, ix_to_label, avg_sent_sims)
score = cov + self.div_weight * div
i_to_score[i] = score
ranked = sorted(i_to_score.items(), key=lambda x: x[1], reverse=True)
for i, score in ranked:
s = sents[i]
remaining.remove(i)
if filter and not filter(s):
continue
else:
selected.append(i)
break
return selected
def summarize(self, sents, k, vectorizer, filter=None):
raw_sents = [s.raw for s in sents]
try:
X = vectorizer.transform(raw_sents)
except:
return None
ix_to_label = self.cluster_sentences(X)
pairwise_sims = cosine_similarity(X)
sent_coverages = pairwise_sims.sum(0)
avg_sent_sims = sent_coverages / len(sents)
selected = self.optimise(
sents, k, filter, ix_to_label,
pairwise_sims, sent_coverages, avg_sent_sims
)
summary = [sents[i].raw for i in selected]
return summary
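# --- Added usage sketch (not part of the original module) ---
# The toy sentences, the stand-in _Sent class and the TfidfVectorizer settings
# below are assumptions for illustration; CentroidOpt only reads the `.raw`
# attribute of each sentence object.
if __name__ == '__main__':
    from sklearn.feature_extraction.text import TfidfVectorizer

    class _Sent:
        def __init__(self, raw):
            self.raw = raw

    _texts = [
        'The flood destroyed several bridges in the region.',
        'Rescue teams evacuated hundreds of residents overnight.',
        'Officials promised funds to rebuild the damaged bridges.',
    ]
    _vec = TfidfVectorizer(lowercase=True, stop_words='english').fit(_texts)
    # pick the single sentence closest to the centroid of all sentences
    print(CentroidOpt().summarize([_Sent(t) for t in _texts], k=1, vectorizer=_vec))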
| 9,357
| 32.184397
| 81
|
py
|
client
|
client-master/setup.py
|
from setuptools import setup
from setuptools import find_packages
setup(
name='bugswarm-client',
version='0.1.8',
url='https://github.com/BugSwarm/client',
author='BugSwarm',
author_email='dev.bugswarm@gmail.com',
description='The official command line client for the BugSwarm artifact dataset',
long_description='The official command line client for the BugSwarm artifact dataset',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
],
zip_safe=False,
packages=find_packages(),
namespace_packages=[
'bugswarm',
],
install_requires=[
'Click==6.7',
'requests>=2.20.0',
'bugswarm-common==2022.12.3',
],
entry_points={
'console_scripts': [
'bugswarm = bugswarm.client.bugswarm:cli',
],
},
)
| 879
| 24.882353
| 90
|
py
|
client
|
client-master/bugswarm/__init__.py
|
__import__('pkg_resources').declare_namespace(__name__)
| 56
| 27.5
| 55
|
py
|
client
|
client-master/bugswarm/client/bugswarm.py
|
import json
import logging
import os
import click
from bugswarm.common import log
from bugswarm.common.rest_api.database_api import DatabaseAPI
from . import docker
from .command import MyCommand
@click.group()
@click.version_option(message='The BugSwarm Client, version %(version)s')
def cli():
"""A command line interface for the BugSwarm dataset."""
# Configure logging.
log.config_logging(getattr(logging, 'INFO', None))
@cli.command(cls=MyCommand)
@click.option('--image-tag', required=True,
type=str,
help='The artifact image tag.')
@click.option('--use-sandbox/--no-use-sandbox', default=False,
help='Whether to set up a directory that is shared by the host and container.')
@click.option('--pipe-stdin/--no-pipe-stdin', default=False,
help='If enabled, the contents of stdin are executed inside the container. '
'This option supports heredocs in shells that support them. '
'Disabled by default.')
@click.option('--rm/--no-rm', default=True,
help='If enabled, artifact containers will be cleaned up automatically after use. '
'Disable this behavior if you want to inspect the container filesystem after use. '
'Enabled by default.')
def run(image_tag, use_sandbox, pipe_stdin, rm):
"""Start an artifact container."""
# If the script does not already have sudo privileges, then explain to the user why the password prompt will appear.
if os.getuid() != 0:
log.info('Docker requires sudo privileges.')
docker.docker_run(image_tag, use_sandbox, pipe_stdin, rm)
@cli.command(cls=MyCommand)
@click.option('--image-tag', required=True,
type=str,
help='The artifact image tag.')
@click.option('--token', required=True,
type=str,
help='An authentication token for the BugSwarm database. '
'Please visit www.bugswarm.org/get-full-access for more information.')
def show(image_tag, token):
"""Display artifact metadata."""
token = token or ''
bugswarmapi = DatabaseAPI(token=token)
response = bugswarmapi.find_artifact(image_tag, error_if_not_found=False)
if not response.ok:
log.info('No artifact metadata found for image tag {}.'.format(image_tag))
else:
artifact = response.json()
# Print without the INFO prefix so the output is easier to parse.
print(json.dumps(artifact, sort_keys=True, indent=4))
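# Added illustration (not part of the original module): typical invocations of
# this CLI, with <image-tag> and <token> standing in for real values:
#
#   bugswarm run --image-tag <image-tag> --use-sandbox
#   bugswarm show --image-tag <image-tag> --token <token>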
| 2,524
| 39.079365
| 120
|
py
|
client
|
client-master/bugswarm/client/docker.py
|
import os
import subprocess
import sys
from bugswarm.common import log
from bugswarm.common.shell_wrapper import ShellWrapper
import bugswarm.common.credentials as credentials
SCRIPT_DEFAULT = '/bin/bash'
HOST_SANDBOX_DEFAULT = '~/bugswarm-sandbox'
CONTAINER_SANDBOX_DEFAULT = '/bugswarm-sandbox'
if hasattr(credentials, 'DOCKER_HUB_REPO') and credentials.DOCKER_HUB_REPO != '#':
DOCKER_HUB_REPO = credentials.DOCKER_HUB_REPO
else:
DOCKER_HUB_REPO = 'bugswarm/images'
if hasattr(credentials, 'DOCKER_HUB_CACHED_REPO') and credentials.DOCKER_HUB_CACHED_REPO != '#':
DOCKER_HUB_CACHED_REPO = credentials.DOCKER_HUB_CACHED_REPO
else:
DOCKER_HUB_CACHED_REPO = 'bugswarm/cached-images'
# By default, this function downloads the image, enters the container, and executes '/bin/bash' in the container.
# The executed script can be changed by passing the script argument.
def docker_run(image_tag, use_sandbox, use_pipe_stdin, use_rm):
assert isinstance(image_tag, str) and not image_tag.isspace()
assert isinstance(use_sandbox, bool)
assert isinstance(use_pipe_stdin, bool)
assert isinstance(use_rm, bool)
# First, try to pull the image.
ok, image_location = docker_pull(image_tag)
if not ok:
return False
# Communicate progress to the user.
host_sandbox = _default_host_sandbox()
container_sandbox = CONTAINER_SANDBOX_DEFAULT
if use_sandbox:
if not os.path.exists(host_sandbox):
log.info('Creating', host_sandbox, 'as the host sandbox.')
os.makedirs(host_sandbox, exist_ok=True)
log.info('Binding host sandbox', host_sandbox, 'to container directory', container_sandbox)
# Communicate progress to the user.
if use_pipe_stdin:
log.info('Entering the container and executing the contents of stdin inside the container.')
else:
log.info('Entering the container.')
if use_rm:
log.info('The container will be cleaned up after use.')
# Prepare the arguments for the docker run command.
volume_args = ['-v', '{}:{}'.format(host_sandbox, container_sandbox)] if use_sandbox else []
# The -t option must not be used in order to use a heredoc.
input_args = ['-i'] if use_pipe_stdin else ['-i', '-t']
subprocess_input = sys.stdin.read() if use_pipe_stdin else None
subprocess_universal_newlines = use_pipe_stdin
rm_args = ['--rm'] if use_rm else []
# If we're using a shared directory, we need to modify the start script to change the permissions of the shared
# directory on the container side. However, this will also change the permissions on the host side.
script_args = [SCRIPT_DEFAULT]
if use_sandbox:
start_command = '"sudo chmod -R 777 {} && cd {} && umask 000 && cd .. && {}"'.format(
container_sandbox, container_sandbox, SCRIPT_DEFAULT)
# These arguments represent a command of the following form:
        #   /bin/bash -c "sudo chmod -R 777 <container_sandbox> && cd <container_sandbox> && umask 000 && cd .. && /bin/bash"
# So bash will execute chmod and umask and then start a new bash shell. From the user's perspective, the chmod
# and umask commands happen transparently. That is, the user only sees the final new bash shell.
script_args = [SCRIPT_DEFAULT, '-c', start_command]
# Try to run the image.
# The tail arguments must be at the end of the command.
tail_args = [image_location] + script_args
args = ['sudo', 'docker', 'run', '--privileged'] + rm_args + volume_args + input_args + tail_args
command = ' '.join(args)
print(command)
_, _, returncode = ShellWrapper.run_commands(command,
input=subprocess_input,
universal_newlines=subprocess_universal_newlines,
shell=True)
return returncode == 0
def docker_pull(image_tag):
assert image_tag
assert isinstance(image_tag, str)
# Exit early if the image already exists locally.
exists, image_location = _image_exists_locally(image_tag)
if exists:
return True, image_location
image_location = _image_location(image_tag)
command = 'sudo docker pull {}'.format(image_location)
_, _, returncode = ShellWrapper.run_commands(command, shell=True)
if returncode != 0:
# Image is not cached. Attempt to pull from bugswarm/images.
image_location = '{}:{}'.format(DOCKER_HUB_REPO, image_tag)
command = 'sudo docker pull {}'.format(image_location)
_, _, returncode = ShellWrapper.run_commands(command, shell=True)
if returncode != 0:
# Image is not in bugswarm/images
log.error('Could not download the image', image_location)
else:
log.info('Downloaded the image', image_location + '.')
else:
log.info('Downloaded the image', image_location + '.')
return returncode == 0, image_location
# Returns True and image_location if the image already exists locally.
def _docker_image_inspect(image_tag):
image_location = _image_location(image_tag)
command = 'sudo docker image inspect {}'.format(image_location)
_, _, returncode = ShellWrapper.run_commands(command,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
# For a non-existent image, docker image inspect has a non-zero exit status.
if returncode != 0:
image_location = '{}:{}'.format(DOCKER_HUB_REPO, image_tag)
command = 'sudo docker image inspect {}'.format(image_location)
_, _, returncode = ShellWrapper.run_commands(command,
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
if returncode == 0:
log.info('The image', image_location, 'already exists locally and is up to date.')
else:
log.info('The image', image_location, 'already exists locally and is up to date.')
return returncode == 0, image_location
# Returns True and image_location if the image already exists locally.
def _image_exists_locally(image_tag):
return _docker_image_inspect(image_tag)
def _image_location(image_tag):
assert image_tag
assert isinstance(image_tag, str)
return DOCKER_HUB_CACHED_REPO + ':' + image_tag
def _default_host_sandbox():
return os.path.expanduser(HOST_SANDBOX_DEFAULT)
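# Added illustration (not part of the original module): for a hypothetical image
# tag 'example-tag' with use_sandbox=True and use_rm=True, docker_run() assembles
# roughly the following command (the repository prefix depends on credentials):
#
#   sudo docker run --privileged --rm \
#       -v ~/bugswarm-sandbox:/bugswarm-sandbox -i -t \
#       bugswarm/cached-images:example-tag \
#       /bin/bash -c "sudo chmod -R 777 /bugswarm-sandbox && cd /bugswarm-sandbox && umask 000 && cd .. && /bin/bash"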
| 6,471
| 43.634483
| 118
|
py
|
client
|
client-master/bugswarm/client/command.py
|
from bugswarm.common import outdated
from click import Command
class MyCommand(Command):
"""
A subclass of Click's Command class that checks if the client is outdated after invoking the command.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def invoke(self, ctx):
try:
super().invoke(ctx)
finally:
# Ask users to consider updating if a newer version of the client is available.
outdated.check_package_outdated('bugswarm-client')
| 541
| 29.111111
| 105
|
py
|
client
|
client-master/bugswarm/client/__init__.py
| 0
| 0
| 0
|
py
|
|
DialogID
|
DialogID-main/atc_adt_train.py
|
import sys
sys.path.append('src/auto_text_classifier')
import os
from atc.models.aml import AML
# 3. Select the data
train_path = "data/train.csv"
dev_path = "data/dev.csv"
test_path = "data/test.csv"
# 4. Train the models
config = dict()
config['num_labels'] = 9
config['epochs'] = 100
config['batch_size'] = 64
config['max_len'] = 128
config['lr'] = 0.00001
config['adt_type'] = "fgm"
config['adt_emb_name'] = 'emb'
for adt_eps in [0.5,1]:
config['adt_epsilon'] = adt_eps
model_list = ['roberta']
save_dir = f"output/fgm/adt_eps={adt_eps}"
model = AML(save_dir=save_dir, config=config)
df_report = model.train(train_path, dev_path, test_path, model_list=model_list)
save_name = "-".join(model_list)
df_report.to_csv(os.path.join(save_dir,f'result_{save_name}.csv'), index=True)
| 788
| 25.3
| 84
|
py
|
DialogID
|
DialogID-main/atc_train.py
|
import sys
sys.path.append('src/auto_text_classifier')
import os
from atc.models.aml import AML
train_path = "data/train.csv"
dev_path = "data/dev.csv"
test_path = "data/test.csv"
config = dict()
config['num_labels'] = 9
config['epochs'] = 100
config['batch_size'] = 64
config['max_len'] = 128
model_list = ['electra_base','xlnet_base','bert_base','macbert_base']
save_dir = "output/raw"
model = AML(save_dir=save_dir, config=config)
df_report = model.train(train_path, dev_path, test_path, model_list=model_list) # model_list: see the supported models; if empty, all models are used.
df_report.to_csv(os.path.join(save_dir,'result_{}.csv'.format("-".join(model_list))), index=True)
| 658
| 24.346154
| 117
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/__init__.py
|
from __future__ import absolute_import
__version__ = '0.0.1'
| 66
| 7.375
| 38
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/hf_base.py
|
import torch
import torch.nn.functional as F
import torch.nn as nn
import os
import copy
import numpy as np
import pandas as pd
import random
import datetime
from tqdm import tqdm, trange
from transformers import BertConfig
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers.data.data_collator import default_data_collator
from keras.preprocessing.sequence import pad_sequences
from transformers import BertForSequenceClassification, BertModel, BertTokenizer, AutoTokenizer, AutoModelForSequenceClassification
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from atc.utils.data_utils import init_dir
from atc.models.base_model import BaseModel
from atc.utils.metrics_utils import get_model_metrics
from atc.utils.data_utils import load_df, load_df_1
from transformers import AutoConfig
from atc.utils.adt_utils import *
from atc.utils.data_utils import DFDataset
import gc
import time
import sys
try:
from apex import amp # noqa: F401
_has_apex = True
except ImportError:
_has_apex = False
def is_apex_available():
return _has_apex
def get_model_report(preds, labels, num_labels, multi_label=False):
    # multi-label
if multi_label:
pred_list_01 = HFBase.transfer_01(preds)
correct_num = 0
for i in range(len(pred_list_01)):
if sum(pred_list_01[i] == labels[i]) == num_labels:
correct_num += 1
acc = correct_num / len(labels)
return {"Accuracy": acc}
#
if num_labels != 2:
        # multi-class
pred_flat = np.argmax(preds, axis=1)
acc = np.sum(pred_flat == labels) / len(labels)
return {"Accuracy": acc}
else:
        # binary classification
y_pred = preds[:, 1]
return get_model_metrics(y_true=labels, y_pred=y_pred)
def format_time(elapsed):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
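# Added example (not part of the original module): format_time(3661.2) returns
# '1:01:01' (rounded to the nearest second, then rendered as h:mm:ss).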
class HFBase(BaseModel):
def __init__(self, config):
super().__init__(config)
self.model = None
self.tokenizer = self.get_tokenizer()
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model_path = self.save_dir
self.config = config
self.adt_emb_name = config.get("adt_emb_name","emb")
self.adt_epsilon = config.get("adt_epsilon",1)
#
if self.pos_weight:
            self.pos_counts_dict = self.get_pos_count() # number of samples per class in the training set
            self.pos_weight = self.get_pos_weight(self.pos_counts_dict) # per-class weights used in the loss
else:
self.pos_weight = None
#
if self.focal_loss == 1:
self._loss_fun = FocalLoss(logits=True, multilabel=self.multi_label)
print("Training use focal loss ~~")
elif self.supcon_loss == 1:
self._loss_fun = SupConLoss(config["num_labels"])
print("Training use supcon loss ~~")
elif self.triplet_loss == 1:
self._loss_fun = TripletLoss()
print("Training use triplet loss ~~")
elif self.multi_label:
self._loss_fun = nn.BCEWithLogitsLoss(reduction="sum", pos_weight=self.pos_weight)
print("Training use BCEWithLogitsLoss ~~, weight {}".format(self.pos_weight))
else:
            self._loss_fun = nn.CrossEntropyLoss(weight=self.pos_weight) # default loss: cross-entropy (weight is None unless pos_weight is enabled)
print("Training use origin loss ~~, weight {}".format(self.pos_weight))
#
self.model_to_save = None
def get_pos_count(self):
"""
        Count the number of samples for each class in the training set.
"""
df_tmp = pd.read_csv(self.train_dir)
label_count_dict = dict() # {label: count}
if "label_index" in df_tmp.columns.tolist():
multilabel_list = df_tmp["label_index"].tolist()
for label_list in multilabel_list:
label_list = eval(label_list)
for label in label_list:
if label in label_count_dict:
label_count_dict[label] += 1
else:
label_count_dict[label] = 1
else:
label_list = df_tmp["label"].tolist()
for label in label_list:
if label in label_count_dict:
label_count_dict[label] += 1
else:
label_count_dict[label] = 1
#
return label_count_dict
def get_pos_weight(self, pos_counts_dict):
"""
        Compute the per-class weights used in the loss.
        weight[i] = max(counts) / counts[i]: the most frequent class gets weight 1,
        and rarer classes get proportionally larger weights.
"""
pos_counts_list = [0] * len(pos_counts_dict)
for index, count in pos_counts_dict.items():
pos_counts_list[index] = count
pos_weight = [max(pos_counts_list) / count for count in pos_counts_list]
#
return torch.Tensor(pos_weight).to(self.device)
def get_tokenizer(self):
raise NotImplementedError
def get_data_generator(self, data, shuffle=False, num_workers=1):
data = load_df_1(data)
dataset = DFDataset(data,
tokenizer=self.tokenizer,
max_len=self.max_len,
multi_label=self.multi_label,
num_labels=self.num_labels)
data_dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
shuffle=shuffle,
collate_fn=default_data_collator,
batch_size=self.batch_size,
)
return data_dataloader
def get_inputs(self, batch):
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.to(self.device)
return batch
def process_data(self, train_path, dev_path, test_path):
train_generator = self.get_data_generator(train_path, shuffle=True)
dev_generator = self.get_data_generator(dev_path)
test_generator = self.get_data_generator(test_path)
return train_generator, dev_generator, test_generator
def init_model(self):
print("HFBase init")
try:
self.model = AutoModelForSequenceClassification.from_pretrained(self.model_dir,
num_labels=self.num_labels)
#output_hidden_states=True)
except:
config = self.get_config()
self.model = AutoModelForSequenceClassification.from_pretrained(self.model_dir,
config=config)
#output_hidden_states=True)
def train(self, train_path, dev_path, test_path):
        self.set_seed(self.seed) # for reproducibility
train_generator, dev_generator, test_generator = self.process_data(
train_path, dev_path, test_path)
self.init_model()
self.model = self.model.to(self.device)
self.optimizer = AdamW(self.model.parameters(),
lr=self.lr, # args.learning_rate - default is 5e-5, our notebook had 2e-5
# args.adam_epsilon - default is 1e-8.
eps=1e-8
)
if not self.fp16 is None:
if not is_apex_available():
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level=self.fp16)
print("train model use fp16")
self.train_model(train_generator,
dev_generator, test_generator)
## load best model
self.load_model(self.save_dir)
return self.evaluate(test_path)
def get_sentence_embedding(self, text):
"""
        Run the current model on text and return its embedding.
"""
pass
def get_label_attention_sentence_embedding(self):
"""
        Encode the sentence together with the label texts, compute word-level attention, and merge it into a sentence embedding.
https://arxiv.org/pdf/1805.04174.pdf
"""
pass
def load_model(self, model_path):
        # Some models (e.g. BART) require num_labels while others do not accept it, so try with it first; similar to init_model.
try:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path,
num_labels=self.num_labels)
except:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Copy the model to the GPU.
self.model = self.model.to(self.device)
def _eval_model(self, dataloader, have_label=False):
self.model.eval()
total_loss = 0
pred_list = []
label_list = []
# Predict
batch_num = 0
for batch in tqdm(dataloader):
batch_num += 1
inputs = self.get_inputs(batch)
with torch.no_grad():
# Forward pass, calculate logit predictions
outputs = self.model(**inputs)
total_loss += outputs[0].item()
if self.multi_label:
pred = torch.sigmoid(outputs['logits']).detach().cpu().numpy()
else:
pred = F.softmax(outputs['logits']).detach().cpu().numpy()
pred_list.append(pred)
label_list.append(batch['labels'].detach().cpu().numpy())
#
y_pred = np.concatenate(pred_list)
labels = np.concatenate(label_list)
#
if have_label:
loss = total_loss/batch_num
else:
loss, labels = None, None
#
return loss, y_pred, labels
def demo(self, text, softmax_b=False):
"""
        Predict a single example.
"""
if text.count("[SEP]") == 1:
text1, text2 = text.split("[SEP]")
else:
text1 = text
text2 = None
#
encoding = self.tokenizer.encode_plus(
text1,
text2,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
for k, v in encoding.items():
v = torch.Tensor([v]).long()
encoding[k] = v.to(self.device)
#
self.model.eval()
with torch.no_grad():
outputs = self.model(**encoding)
if self.multi_label:
preds = torch.sigmoid(outputs['logits']).detach().cpu().numpy()
else:
preds = F.softmax(outputs['logits']).detach().cpu().numpy()
#
if self.config.get("后处理", False):
preds = self.post_thresholds(preds)
#
if softmax_b:
return preds
#
pred = []
if self.multi_label:
for p in preds.tolist()[0]:
if p >= 0.5:
pred.append(1)
else:
pred.append(0)
else:
pred = preds.argmax()
return pred
@staticmethod
def transfer_01(preds, threshold=0.5):
pred_list = []
if type(preds) != list:
preds = preds.tolist()
#
for p in preds:
pred = []
for val in p:
if val >= threshold:
pred.append(1)
else:
pred.append(0)
#
pred_list.append(np.array(pred))
#
return np.array(pred_list)
def demo_text_list(self, text_list, softmax_b=False):
"""
        Predict a list of texts.
"""
df = pd.DataFrame({"text": text_list})
dataloader = self.get_data_generator(df, shuffle=False)
_,preds,_ = self._eval_model(dataloader, have_label=False)
#
# df1 = pd.DataFrame()
# df1["softmax"] = preds.tolist()
model_name = self.model_dir.split("/")[-2]
# df1.to_csv("/data1/sp/jupyter_data/exercise/output/softmax_result_{}_{}.csv".format(model_name, self.date))
#
if self.config.get("后处理", False):
preds = self.post_thresholds(preds)
#
if softmax_b:
return preds
#
pred_list = []
if self.multi_label:
            # multi-label
pred_list = self.transfer_01(preds)
else:
if self.num_labels == 2:
                # binary classification
pred_list = preds[:, 1]
else:
                # multi-class
pred_list = np.argmax(preds, axis=1).flatten()
#
return pred_list
def post_thresholds(self, input_preds):
        # For each sample: if the top predicted probability does not exceed its
        # class threshold, boost the last ("other") class; otherwise keep the
        # original prediction.
        # Classes (in order): encourage, guide, summarize, small talk, note,
        # restate, review, give-example, other
# thresholds = [0] * 9
thresholds = [0.98, 0.98, 0.99, 0.9, 0.95, 0.99, 0.99, 0.99, 0]
preds = input_preds.copy()
origin_pred_class = np.argmax(input_preds, axis=1).flatten()
for idx, i in enumerate(origin_pred_class):
if input_preds[idx, i] < thresholds[i]:
preds[idx, 8] += 0.9
return preds
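    # Added worked example (illustrative numbers, not from the original code):
    # with thresholds[0] = 0.98, a softmax row whose argmax is class 0 with
    # probability 0.90 < 0.98 gets preds[idx, 8] += 0.9, which typically pushes
    # the final argmax to the last class ("other").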
def labelEncoder(self, y, nClass):
"""
        Convert labels into a one-hot matrix:
[3, 4, 1] -> [[0,0,0,1,0], [0,0,0,0,1], [0,1,0,0,0]]
"""
tmp = torch.zeros(size=(y.shape[0], nClass))
for i in range(y.shape[0]):
tmp[i][y[i]] = 1
return tmp.to(self.device)
def k_is_in(self, t, keywords):
for k in keywords:
if k in t:
return True
return False
def train_model(self, train_generator, dev_generator, test_generator):
patience_count = 0
best_eval_score = 0
best_loss = np.inf
epochs = self.epochs
output_dir = self.save_dir
total_steps = len(train_generator) * epochs
# Create the learning rate scheduler.
        # It is useful to release gpu memory.
scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=0, # Default value in run_glue.py
num_training_steps=total_steps)
# if self.adt_type=='fgm':
# fgm = FGM(self.model)
# print("Training use fgm ~~, fgm_epsilon is {}".format(self.fgm_epsilon))
if self.adt_type=='fgm':
fgm = FGM(self.model)
print(f"Training use FGM ~~,self.adt_epsilon is {self.adt_epsilon}")
elif self.adt_type == 'pgd':
pgd = PGD(self.model)
print("Training use PGD ~~")
elif self.adt_type == 'freeat':
freeat = FreeAT(self.model)
print("Training use FreeAT ~~")
elif self.adt_type == 'freelb':
freelb = FreeLB(self.model)
print("Training use FreeLB ~~")
else:
print("Training use none adt ~~")
# Store the average loss after each epoch so we can plot them.
loss_values = []
# For each epoch...
        self.set_seed(self.seed) # for reproducibility
step_num = 0
if(self.adt_type == "freeat"):
self.epochs = int(self.epochs / self.K)
for epoch_i in range(0, epochs):
print("")
print(
'======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
print("learning rate: {}".format(self.lr))
# Measure how long the training epoch takes.
t_train = time.time()
# Reset the total loss for this epoch.
total_loss = 0
# For each batch of training data...
batch_i = 0
for _, batch in tqdm(enumerate(train_generator)):
batch_i += 1
self.model.train()
step_num += 1
inputs = self.get_inputs(batch)
outputs = self.model(**inputs)
#
logit = outputs[1]
#
loss = self._loss_fun(logit, inputs['labels'])
total_loss += loss
                loss.backward() # backward pass: compute the normal gradients
if batch_i % 100 == 0:
print("batch {} loss {} \n".format(batch_i, loss))
sys.stdout.flush()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
# torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                # if self.adt_type=='fgm':
                #     # adversarial training
                #     fgm.attack(epsilon=self.fgm_epsilon) # add adversarial perturbation to the embeddings
                #     outputs = self.model(**inputs)
                #     loss_adv = outputs[0]
                #     loss_adv.backward() # accumulate the adversarial gradients on top of the normal gradients
                #     fgm.restore() # restore the embedding parameters
                if self.adt_type=='fgm': # FGM adversarial training
                    # adversarial step
                    fgm.attack(self.adt_epsilon, self.adt_emb_name) # add adversarial perturbation to the embeddings
                    outputs = self.model(**inputs)
                    loss_adv = outputs[0]
                    loss_adv.backward() # accumulate the adversarial gradients on top of the normal gradients
                    fgm.restore(self.adt_emb_name) # restore the embedding parameters
                    # gradient step: update the parameters
                    self.optimizer.step()
                elif self.adt_type == 'pgd': # PGD adversarial training
                    pgd.backup_grad()
                    # adversarial steps
                    for t in range(self.K):
                        pgd.attack(is_first_attack=(t==0)) # add adversarial perturbation to the embeddings; back up param.data on the first attack
                        if t != self.K - 1:
                            self.optimizer.zero_grad()
                        else:
                            pgd.restore_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0]
                        loss_adv.backward() # accumulate the adversarial gradients on top of the normal gradients
                    pgd.restore() # restore the embedding parameters
                    self.optimizer.step()
                elif self.adt_type == 'freeat': # FreeAT adversarial training
                    # adversarial steps
                    for t in range(self.K):
                        freeat.attack(is_first_attack=(t==0)) # add adversarial perturbation to the embeddings; back up param.data on the first attack
                        self.optimizer.zero_grad()
                        # freeat.restore_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0]
                        loss_adv.backward() # accumulate the adversarial gradients on top of the normal gradients
                        self.optimizer.step()
                    # freeat.restore() # restore the embedding parameters
                elif self.adt_type == 'freelb': # FreeLB adversarial training
                    freelb.backup_grad()
                    # adversarial steps
                    for t in range(self.K):
                        freelb.attack(is_first_attack=(t==0)) # add adversarial perturbation to the embeddings; back up param.data on the first attack
                        # self._optimizer.zero_grad()
                        outputs = self.model(**inputs)
                        loss_adv = outputs[0] / self.K
                        loss_adv.backward() # accumulate the adversarial gradients on top of the normal gradients
                    freelb.restore() # restore the embedding parameters
                    self.optimizer.step()
else:
self.optimizer.step()
                # gradient step: update the parameters
# self.optimizer.step()
# Update the learning rate.
scheduler.step()
self.model.zero_grad()
                # evaluate once every self.eval_steps steps (not triggered in this setup)
if self.eval_steps is not None and self.eval_steps == step_num:
t0 = time.time()
avg_eval_loss, y_pred, labels = self._eval_model(dev_generator, have_label=True)
model_report = get_model_report(y_pred, labels, self.num_labels, self.multi_label)
                    eval_score = model_report[self.refit] # the metric we optimise for
# if best save self.model
if eval_score > best_eval_score:
best_eval_score = eval_score
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(" Get best result, saving self.model to %s" % output_dir)
self.model_to_save = self.model.module if hasattr(
self.model, 'module') else self.model
self.model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
# Report the final accuracy for this validation run.
print(" Validation {}: {:.4f},Loss :{:.4f},best_eval_loss {} is {:.4f}".format(self.refit,
eval_score,
avg_eval_loss,
self.refit,
best_eval_score))
print(" Validation took: {:}".format(
format_time(time.time() - t0)))
step_num = 0 # reset step_num
#
            # evaluate once per epoch
# Calculate the average loss over the training data.
avg_train_loss = total_loss / len(train_generator)
# Store the loss value for plotting the learning curve.
loss_values.append(avg_train_loss)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(
format_time(time.time() - t_train)))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# do eval
# Put the self.model in evaluation mode--the dropout layers behave differently
# during evaluation.
avg_eval_loss, y_pred, labels = self._eval_model(dev_generator, have_label=True)
model_report = get_model_report(y_pred, labels, self.num_labels, self.multi_label)
            eval_score = model_report[self.refit] # the metric we optimise for
# Report the final accuracy for this validation run.
print(" {}: {:.4f},Loss :{:.4f}".format(self.refit,eval_score,avg_eval_loss))
print(" Validation took: {:}".format(
format_time(time.time() - t0)))
# if best save self.model
if eval_score > best_eval_score + 0.001:
patience_count = 0
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Get best result, saving self.model to %s" % output_dir)
self.model_to_save = self.model.module if hasattr(
self.model, 'module') else self.model
self.model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
best_eval_score = eval_score
else:
patience_count = patience_count + 1
if patience_count > self.patience:
print("Epoch {}:early stopping Get best result, {} did not improve from {}".format(
epoch_i + 1,self.refit,best_eval_score))
break
            # learning rate decay
self.lr *= 0.9
#
del self.optimizer
del self.model_to_save
if self.adt_type == 'fgm':
del fgm
def release(self):
# see this issue:https://github.com/huggingface/transformers/issues/1742
print("Release model")
del self.model
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
def load_raw_config(self):
        '''Load the original pretrained config.'''
config = AutoConfig.from_pretrained(self.model_dir)
return config
def get_config(self):
config = self.load_raw_config()
num_labels = self.num_labels
config_dict = {"num_labels": num_labels,
"id2label": {x: "LABEL_{}".format(x) for x in range(num_labels)},
"label2id": {"LABEL_{}".format(x): x for x in range(num_labels)},
"output_hidden_states": self.config["output_hidden_states"],
"label_text_filepath": self.config["label_text_filepath"],
"max_length": self.max_len,
"model_dir": self.model_dir,
}
for k, v in config_dict.items():
setattr(config, k, v)
return config
| 25,472
| 38.493023
| 131
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/base_model.py
|
import numpy as np
from atc.utils.data_utils import init_dir, load_df, DataGet
from atc.utils.metrics_utils import get_model_metrics, get_multi_class_report,refit_map
import torch
import random
import os
import pandas as pd
import traceback
from tqdm import tqdm
import time
class BaseModel():
def __init__(self, config):
self.model = None
self.config = config
self.batch_size = int(self.config.get('batch_size', 32))
self.max_len = int(self.config.get('max_len', 128))
self.epochs = int(self.config.get("epochs", 100))
self.patience = int(self.config.get("patience", 5))
#
self.save_dir = self.config.get('save_dir', "")
self.train_dir = self.config.get('train_dir', "")
self.dev_dir = self.config.get('dev_dir', "")
self.test_dir = self.config.get('test_dir', "")
#
self.model_dir = self.config.get('model_dir', "")
self.num_labels = int(self.config.get('num_labels', 2))
self.seed = int(self.config.get('seed', 0))
self.fp16 = self.config.get('fp16', None)
self.token_type_ids_disable = self.config.get(
'token_type_ids_disable', False)
if self.num_labels == 2:
refit = self.config.get('refit', 'acc') # support
self.refit = refit_map[refit]
else:
self.refit = refit_map['acc']
self.adt_type = self.config.get('adt_type',None) # adversarial_training
self.focal_loss = self.config.get('focal_loss', 0)
self.supcon_loss = self.config.get('supcon_loss', 0)
self.triplet_loss = self.config.get('triplet_loss', 0)
self.K = self.config.get('K', 3)
self.fgm_epsilon = self.config.get('fgm_epsilon', 3.5e-5)
self.lr = self.config.get('lr',2e-5)
self.eval_steps = self.config.get("eval_steps", None)
self.multi_label = self.config.get('multi_label', False)
#
self.date = time.strftime("%Y-%m-%d", time.localtime())
#
self.pos_weight = self.config.get('pos_weight', False)
#
        # whether to replace the CLS vector with the mean of the top-layer token embeddings
self.mean_top_level_embedding = self.config.get(
'mean_top_level_embedding', False)
#
        # whether to attend between the model's top layer and the label texts
self.top_level_embedding_attention_with_label = self.config.get(
'top_level_embedding_attention_with_label', False)
#
init_dir(self.save_dir)
def train(self):
"""train model use train_path
Parameters
----------
model_path: model_path
Returns
-------
report:model performance in test
"""
raise NotImplementedError
def load_model(self, model_path):
"""load model from model_path
Parameters
----------
model_path: model_path
Returns
-------
None
"""
raise NotImplementedError
def demo(self, text):
"""demo for one text
Parameters
----------
text: input text
Returns
-------
p:the probability of text
"""
raise NotImplementedError
def demo_text_list(self, text_list):
"""demo input text_list
Parameters
----------
text_list: text_list
Returns
-------
p_list:the probability of all text
"""
raise NotImplementedError
def predict(self, text):
"""
text: str
"""
return self.demo(text)
def predict_list(self, text_list):
return self.demo_text_list(text_list)
def evaluate(self, df, single_sample=False):
df = load_df(df)
y_pred = []
if single_sample:
for text in tqdm(df['text'].tolist()):
y_pred.append(self.demo(text))
else:
y_pred = self.demo_text_list(df['text'].tolist())
#
if self.multi_label:
y_pred = np.array(y_pred)
y_true = [eval(x) for x in df['label'].tolist()]
else:
y_pred = np.array(y_pred)
y_true = np.array(df['label'])
#
if self.num_labels == 2:
report = get_model_metrics(y_true, y_pred)
else:
report = get_multi_class_report(y_true, y_pred)
return report
def release(self):
pass
def set_seed(self, seed=-1):
if seed != -1:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def train_cv(self, df, cv):
df = load_df(df)
data_get = DataGet(df=df,n_splits=cv,random_state=self.seed)
root_dir = self.save_dir
report_list = []
try:
for kf_i in range(cv):
print("Start cv {}/{}".format(kf_i+1,cv))
self.save_dir = os.path.join(root_dir, str(kf_i))
df_train, df_dev, df_test = data_get.get_data(kf_i=kf_i)
report = self.train(df_train, df_dev, df_test)
report['kf_i'] = kf_i
report_list.append(report)
print("Finish cv {}/{}".format(kf_i+1,cv))
self.release()
except Exception as e:
print(traceback.format_exc())
finally:
            self.save_dir = root_dir # avoid permanently changing the shared save_dir
return pd.DataFrame(report_list)
def eval_cv(self, df, cv):
df = load_df(df)
root_dir = self.save_dir
try:
kf_name_list = []
for kf_i in range(cv):
print("Start cv {}/{}".format(kf_i+1,cv))
model_dir = os.path.join(root_dir, str(kf_i))
_ = self.load_model(model_dir)
kf_name = 'kf_{}'.format(kf_i)
kf_name_list.append(kf_name)
df[kf_name] = self.predict_list(df['text'].tolist())
self.release()
print("Finish cv {}/{}".format(kf_i+1,cv))
df['kf_avg'] = df[kf_name_list].mean(axis=1)
except Exception as e:
print(traceback.format_exc())
finally:
            self.save_dir = root_dir # avoid permanently changing the shared save_dir
return df
| 6,316
| 32.247368
| 87
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/__init__.py
|
from atc.models.bert.bert import BERT
from atc.models.electra.electra import ELECTRA
from atc.models.roberta.roberta import ROBERTA
from atc.models.xlnet.xlnet import XLNet
from atc.models.macbert.macbert import MacBERT
| 220
| 35.833333
| 46
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/aml.py
|
import os
import copy
import time
import pandas as pd
import numpy as np
from tqdm import tqdm
from keras.layers import Lambda, Dense
from atc.utils.data_utils import init_dir
from atc.models.base_model import BaseModel
from atc.utils.metrics_utils import get_model_metrics,get_multi_class_report
from atc.utils.data_utils import load_df
from atc.configs.aml_config import model_dict, default_model_list
import traceback
import json
class AML():
def __init__(self, save_dir, config={}):
self.model_dict = model_dict
self.save_dir = save_dir
self.config = config
self.batch_size = int(self.config.get('batch_size', 32))
self.max_len = int(self.config.get('max_len', 128))
self.epochs = int(self.config.get("epochs",100))
self.patience = int(self.config.get("patience", 5))
self.num_labels = int(self.config.get('num_labels',2))
init_dir(self.save_dir)
def get_model_config(self, model_name):
model_class = copy.deepcopy(self.model_dict[model_name]['model_class'])
config = copy.deepcopy(self.model_dict[model_name]['config'])
return model_class, config
def __evaluate_one_model(self, model, df, model_name, data_set):
df = load_df(df)
# add time
tic = time.time()
y_pred = model.demo_text_list(df['text'].tolist())
toc = time.time()
# cal avg time
avg_time_s = (toc-tic)/df.shape[0]
# get report
#
if model.multi_label:
y_pred = np.array(y_pred)
y_true = [eval(x) for x in df['label'].tolist()]
else:
y_pred = np.array(y_pred)
y_true = np.array(df['label'])
#
if self.num_labels == 2:
report = get_model_metrics(y_true, y_pred)
else:
report = get_multi_class_report(y_true, y_pred)
report['model_name'] = model_name
report['data_set'] = data_set
report['avg_time_s'] = avg_time_s
return report
def __check_model_list(self, model_list):
if len(model_list) == 0:
return default_model_list
for model_name in model_list:
if model_name not in self.model_dict:
raise Exception(
"model:{} is not support now!".format(model_name))
return model_list
def __get_one_model(self, model_name, df_train, df_dev, df_test, train=True):
model_class, config = self.get_model_config(model_name)
config.update(self.config)
config['save_dir'] = os.path.join(self.save_dir, model_name)
print("config is :{}".format(config))
model = model_class(config)
if train:
print('Training...')
print("Start train {}".format(model_name))
_ = model.train(df_train, df_dev, df_test)
print("release after train")
else:
print("Load model")
model.load_model(model.model_path)
print("Load finish")
return model
def __get_report(self, train_path, dev_path, test_path, model_list=[], train=True):
model_list = self.__check_model_list(model_list)
# load data
df_train = load_df(train_path)
df_dev = load_df(dev_path)
df_test = load_df(test_path)
# train or eval all model
self.all_report = []
for model_name in tqdm(model_list):
try:
# get model
model = self.__get_one_model(
model_name, df_train, df_dev, df_test, train=train)
# get dev/test report
dev_report = self.__evaluate_one_model(
model, df_dev, model_name, "dev")
test_report = self.__evaluate_one_model(
model, df_test, model_name, "test")
# append report to list
self.all_report.append(dev_report)
self.all_report.append(test_report)
# release
model.release()
print("model_name:{} eval finish!,dev_report:{},test_report:{}".format(
model_name, dev_report, test_report))
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
if self.num_labels == 2:
df_report = pd.DataFrame(self.all_report)
cols = ["Accuracy", "Precision", "Recall",
"F_meansure", "AUC_Value", "avg_time_s"]
df_report_table = df_report.pivot_table(
index=["data_set", "model_name"], values=cols)[cols]
else:
df_report_table = pd.concat(self.all_report)
return df_report_table
def fit(self, train_path, dev_path, test_path, model_list=[]):
"""等价于train()
"""
df_report = self.__get_report(
train_path, dev_path, test_path, model_list=model_list, train=True)
return df_report
def train(self, train_path, dev_path, test_path, model_list=[]):
        '''Equivalent to fit().'''
return self.fit(train_path, dev_path, test_path, model_list=model_list)
def evaluate(self, df_path, model_list):
        '''Evaluate the models in model_list on df_path and return the results.'''
model_list = self.__check_model_list(model_list)
train = False
df = load_df(df_path)
all_report = []
for model_name in tqdm(model_list):
try:
# get model
model = self.__get_one_model(
model_name, df_train=None, df_dev=None, df_test=None, train=train)
model_report = self.__evaluate_one_model(model, df, model_name, "")
all_report.append(model_report)
# release
model.release()
print("model_name:{} eval finish!,model_report:{}".format(
model_name, model_report))
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
if self.num_labels==2:
cols = ["model_name", "Accuracy", "Precision",
"Recall", "F_meansure", "AUC_Value", "avg_time_s"]
df_report = pd.DataFrame(all_report)[cols]
else:
df_report = pd.concat(all_report)
return df_report
def get_list_result(self, df_list, model_list):
        '''Get every model's predictions for the given DataFrames.'''
model_list = self.__check_model_list(model_list)
train = False
df_list = [load_df(x) for x in df_list]
for model_name in tqdm(model_list):
# get model
try:
model = self.__get_one_model(
model_name, df_train=None, df_dev=None, df_test=None, train=train)
for df in df_list:
df[model_name] = model.predict_list(df['text'].tolist())
# release
model.release()
except:
print("model_name:{},fail,detail is {}".format(model_name,traceback.format_exc()))
return df_list
def pred_model_list(self, df_list, model_list):
        '''Given a list of DataFrames and a list of models, return each model's predictions on each DataFrame.'''
return self.get_list_result(df_list,model_list)
| 7,288
| 38.61413
| 98
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/xlnet/xlnet.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
class XLNet(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'xlnet'
def get_tokenizer(self):
tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
return tokenizer
| 439
| 35.666667
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/macbert/macbert.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
from transformers import BertConfig
class MacBERT(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'macbert'
def get_tokenizer(self):
tokenizer = BertTokenizer.from_pretrained(self.model_dir)
return tokenizer
def load_model(self, model_path):
self.model = BertForSequenceClassification.from_pretrained(model_path)
# Copy the model to the GPU.
self.model = self.model.to(self.device)
return self.model
| 716
| 36.736842
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/automodel/automodel.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
from transformers import BertConfig
class AutoModel(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'automodel'
def get_tokenizer(self):
try:
tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
except:
tokenizer = BertTokenizer.from_pretrained(self.model_dir)
return tokenizer
def load_model(self, model_path):
if self.config.get('use_bert_type'):
self.model = BertForSequenceClassification.from_pretrained(model_path)
else:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Copy the model to the GPU.
self.model = self.model.to(self.device)
return self.model
| 974
| 38
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/electra/electra.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
class ELECTRA(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'electra'
def get_tokenizer(self):
tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
return tokenizer
| 443
| 36
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/roberta/roberta.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
from transformers import BertConfig
class ROBERTA(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'roberta'
def get_tokenizer(self):
try:
tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
except:
tokenizer = BertTokenizer.from_pretrained(self.model_dir)
return tokenizer
def load_model(self, model_path):
if self.config.get('use_bert_type'):
self.model = BertForSequenceClassification.from_pretrained(model_path)
else:
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
# Copy the model to the GPU.
self.model = self.model.to(self.device)
return self.model
# def load_raw_config(self):
    #     '''Load the original pretrained config.'''
# config = BertConfig.from_pretrained(self.model_dir)
# return config
| 1,124
| 34.15625
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/models/bert/bert.py
|
from atc.models.hf_base import HFBase
from transformers import BertForSequenceClassification, BertModel, BertTokenizer,AutoTokenizer,AutoModelForSequenceClassification
from transformers import AdamW
from transformers import BertConfig
class BERT(HFBase):
def __init__(self,config):
super().__init__(config)
self.model_name = 'bert'
def get_tokenizer(self):
tokenizer = BertTokenizer.from_pretrained(self.model_dir)
return tokenizer
def load_raw_config(self):
        '''Return the raw pretrained config.'''
config = BertConfig.from_pretrained(self.model_dir)
return config
| 613
| 33.111111
| 129
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/configs/hf_config.py
|
import os
base_path = os.path.dirname(os.path.realpath(__file__))
from os.path import join
# chinese models
# xlnet base
xlnet_base_dir = join(base_path, '../data/hfl_chinese_xlnet_base')
xlnet_base_config = {"model_dir": xlnet_base_dir,
"save_dir": 'model/xlnet_base'}
# bert base
bert_base_dir = join(base_path, '../data/bert_base_chinese')
bert_base_config = {"model_dir": bert_base_dir,
"save_dir": 'model/bert_base',
"epochs": 100,
}
# chinese-roberta-wwm-ext
chinese_roberta_wwm_ext_dir = join(
base_path, '../data/chinese_roberta_wwm_ext')
chinese_roberta_wwm_ext_config = {"model_dir": chinese_roberta_wwm_ext_dir,
"save_dir": 'model/chinese_roberta_wwm_ext/'}
# chinese_electra_base
hfl_chinese_electra_base_dir = join(
base_path, '../data/hfl_chinese_electra_base_d')
hfl_chinese_electra_base_config = {"model_dir": hfl_chinese_electra_base_dir,
"save_dir": 'model/electra_base/'}
# macbert model
## macbert_base
macbert_base_config = {"model_dir":join(base_path, '../data/hfl_chinese_macbert_base'),
"save_dir":"model/macbert_base"}
| 1,233
| 34.257143
| 87
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/configs/aml_config.py
|
from atc.models import *
from atc.configs import *
model_dict = {
"macbert_base": {"model_class": MacBERT, "config": macbert_base_config},
"bert_base": {"model_class": BERT, "config": bert_base_config},
"roberta": {"model_class": ROBERTA, "config": chinese_roberta_wwm_ext_config},
"electra_base": {"model_class": ELECTRA, "config": hfl_chinese_electra_base_config},
"xlnet_base": {"model_class": XLNet, "config": xlnet_base_config},
}
default_model_list = list(model_dict.keys())
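# Usage sketch (illustrative): look a model up in the registry and build it from its
# config; assumes the classes imported via `from atc.models import *` match the keys above.
#     entry = model_dict["bert_base"]
#     clf = entry["model_class"](entry["config"])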
| 506
| 30.6875
| 88
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/configs/log_config.py
|
import os
import logging
import logging.handlers
S_LOG_FORMAT = "[%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s] %(message)s"
S_LOG_SUFFIX = "%Y-%m-%d_%H-%M-%S.log"
def init_logger(s_log_local_path, b_log_debug=False, mode="day"):
s_log_name = os.path.basename(s_log_local_path)
    # Rotate to a new log file every day
logger_handler = logging.handlers.TimedRotatingFileHandler(s_log_local_path, 'midnight', 1, 0, encoding="utf-8")
logger_handler.suffix = S_LOG_SUFFIX
logger_handler.setFormatter(logging.Formatter(S_LOG_FORMAT))
run_logger = logging.getLogger(s_log_name)
run_logger.setLevel(logging.INFO)
run_logger.addHandler(logger_handler)
if b_log_debug:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter(S_LOG_FORMAT))
        run_logger.addHandler(console_handler)
return run_logger
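if __name__ == "__main__":
    # Illustrative smoke test: write one INFO line to a local file and echo it to the
    # console (the log file name is arbitrary).
    demo_logger = init_logger("demo_run.log", b_log_debug=True)
    demo_logger.info("init_logger smoke test")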
| 869
| 33.8
| 116
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/configs/__init__.py
|
from atc.configs.hf_config import *
| 35
| 35
| 35
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/adt_utils.py
|
'''
Adversarial training utilities (FGM / PGD / FreeAT / FreeLB).
Reference implementation:
https://fyubang.com/2019/10/15/adversarial-train/
'''
import torch
import numpy as np
from torch.autograd import Variable
# from loguru import logger
class FGM():
def __init__(self, model):
self.model = model
self.backup = {}
def attack(self, epsilon=1., emb_name='emb.'):
'''
        Adversarial attack: perturb the embedding weights along the gradient direction.
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if param.grad is None:
continue
self.backup[name] = param.data.clone()
# print(f"adt emb name is {name}")
# print(f"param.grad is {param.grad}")
norm = torch.norm(param.grad)
if norm != 0:
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
def restore(self, emb_name='emb.'):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if param.grad is None:
continue
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
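# Typical FGM training-loop usage, following the reference article above (a sketch:
# `model`, `loss_fn`, `optimizer`, `dataloader` and the embedding parameter name are
# assumed and depend on the concrete model):
#
#     fgm = FGM(model)
#     for batch in dataloader:
#         loss = loss_fn(model(batch), batch.labels)
#         loss.backward()                              # gradients on the clean input
#         fgm.attack(emb_name='word_embeddings')       # perturb the embedding weights
#         loss_adv = loss_fn(model(batch), batch.labels)
#         loss_adv.backward()                          # accumulate adversarial gradients
#         fgm.restore(emb_name='word_embeddings')      # undo the perturbation
#         optimizer.step()
#         model.zero_grad()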
class PGD():
def __init__(self, model):
self.model = model
self.emb_backup = {}
self.grad_backup = {}
def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0:
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='embedding'):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
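# Typical K-step PGD usage, following the same reference article (a sketch with the
# same assumed names as the FGM example above):
#
#     pgd = PGD(model)
#     K = 3
#     for batch in dataloader:
#         loss = loss_fn(model(batch), batch.labels)
#         loss.backward()
#         pgd.backup_grad()
#         for t in range(K):
#             pgd.attack(is_first_attack=(t == 0))     # perturb, projected into the epsilon ball
#             if t != K - 1:
#                 model.zero_grad()
#             else:
#                 pgd.restore_grad()                   # restore clean gradients on the last step
#             loss_adv = loss_fn(model(batch), batch.labels)
#             loss_adv.backward()
#         pgd.restore()
#         optimizer.step()
#         model.zero_grad()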
class FreeAT():
def __init__(self, model):
self.model = model
self.emb_backup = {}
self.grad_backup = {}
def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0:
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='embedding'):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
class FreeLB():
def __init__(self, model):
self.model = model
self.emb_backup = {}
self.grad_backup = {}
def attack(self, epsilon=1., alpha=0.3, emb_name='embedding', is_first_attack=False):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0:
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name='embedding'):
'''
        Parameters:
        emb_name -- substring of the embedding parameter name in your model
        '''
        # Set emb_name to (part of) the name of the embedding parameter in your model.
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
| 7,143
| 31.770642
| 89
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/data_utils.py
|
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from transformers.data.processors.utils import InputFeatures
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
def init_dir(dir_path):
"""
Create dir if not exists.
Parameters:
dir_path: dir path
Returns:
None
"""
os.makedirs(dir_path,exist_ok=True)
def train_dev_test_split(df, train_size=0.8):
"""
    Split data into train/dev/test. train_size can be an int or a float in (0, 1).
Parameters:
df: df need to split.
train_size: can be int or float in (0,1).
Returns:
df_train: train data
df_dev: dev data
df_test: test data
"""
df = df.sample(frac=1, random_state=0).copy()
if train_size < 1:
train_size = int(train_size*df.shape[0])
num = df.shape[0]
dev_size = (num-train_size)//2
df_train = df[:train_size]
df_dev = df[train_size:dev_size+train_size]
df_test = df[dev_size+train_size:]
return df_train, df_dev, df_test
def split_3_save_data(save_dir,df,train_size=0.8):
"""
    Split data into train/dev/test, then save them to save_dir. train_size can be an int or a float in (0, 1).
Parameters:
save_dir: where to save data
df: df need to split.
train_size: can be int or float in (0,1).
Returns:
df_train: train data
df_dev: dev data
df_test: test data
"""
df_train,df_dev,df_test = train_dev_test_split(df,train_size)
init_dir(save_dir)
df_train.to_csv(os.path.join(save_dir,"train.csv"),index=False)
df_dev.to_csv(os.path.join(save_dir,"dev.csv"),index=False)
df_test.to_csv(os.path.join(save_dir,"test.csv"),index=False)
return df_train, df_dev, df_test
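# Usage sketch (illustrative): an 8/1/1 split of a ten-row toy frame.
#     df_demo = pd.DataFrame({"text": list("abcdefghij"), "label": [0, 1] * 5})
#     df_train, df_dev, df_test = train_dev_test_split(df_demo, train_size=0.8)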
def load_df(path):
"""
    Load dataframe data; supports a csv/xlsx/pickle path or a df object.
    Parameters:
        path: csv/xlsx/pickle path or df object
Returns:
df:df object
"""
df = None
if isinstance(path, str):
for pd_read_fun in [pd.read_csv, pd.read_excel, pd.read_pickle]:
try:
df = pd_read_fun(path)
break
except:
pass
else:
df = path
#df['label'] = df['label'].apply(int)
#df = df.fillna("")
return df
def load_df_1(path):
"""
    Load dataframe data; supports a csv/xlsx/pickle path or a df object,
    without any other constraint.
    Parameters:
        path: csv/xlsx/pickle path or df object
Returns:
df:df object
"""
if isinstance(path,str):
for pd_read_fun in [pd.read_csv,pd.read_excel,pd.read_pickle]:
try:
df = pd_read_fun(path)
break
except:
pass
else:
df = path
return df
def get_one_data_report(path, name=""):
"""
    Get the label-distribution report for one data file.
Parameters:
path: train_path
name: data name
Returns:
df_data_report:df_data_report
"""
df = load_df(path)
report = df['label'].value_counts().to_dict()
report['总量'] = df.shape[0]
report['数据集'] = name
raw_report_norm = df['label'].value_counts(normalize=True).to_dict()
report_norm = {}
for key, value in raw_report_norm.items():
report_norm["{}占比".format(key)] = round(value, 3)
report.update(report_norm)
return report
def get_data_report(train_path, dev_path, test_path):
"""
    Get the label-distribution report for the train/dev/test files.
Parameters:
train_path: train_path
dev_path: dev_path
test_path: test_path
Returns:
df_data_report:df_data_report
"""
all_report = [get_one_data_report(train_path, "train"),
get_one_data_report(dev_path, "dev"),
get_one_data_report(test_path, "test")]
df_data_report = pd.DataFrame(all_report)
all_cols = df_data_report.columns.tolist()
head_cols = ["数据集","总量"]
other_cols = [x for x in all_cols if x not in head_cols]
df_data_report = df_data_report[head_cols+other_cols]
return df_data_report
class DataGet():
'''
    K-fold data reader; get_data(kf_i) returns df_train, df_dev, df_test.
'''
def __init__(self, df, n_splits=5, random_state=5):
self.df = df
self.n_splits = n_splits
self.random_state = random_state
self.df['index_cv'] = range(len(self.df))
ids = self.df['index_cv'].unique()
self.index_col = 'index_cv'
self.all_split_info = self.get_split_info(ids, n_splits)
def get_split_id(self, all_split_info, kf_i):
split_info = all_split_info[kf_i]
train_ids, dev_ids, test_ids = split_info['train_ids'], split_info['dev_ids'], split_info['test_ids']
return train_ids, dev_ids, test_ids
def get_split_info(self, ids, n_splits=5):
kf = KFold(n_splits=n_splits, shuffle=True, random_state=self.random_state)
split_info = {}
for kf_i, (train_ids, test_ids) in enumerate(kf.split(ids)):
train_ids, dev_ids = train_test_split(
train_ids, test_size=0.1, random_state=self.random_state)
split_info[kf_i] = {"train_ids": list(train_ids), "dev_ids": list(
dev_ids), "test_ids": list(test_ids)}
return split_info
def get_data_index(self, kf_i):
split_info = self.all_split_info[kf_i]
train_ids, dev_ids, test_ids = split_info['train_ids'], split_info['dev_ids'], split_info['test_ids']
return train_ids, dev_ids, test_ids
def get_index_data(self, ids, sep_token="[SEP]"):
df_seg = self.df[self.df[self.index_col].isin(ids)].copy()
return df_seg
def get_data(self, kf_i, sep_token="[SEP]"):
train_ids, dev_ids, test_ids = self.get_data_index(
kf_i=kf_i)
df_train = self.get_index_data(train_ids, sep_token=sep_token)
df_dev = self.get_index_data(dev_ids, sep_token=sep_token)
df_test = self.get_index_data(test_ids, sep_token=sep_token)
return df_train, df_dev, df_test
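# Usage sketch (illustrative): five-fold reading of a frame with "text" and "label"
# columns, as the rest of this module expects.
#     data_getter = DataGet(df_all, n_splits=5)
#     df_train, df_dev, df_test = data_getter.get_data(kf_i=0)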
class DFDataset(Dataset):
def __init__(self, dataframe, tokenizer, max_len, multi_label=False, num_labels=1):
dataframe.index = list(range(len(dataframe)))
if 'label' not in dataframe.columns:
if multi_label:
dataframe['label'] = [[0]*num_labels]*dataframe.shape[0]
else:
dataframe['label'] = 0
#
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_len = max_len
self.multi_label = multi_label
def __getitem__(self, index):
title = str(self.data.text[index])
if title.count("[SEP]") == 1:
text1, text2 = title.split("[SEP]")
else:
text1 = title
text2 = None
inputs = self.tokenizer.encode_plus(
text1,
text2,
add_special_tokens=True,
max_length=self.max_len,
padding='max_length',
return_token_type_ids=True,
truncation=True
)
#
label = self.data.label[index]
if self.multi_label:
            # multi-label classification: label is a list of floats
if type(label) == str:
label = eval(label)
label = [float(x) for x in label]
else:
            # single-label classification: label is an int
label = int(label)
#
feature = InputFeatures(input_ids=inputs['input_ids'],
attention_mask=inputs['attention_mask'],
token_type_ids=inputs['token_type_ids'],
label=label)
return feature
def __len__(self):
return len(self.data)
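# Usage sketch (illustrative): the tokenizer would typically come from a model's
# get_tokenizer(), and the Trainer in atc.utils.hf_train supplies the data collator.
#     train_ds = DFDataset(df_train, tokenizer, max_len=128)
#     feature = train_ds[0]  # InputFeatures with input_ids / attention_mask / token_type_ids / label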
| 7,870
| 28.927757
| 109
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/metrics_utils.py
|
from sklearn.metrics import *
import pandas as pd
def get_model_metrics(y_true, y_pred, show=False, tradeoff = 0.5):
"""
Compute metrics to evaluate the model of a classification.
Parameters:
y_true: 1d array-like Ground truth (correct) labels.
y_pred: Predicted labels, as returned by a classifier.
show: Print result. Default value is False.
Returns:
report:Value of the metrics.
Examples:
::
{
'Accuracy': 1,
'Precision': 1,
'Recall': 1,
'F_measure': 1,
'AUC_Value': 1
}
"""
try:
auc = roc_auc_score(y_true, y_pred)
except:
auc = -1
y_pred = y_pred > tradeoff
recall = recall_score(y_true, y_pred)
precision = precision_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
accuracy = accuracy_score(y_true, y_pred)
if show:
        for name, value in zip(('Accuracy', 'Precision', 'Recall', 'F_measure', 'AUC_Value'),
(accuracy, precision, recall, f1, auc)):
print('{} : {:.4f}'.format(name, value))
report = {'Accuracy': round(accuracy, 4),
'Precision': round(precision, 4),
'Recall': round(recall, 4),
              'F_measure': round(f1, 4),
'AUC_Value': round(auc, 4),
}
return report
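# Example (illustrative): scores above the 0.5 tradeoff count as the positive class.
#     get_model_metrics(pd.Series([0, 1, 1, 0]), pd.Series([0.2, 0.9, 0.4, 0.1]))
#     # -> {'Accuracy': 0.75, 'Precision': 1.0, 'Recall': 0.5, 'F_measure': 0.6667, 'AUC_Value': 1.0}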
def print_metris(report):
columns = ['Accuracy','Precision','Recall','F_measure','AUC_Value']
print('\t'.join([str(report[x]) for x in columns]))
def get_multi_class_report(y_true, y_pred):
report = classification_report(y_true, y_pred, output_dict=True)
df_report = pd.DataFrame(report).transpose()
return df_report
refit_map = {"acc": "Accuracy",
"p": "Precision",
"r": "Recall",
"f1": "F_measure",
"auc": "AUC_Value"}
| 1,926
| 28.19697
| 94
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/hf_train.py
|
import logging
import math
import os
import re
import shutil
import warnings
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import random
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler
from tqdm.auto import tqdm, trange
from transformers.data.data_collator import DataCollator,DefaultDataCollator
from transformers.data.processors.utils import InputFeatures
from transformers.modeling_utils import PreTrainedModel
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
EvalPrediction,
PredictionOutput,
TrainOutput,
)
from atc.utils.hf_training_args import TrainingArguments
try:
from apex import amp
_has_apex = True
except ImportError:
_has_apex = False
def is_apex_available():
return _has_apex
try:
import torch_xla.core.xla_model as xm
_has_tpu = True
except ImportError:
_has_tpu = False
def is_tpu_available():
return _has_tpu
try:
import wandb
wandb.ensure_configured()
if wandb.api.api_key is None:
_has_wandb = False
wandb.termwarn("W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.")
else:
_has_wandb = False if os.getenv("WANDB_DISABLED") else True
except ImportError:
_has_wandb = False
def is_wandb_available():
return _has_wandb
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def is_torch_tpu_available():
return False
if is_apex_available():
from apex import amp
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
try:
from torch.utils.tensorboard import SummaryWriter
_has_tensorboard = True
except ImportError:
try:
from tensorboardX import SummaryWriter
_has_tensorboard = True
except ImportError:
_has_tensorboard = False
def is_tensorboard_available():
return _has_tensorboard
if is_wandb_available():
import wandb
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
Decorator to make all processes in distributed training wait for each local_master to do something.
Parameters:
local_rank (:obj:`int`): The rank of the local process.
"""
if local_rank not in [-1, 0]:
torch.distributed.barrier()
yield
if local_rank == 0:
torch.distributed.barrier()
class SequentialDistributedSampler(Sampler):
"""
    Distributed Sampler that subsamples indices sequentially,
making it easier to collate all results at the end.
Even though we only use this sampler for eval and predict (no training),
which means that the model params won't have to be synced (i.e. will not hang
for synchronization even if varied number of forward passes), we still add extra
samples to the sampler to make it evenly divisible (like in `DistributedSampler`)
to make it easy to `gather` or `reduce` resulting tensors at the end of the loop.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not torch.distributed.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = torch.distributed.get_world_size()
if rank is None:
if not torch.distributed.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = torch.distributed.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def make_weights_for_balanced_classes(datapoints, nclasses):
count = [0] * nclasses
# Get the class counts
for i in range(len(datapoints)):
item = datapoints.__getitem__(i)
if isinstance(item, InputFeatures):
count[item.label] += 1
else:
count[item[1]] += 1
weight_per_class = [0.0] * nclasses
N = float(sum(count))
for i in range(nclasses):
if count[i] == 0:
weight_per_class[i] = 0.0
else:
weight_per_class[i] = N / float(count[i])
weight = [0] * len(datapoints)
for idx in range(len(datapoints)):
val = datapoints.__getitem__(idx)
# for idx, val in enumerate(datapoints):
        if isinstance(val, InputFeatures):
weight[idx] = weight_per_class[val.label]
else:
weight[idx] = weight_per_class[val[1]]
return weight
def get_weighted_random_sampler(dataset):
    '''
    Assumes the dataset exposes a get_labels() method; an exception is raised if it does not,
    so datasets without one need this helper to be adapted.
    '''
labels = dataset.get_labels()
weights = make_weights_for_balanced_classes(dataset, len(labels))
weights = torch.DoubleTensor(weights)
sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
return sampler
def get_tpu_sampler(dataset: Dataset):
if xm.xrt_world_size() <= 1:
return RandomSampler(dataset)
return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch,
optimized for 🤗 Transformers.
Parameters:
model (:class:`~transformers.PreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TrainingArguments`):
The arguments to tweak training.
data_collator (:obj:`DataCollator`, `optional`, defaults to :func:`~transformers.default_data_collator`):
            The function used to form a batch from a list of elements of :obj:`train_dataset` or
:obj:`eval_dataset`.
train_dataset (:obj:`Dataset`, `optional`):
The dataset to use for training.
eval_dataset (:obj:`Dataset`, `optional`):
The dataset to use for evaluation.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
tb_writer (:obj:`SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`):
A tuple containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
"""
model: PreTrainedModel
args: TrainingArguments
data_collator: DataCollator
train_dataset: Optional[Dataset]
eval_dataset: Optional[Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
tb_writer: Optional["SummaryWriter"] = None
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None
global_step: Optional[int] = None
epoch: Optional[float] = None
def __init__(
self,
model: PreTrainedModel,
args: TrainingArguments,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
tb_writer: Optional["SummaryWriter"] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
):
self.model = model.to(args.device)
self.args = args
if self.args.patience > 0 and not self.args.evaluate_during_training:
raise ValueError("Patience requires evaluate_during_training.")
if data_collator is not None:
self.data_collator = data_collator
else:
self.data_collator = DefaultDataCollator()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.optimizers = optimizers
if tb_writer is not None:
self.tb_writer = tb_writer
elif is_tensorboard_available() and self.is_world_master():
self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
if not is_tensorboard_available():
logger.warning(
"You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
)
if is_wandb_available():
self._setup_wandb()
else:
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
set_seed(self.args.seed)
# Create output directory if needed
if self.is_world_master():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available():
# Set an xla_device flag on the model's config.
            # We'll find a more elegant way and not need to do this in the future.
self.model.config.xla_device = True
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
self.data_collator = self.data_collator.collate_batch
warnings.warn(
(
"The `data_collator` should now be a simple callable (function, class with `__call__`), classes "
+ "with a `collate_batch` are deprecated and won't be supported in a future version."
),
FutureWarning,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
if is_torch_tpu_available():
train_sampler = get_tpu_sampler(self.train_dataset)
else:
if self.args.use_weighted_random_sampling:
train_sampler = get_weighted_random_sampler(self.train_dataset)
else:
train_sampler = (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
data_loader = DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator
)
return data_loader
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Parameters:
eval_dataset (:obj:`Dataset`, `optional`): If provided, will override `self.eval_dataset`.
Returns:
the evaluation :class:`~torch.utils.data.DataLoader`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(eval_dataset)
else:
sampler = SequentialSampler(eval_dataset)
data_loader = DataLoader(
eval_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator
)
return data_loader
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Parameters:
test_dataset (obj:`Dataset`): The test dataset to use.
"""
# We use the same batch_size as for eval.
if is_torch_tpu_available():
sampler = SequentialDistributedSampler(
test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif self.args.local_rank != -1:
sampler = SequentialDistributedSampler(test_dataset)
else:
sampler = SequentialSampler(test_dataset)
data_loader = DataLoader(
test_dataset,
sampler=sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator
)
return data_loader
def get_optimizers(
self, num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or override this method in a subclass.
"""
if self.optimizers is not None:
return self.optimizers
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
)
return optimizer, scheduler
def _setup_wandb(self):
"""
Setup the optional Weights & Biases (`wandb`) integration.
One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
You can also override the following environment variables:
Environment:
WANDB_WATCH:
(Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
or "all" to log gradients and parameters
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if self.is_world_master():
logger.info(
'Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"'
)
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
# keep track of model topology and gradients, unsupported on TPU
if not is_torch_tpu_available() and os.getenv("WANDB_WATCH") != "false":
wandb.watch(
self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.logging_steps)
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its Dataset.
"""
return len(dataloader.dataset)
def train(self, model_path: Optional[str] = None):
"""
Main training entry point.
Parameters:
model_path (:obj:`str`, `optional`):
Local path to the model if the model to train has been instantiated from a local path. If present,
training will resume from the optimizer/scheduler states loaded here.
"""
train_dataloader = self.get_train_dataloader()
if self.args.max_steps > 0:
t_total = self.args.max_steps
num_train_epochs = (
self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
)
else:
t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
num_train_epochs = self.args.num_train_epochs
optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
# Check if saved optimizer or scheduler states exist
if (
model_path is not None
and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(
torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
)
scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
model = self.model
if self.args.fp16:
if not is_apex_available():
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if self.args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=True,
)
if self.tb_writer is not None:
self.tb_writer.add_text("args", self.args.to_json_string())
self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={})
# Train!
if is_torch_tpu_available():
total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
else:
total_train_batch_size = (
self.args.train_batch_size
* self.args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_examples(train_dataloader))
logger.info(" Num Epochs = %d", num_train_epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
self.global_step = 0
self.epoch = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if model_path is not None:
# set global_step to global_step of last saved checkpoint from model path
try:
self.global_step = int(model_path.split("-")[-1].split("/")[0])
epochs_trained = self.global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
steps_trained_in_current_epoch = self.global_step % (
len(train_dataloader) // self.args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
self.global_step = 0
logger.info(" Starting fine-tuning.")
tr_loss = 0.0
logging_loss = 0.0
patience_best_eval_loss = None
patience_evals_without_improvement = 0
patience_should_stop = False
model.zero_grad()
train_iterator = trange(
epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master()
)
for epoch in train_iterator:
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master())
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master())
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
tr_loss += self._training_step(model, inputs, optimizer)
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
len(epoch_iterator) <= self.args.gradient_accumulation_steps
and (step + 1) == len(epoch_iterator)
):
if self.args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
if is_torch_tpu_available():
xm.optimizer_step(optimizer)
else:
optimizer.step()
scheduler.step()
model.zero_grad()
self.global_step += 1
self.epoch = epoch + (step + 1) / len(epoch_iterator)
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs: Dict[str, float] = {}
logs["loss"] = (tr_loss - logging_loss) / self.args.logging_steps
# backward compatibility for pytorch schedulers
logs["learning_rate"] = (
scheduler.get_last_lr()[0]
if version.parse(torch.__version__) >= version.parse("1.4")
else scheduler.get_lr()[0]
)
logging_loss = tr_loss
self._log(logs)
if self.args.evaluate_during_training and self.global_step % self.args.eval_steps == 0:
results = self.evaluate()
if self.args.patience > 0:
# Keep track of best loss to determine if we should stop early
eval_loss = results["eval_loss"]
if not patience_best_eval_loss or eval_loss < patience_best_eval_loss:
patience_evals_without_improvement = 0
patience_best_eval_loss = eval_loss
self.save_model(os.path.join(self.args.output_dir,"best_model"))
logger.info(
f"Save the best model eval loss is {patience_best_eval_loss}"
)
else:
patience_evals_without_improvement += 1
if patience_evals_without_improvement >= self.args.patience:
patience_should_stop = True
logger.info(
f"Patience threshold ({self.args.patience}) exceeded, stopping training"
)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
# In all cases (even distributed/parallel), self.model is always a reference
# to the model we want to save.
if hasattr(model, "module"):
assert model.module is self.model
else:
assert model is self.model
# Save model checkpoint
output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")
self.save_model(output_dir)
if self.is_world_master():
self._rotate_checkpoints()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
elif self.is_world_master():
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if (self.args.max_steps > 0 and self.global_step > self.args.max_steps) or patience_should_stop:
epoch_iterator.close()
break
if (self.args.max_steps > 0 and self.global_step > self.args.max_steps) or patience_should_stop:
train_iterator.close()
break
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
if self.tb_writer:
self.tb_writer.close()
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
return TrainOutput(self.global_step, tr_loss / self.global_step)
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
if self.epoch is not None:
logs["epoch"] = self.epoch
if self.global_step is None:
# when logging evaluation metrics without training
self.global_step = 0
if self.tb_writer:
for k, v in logs.items():
if isinstance(v, (int, float)):
self.tb_writer.add_scalar(k, v, self.global_step)
else:
logger.warning(
"Trainer is attempting to log a value of "
'"%s" of type %s for key "%s" as a scalar. '
"This invocation of Tensorboard's writer.add_scalar() "
"is incorrect so we dropped this attribute.",
v,
type(v),
k,
)
self.tb_writer.flush()
if is_wandb_available():
if self.is_world_master():
wandb.log(logs, step=self.global_step)
output = {**logs, **{"step": self.global_step}}
if iterator is not None:
iterator.write(output)
else:
print(output)
# logger.info(output)
def _training_step(
self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], optimizer: torch.optim.Optimizer
) -> float:
model.train()
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
# Our model outputs do not work with DataParallel, so forcing return tuple.
# if self.args.n_gpu > 1:
# inputs["return_tuple"] = True
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss.item()
def is_local_master(self) -> bool:
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_master(self) -> bool:
"""
This will be True only in one process, even in distributed mode,
even when training on multiple machines.
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the world_master process (unless in TPUs).
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_master():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
xm.rendezvous("saving_checkpoint")
self.model.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# save entire model
# https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-load-entire-model
torch.save(self.model, os.path.join(output_dir, "raw_model.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None,
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Parameters:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description="Evaluation")
self._log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
return output.metrics
def predict(self, test_dataset: Dataset) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Parameters:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_dataloader = self.get_test_dataloader(test_dataset)
return self._prediction_loop(test_dataloader, description="Prediction")
def _prediction_loop(
self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
"""
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
else:
model = self.model
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
past = None
for inputs in tqdm(dataloader, desc=description):
has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0:
inputs["mems"] = past
# Our model outputs do not work with DataParallel, so forcing return tuple.
# if self.args.n_gpu > 1:
# inputs["return_tuple"] = True
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if self.args.past_index >= 0:
past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]
if not prediction_loss_only:
if preds is None:
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach()
else:
label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
if self.args.local_rank != -1:
# In distributed mode, concatenate all results from all nodes:
if preds is not None:
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if label_ids is not None:
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_torch_tpu_available():
# tpu-comment: Get all predictions and labels from all worker shards of eval dataset
if preds is not None:
preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
if label_ids is not None:
label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
# Finally, turn the aggregated tensors into numpy arrays.
if preds is not None:
preds = preds.cpu().numpy()
if label_ids is not None:
label_ids = label_ids.cpu().numpy()
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics["eval_loss"] = np.mean(eval_losses)
# Prefix all keys with eval_
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
output = concat[:num_total_examples]
return output
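# Usage sketch (illustrative): assumes `model` is a transformers PreTrainedModel for
# sequence classification and `train_ds` / `dev_ds` yield InputFeatures (e.g. DFDataset).
#     args = TrainingArguments(output_dir="model/demo", num_train_epochs=1,
#                              evaluate_during_training=True, eval_steps=100, patience=2)
#     trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=dev_ds)
#     trainer.train()
#     metrics = trainer.evaluate()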
| 43,013
| 41.170588
| 131
|
py
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
DialogID
|
DialogID-main/src/auto_text_classifier/atc/utils/hf_training_args.py
|
import dataclasses
import json
import logging
import os
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple
from transformers.file_utils import cached_property, is_torch_available, torch_required
def is_torch_tpu_available():
return False
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.getLogger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts
**which relate to the training loop itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not.
do_eval (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run evaluation on the dev set or not.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not.
evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run evaluation during training at each logging step or not.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps: (:obj:`int`, `optional`, defaults to 1):
Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform.
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of updates steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to avoid using CUDA even when it is available.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
            When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`, defaults to 1000):
Number of update steps between two evaluations.
past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
evaluate_during_training: bool = field(
default=False, metadata={"help": "Run evaluation during training at each logging step."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log and eval the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=1000, metadata={"help": "Run an evaluation every X steps."})
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
patience: int = field(
default=-1,
metadata={
"help": (
"If > 0: stops training after evaluating this many times consecutively with non-decreasing loss."
"Requires evaluate_during_training."
)
},
)
use_weighted_random_sampling: bool = field(
default=False,
metadata={
"help": (
"For classification task, reweight sampling mechanism so classes are evenly sampled.",
"Not compatible with distributed sampling or TPU for now.",
)
},
)
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
return per_device_batch_size * max(1, self.n_gpu)
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(dataclasses.asdict(self), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = dataclasses.asdict(self)
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
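# Illustrative usage sketch (not part of the original file): how the effective
# training batch size follows from the per-device value defined above, assuming
# a single machine with two visible GPUs and no distributed training.
#
#     args = TrainingArguments(output_dir="out", per_device_train_batch_size=8)
#     # train_batch_size == per_device_train_batch_size * max(1, n_gpu) == 16.
#     # The deprecated --per_gpu_train_batch_size, if set, takes precedence
#     # (with a warning) in the property above.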
| 15,395
| 44.821429
| 119
|
py
|
rosbag2
|
rosbag2-master/rosbag2_storage_sqlite3/ros2bag_sqlite3_cli/__init__.py
|
# Copyright 2023 Foxglove Technologies Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_preset_profiles():
return [
('none', 'Default profile, optimized for performance.'),
('resilient', 'Avoid data corruption in case of crashes at the cost of performance.'),
]
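# Usage note (illustrative, not part of the original file): these profile names
# are meant to be selected at record time via the storage preset option,
# assuming the standard CLI flag name:
#
#     ros2 bag record -s sqlite3 --storage-preset-profile resilient /chatter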
| 819
| 38.047619
| 94
|
py
|
rosbag2
|
rosbag2-master/rosbag2_storage_mcap/ros2bag_mcap_cli/__init__.py
|
# Copyright 2023 Foxglove Technologies Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_preset_profiles():
return [
('none', 'Default profile, no special settings.'),
('fastwrite', 'Disables CRC and chunking for faster writing.'),
('zstd_fast', 'Use Zstd chunk compression on Fastest level.'),
('zstd_small', 'Use Zstd chunk compression on Slowest level, for smallest file size.'),
]
| 957
| 40.652174
| 95
|
py
|
rosbag2
|
rosbag2-master/rosbag2_performance/rosbag2_performance_benchmarking/launch/benchmark_launch.py
|
# Copyright 2021, Robotec.ai sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Launchfile for benchmarking rosbag2.
This launchfile can only be launched with 'ros2 launch' command.
Two launch arguments are required:
* benchmark - path to benchmark description in yaml format ('benchmark:=<PATH>'),
* producers - path to producers description in yaml format ('producers:=<PATH>').
Goal of this launchfile is to launch in sequence all processes and/or nodes with right parameters
required for selected benchmark. Cross section of parameters is generated based on parameters from
'benchmark' yaml description file.
Based on 'no_transport' parameter in benchmark description, a single run in launch sequence
looks as follows:
NO TRANSPORT:
Only 'writer_benchmark' node is used as 'producer node' (PN). It directly writes messages to
a storage and then fills up a result file. No additional processes are required.
PN starts -> PN exits
TRANSPORT:
For end-to-end benchmark, `ros2 bag record` (ROSBAG) process and 'result writer' (RW) are also
included in a single launch sequence run. In this case the 'benchmark_publishers' node acts as
producer node. Result writer node writes final result file.
ROSBAG starts -> PN starts -> PN exits -> ROSBAG exits -> RW starts
After the whole sequence is finished, both producers and benchmark description files are copied
to benchmark folder.
"""
import datetime
import os
import pathlib
import shutil
import signal
import sys
import time
from ament_index_python import get_package_share_directory
import launch
import launch_ros
import yaml
_bench_cfg_path = None
_producers_cfg_path = None
_producer_idx = 0
_producer_nodes = []
_rosbag_processes = []
_rosbag_pid = None
_result_writers = []
def _parse_arguments(args=sys.argv[4:]):
"""Parse benchmark and producers config file paths."""
bench_cfg_path = None
producers_cfg_path = None
err_str = 'Missing or invalid arguments detected. ' \
'Launchfile requires "benchmark:=" and "producers:=" arguments ' \
'with corresponding config files.'
if len(args) != 2:
raise RuntimeError(err_str)
else:
for arg in args:
if 'benchmark:=' in arg:
bench_cfg_path = pathlib.Path(arg.replace('benchmark:=', ''))
if not bench_cfg_path.is_file():
raise RuntimeError(
'Benchmark config file {} does not exist.'.format(bench_cfg_path)
)
elif 'producers:=' in arg:
producers_cfg_path = pathlib.Path(arg.replace('producers:=', ''))
if not producers_cfg_path.is_file():
raise RuntimeError(
'Producers config file {} does not exist.'.format(producers_cfg_path)
)
else:
raise RuntimeError(err_str)
return bench_cfg_path, producers_cfg_path
def _copy_config_files():
"""Copy benchmark and producers config files to benchmark folder."""
global _bench_cfg_path, _producers_cfg_path
# Copy yaml configs for current benchmark after benchmark is finished
benchmark_path = pathlib.Path(_producer_nodes[0]['parameters']['bag_folder'])
shutil.copy(str(_bench_cfg_path), str(benchmark_path.with_name('benchmark.yaml')))
shutil.copy(str(_producers_cfg_path), str(benchmark_path.with_name('producers.yaml')))
def _launch_sequence(transport):
"""
Continue with launch sequence (launch entry action of next run).
Launches next producer node or rosbag2 record process, based on transport (end to end)
or transportless type of benchmark.
:param" transport If True launch a 'ros2 bag record' process, else a producer node.
"""
global _producer_idx, _producer_nodes, _rosbag_processes
if _producer_idx == len(_producer_nodes):
_copy_config_files()
return launch.actions.LogInfo(msg='Benchmark finished!')
action = None
if transport:
action = _rosbag_processes[_producer_idx]
else:
action = _producer_nodes[_producer_idx]['node']
return action
def _rosbag_proc_started(event, context):
"""Register current rosbag2 PID so we can terminate it when producer exits."""
global _rosbag_pid
_rosbag_pid = event.pid
def _rosbag_ready_check(event):
"""
Consider rosbag2 ready when 'Listening for topics...' string is printed.
Launches producer node if ready.
"""
target_str = 'Listening for topics...'
if target_str in event.text.decode():
return _launch_sequence(transport=False)
def _rosbag_proc_exited(event, context):
"""
Start next rosbag2 record process after current one exits.
Launches result writer on exit.
"""
global _producer_idx, _result_writers, _rosbag_pid
# ROS2 bag returns 2 if terminated with SIGINT, which we expect here
if event.returncode != 2:
_rosbag_pid = None
return [
launch.actions.LogInfo(msg='Rosbag2 record error. Shutting down benchmark.'),
launch.actions.EmitEvent(
event=launch.events.Shutdown(
reason='Rosbag2 record error'
)
)
]
return [
_result_writers[_producer_idx-1]
]
def _producer_node_started(event, context):
"""Log current benchmark progress on producer start."""
global _producer_idx
return launch.actions.LogInfo(
msg='-----------{}/{}-----------'.format(_producer_idx + 1, len(_producer_nodes))
)
def _producer_node_exited(event, context):
"""
Launch new producer when current has finished.
If transport is on, then stops rosbag2 recorder process.
Handles clearing of bags.
"""
global _producer_idx, _producer_nodes, _rosbag_pid
node_params = _producer_nodes[_producer_idx]['parameters']
transport = node_params['transport']
# Handle clearing bag files
if not node_params['preserve_bags']:
bag_files = pathlib.Path.cwd().joinpath(node_params['bag_folder']).glob('*.db3')
stats_path = pathlib.Path.cwd().joinpath(node_params['bag_folder'], 'bagfiles_info.yaml')
stats = {
'total_size': 0,
'bagfiles': []
}
# Delete rosbag files
for f in bag_files:
filesize = f.stat().st_size
f.unlink()
stats['bagfiles'].append({f.name: {'size': filesize}})
stats['total_size'] += filesize
# Dump files size information
with open(stats_path, 'w') as stats_file:
yaml.dump(stats, stats_file)
# If we have non empty rosbag PID, then we need to kill it (end-to-end transport case)
if _rosbag_pid is not None and transport:
os.kill(_rosbag_pid, signal.SIGINT)
_rosbag_pid = None
# Shutdown benchmark with error if producer node crashes
if event.returncode != 0:
return [
launch.actions.LogInfo(msg='Writer error. Shutting down benchmark.'),
launch.actions.EmitEvent(
event=launch.events.Shutdown(
reason='Writer error'
)
)
]
# Bump up producer index, so the launch sequence can continue
_producer_idx += 1
# Give disks some time to flush their internal cache before starting next experiment
time.sleep(5)
return [
launch.actions.LogInfo(
msg='---------------------------'
),
_launch_sequence(transport=transport)
]
def generate_launch_description():
"""Generate launch description for ros2 launch system."""
global _producer_nodes, _bench_cfg_path, _producers_cfg_path
_bench_cfg_path, _producers_cfg_path = _parse_arguments()
# Parse yaml config for benchmark
bench_cfg = None
with open(_bench_cfg_path, 'r') as config_file:
bench_cfg_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
bench_cfg = (bench_cfg_yaml['rosbag2_performance_benchmarking']
['benchmark_node']
['ros__parameters'])
# Benchmark options
benchmark_params = bench_cfg['benchmark']
repeat_each = benchmark_params.get('repeat_each')
bag_root_folder = benchmark_params.get('bag_root_folder')
summary_result_file = benchmark_params.get('summary_result_file')
transport = not benchmark_params.get('no_transport')
preserve_bags = benchmark_params.get('preserve_bags')
# Producers options
producers_params = bench_cfg['benchmark']['parameters']
max_cache_size_params = producers_params.get('max_cache_size')
max_bag_size_params = producers_params.get('max_bag_size')
compression_params = producers_params.get('compression')
compression_queue_size_params = producers_params.get('compression_queue_size')
compression_threads_params = producers_params.get('compression_threads')
storage_config_file_params = producers_params.get('storage_config_file')
# Parameters cross section for whole benchmark
# Parameters cross section is a list of all possible parameters variants
params_cross_section = []
# Generate unique benchmark directory name
benchmark_cfg_name = pathlib.Path(_bench_cfg_path).name.replace('.yaml', '')
producer_cfg_name = pathlib.Path(_producers_cfg_path).name.replace('.yaml', '')
timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
transport_postfix = 'transport' if transport else 'no_transport'
benchmark_dir_name = benchmark_cfg_name + \
'_' + producer_cfg_name + \
'_' + transport_postfix + \
'_' + timestamp
# Helper function for generating cross section list
def __generate_cross_section_parameter(i,
cache,
compression,
compression_queue_size,
compression_threads,
storage_config,
max_bag_size):
# Storage conf parameter for each producer
st_conf_filename = storage_config.replace('.yaml', '')
storage_conf_path = ''
if storage_config != '':
storage_conf_path = pathlib.Path(
get_package_share_directory(
'rosbag2_performance_benchmarking'
)
).joinpath('config', 'storage', storage_config)
if not storage_conf_path.exists():
raise RuntimeError(
'Config {} does not exist.'.format(storage_config))
st_conf_filename = pathlib.Path(storage_config).with_suffix('')
# Generates unique title for producer
node_title = 'run_' + \
'{i}_{cache}_{comp}_{comp_q}_{comp_t}_{st_conf}_{bag_size}'.format(
i=i,
cache=cache,
comp=compression if compression else 'default_compression',
comp_q=compression_queue_size,
comp_t=compression_threads,
st_conf=st_conf_filename if st_conf_filename else 'default_config',
bag_size=max_bag_size
)
# Result file path for producer
result_file = pathlib.Path(bag_root_folder).joinpath(
benchmark_dir_name,
summary_result_file
)
# Bag folder path for producer
bag_folder = pathlib.Path(bag_root_folder).joinpath(
benchmark_dir_name,
node_title
)
# Filling up parameters cross section list for benchmark
params_cross_section.append(
{
'node_title': node_title,
'bag_folder': str(bag_folder),
'cache': cache,
'preserve_bags': preserve_bags,
'transport': transport,
'result_file': str(result_file),
'compression_format': compression,
'compression_queue_size': compression_queue_size,
'compression_threads': compression_threads,
'storage_config_file': str(storage_conf_path),
'config_file': str(_producers_cfg_path),
'max_bag_size': max_bag_size
}
)
# To keep indentation manageable, the nested for loops are written as a single comprehension over the helper function
[
__generate_cross_section_parameter(
i,
cache,
compression,
compression_queue_size,
compression_threads,
storage_config,
max_bag_size)
for i in range(0, repeat_each)
for cache in max_cache_size_params
for compression in compression_params
for compression_queue_size in compression_queue_size_params
for compression_threads in compression_threads_params
for storage_config in storage_config_file_params
for max_bag_size in max_bag_size_params
]
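# Illustrative note (not part of the original file): params_cross_section is the
# Cartesian product of the parameter lists above. For example, repeat_each=2 with
# two max_cache_size values and a single value for every other parameter yields
# 2 * 2 = 4 entries, i.e. one producer run per combination.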
ld = launch.LaunchDescription()
ld.add_action(
launch.actions.LogInfo(msg='Launching benchmark!'),
)
# Create all required nodes and processes for benchmark
for producer_param in params_cross_section:
parameters = [
producer_param['config_file'],
{'max_cache_size': producer_param['cache']},
{'max_bag_size': producer_param['max_bag_size']},
{'bag_folder': producer_param['bag_folder']},
{'results_file': producer_param['result_file']},
{'compression_queue_size': producer_param['compression_queue_size']},
{'compression_threads': producer_param['compression_threads']}
]
if producer_param['storage_config_file'] != '':
parameters.append({'storage_config_file': producer_param['storage_config_file']})
if producer_param['compression_format'] != '':
parameters.append({'compression_format': producer_param['compression_format']})
if not transport:
# Writer benchmark node writes messages directly to a storage, uses no publishers
producer_node = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='writer_benchmark',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
else:
# Benchmark publishers node uses standard publishers for publishing messages
producer_node = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='benchmark_publishers',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
# ROS2 bag process for recording messages
rosbag_args = []
if producer_param['storage_config_file']:
rosbag_args += [
'--storage-config-file',
str(producer_param['storage_config_file'])
]
if producer_param['cache']:
rosbag_args += [
'--max-cache-size',
str(producer_param['cache'])
]
if producer_param['compression_format']:
rosbag_args += [
'--compression-mode',
'message'
]
rosbag_args += [
'--compression-format',
str(producer_param['compression_format'])
]
if producer_param['compression_queue_size']:
rosbag_args += [
'--compression-queue-size',
str(producer_param['compression_queue_size'])
]
if producer_param['compression_threads']:
rosbag_args += [
'--compression-threads',
str(producer_param['compression_threads'])
]
if producer_param['max_bag_size']:
rosbag_args += [
'-b',
str(producer_param['max_bag_size'])
]
rosbag_args += ['-o', str(producer_param['bag_folder'])]
rosbag_process = launch.actions.ExecuteProcess(
sigkill_timeout=launch.substitutions.LaunchConfiguration(
'sigkill_timeout', default=60),
sigterm_timeout=launch.substitutions.LaunchConfiguration(
'sigterm_timeout', default=60),
cmd=['ros2', 'bag', 'record', '-e', r'\/.*_benchmarking_node\/.*'] + rosbag_args
)
# Result writer node walks through output metadata files and generates
# output results file
result_writer = launch_ros.actions.Node(
package='rosbag2_performance_benchmarking',
executable='results_writer',
name='rosbag2_performance_benchmarking_node',
parameters=parameters
)
# Fill up list with rosbag record process and result writers actions
_rosbag_processes.append(rosbag_process)
_result_writers.append(result_writer)
# Fill up dict with producer nodes and their corresponding parameters
_producer_nodes.append({'node': producer_node, 'parameters': producer_param})
# Connect start and exit events for a proper sequence
if not transport:
for producer_node in _producer_nodes:
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=producer_node['node'],
on_exit=_producer_node_exited
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=producer_node['node'],
on_start=_producer_node_started
)
)
)
else:
for producer_node, rosbag_proc in zip(_producer_nodes, _rosbag_processes):
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=producer_node['node'],
on_exit=_producer_node_exited
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=producer_node['node'],
on_start=_producer_node_started
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessStart(
target_action=rosbag_proc,
on_start=_rosbag_proc_started
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessIO(
target_action=rosbag_proc,
on_stdout=_rosbag_ready_check,
on_stderr=_rosbag_ready_check
)
)
)
ld.add_action(
launch.actions.RegisterEventHandler(
launch.event_handlers.OnProcessExit(
target_action=rosbag_proc,
on_exit=_rosbag_proc_exited
)
)
)
# Launch nodes one after another. Next node is launched after previous is finished.
ld.add_action(_launch_sequence(transport=transport))
return ld
if __name__ == '__main__':
raise RuntimeError('Benchmark launchfile does not support standalone execution.')
| 20,673
| 36.795247
| 98
|
py
|
rosbag2
|
rosbag2-master/rosbag2_performance/rosbag2_performance_benchmarking/scripts/report_gen.py
|
#!/usr/bin/env python3
# Copyright 2021, Robotec.ai sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for generating human friendly benchmark report."""
import argparse
import csv
import pathlib
import statistics
import yaml
class Postprocess:
"""Base class for posprocess calculations."""
def process(self, grouped_data, benchmark_config, producers_config):
raise NotImplementedError
class PostprocessStorageConfig(Postprocess):
"""
Postprocess.
Calculate percent of recorded messages per storage config for different
benchmark parameters.
"""
def process(self, grouped_data, benchmark_config, producers_config):
"""
Process grouped data and print human friendly information.
:param grouped_data: List of grouped results. A grouped result is a list with rows from a
single benchmark run (i.e. a run with two publisher groups returns two rows in the results
file).
:param benchmark_config: Benchmark description from yaml config.
:param producers_config: Producers description from yaml config.
"""
benchmark_config_cleaned = (benchmark_config['rosbag2_performance_benchmarking']
['benchmark_node']
['ros__parameters'])
benchmark_parameters = benchmark_config_cleaned['benchmark']['parameters']
repeat_each = benchmark_config_cleaned['benchmark']['repeat_each']
producers_config_publishers = (producers_config['rosbag2_performance_benchmarking_node']
['ros__parameters']
['publishers'])
# Split data for storage configs
splitted_data = {}
for data in grouped_data:
storage_cfg_name = data[0]['storage_config']
storage_cfg_name = storage_cfg_name if storage_cfg_name != '' else 'default'
if storage_cfg_name not in splitted_data.keys():
splitted_data.update({storage_cfg_name: []})
splitted_data[storage_cfg_name].append(data)
cache_data_per_storage_conf = {}
print(yaml.dump(producers_config_publishers))
def __process_test(compression_selected,
compression_queue_size_selected,
compression_threads_selected,
max_bagfile_size_selected):
for storage_cfg_name, data in splitted_data.items():
cache_samples = {}
for sample in data:
# Single sample contains multiple rows
if len(sample) != len(producers_config_publishers['publisher_groups']):
raise RuntimeError('Invalid number of records in results detected.')
# These parameters are same for all rows in sample
# (multiple publishers in publisher group)
if sample[0]['compression'] != compression_selected:
continue
if int(sample[0]['compression_queue']) != compression_queue_size_selected:
continue
if int(sample[0]['compression_threads']) != compression_threads_selected:
continue
if int(sample[0]['max_bagfile_size']) != max_bagfile_size_selected:
continue
if sample[0]['cache_size'] not in cache_samples.keys():
cache_samples.update({sample[0]['cache_size']: []})
# TODO(piotr.jaroszek) WARNING, currently results in 'total_produced' column
# are correct (per publisher group), but 'total_recorded' is already summed
# for all the publisher groups!
sample_total_produced = 0
for row in sample:
sample_total_produced += int(row['total_produced'])
cache_samples[sample[0]['cache_size']].append(
int(sample[0]['total_recorded_count'])/sample_total_produced)
cache_recorded_percentage_stats = {
cache: {
'avg': statistics.mean(samples),
'min': min(samples),
'max': max(samples)
}
for cache, samples in cache_samples.items()
}
cache_data_per_storage_conf.update(
{storage_cfg_name: cache_recorded_percentage_stats}
)
result = {
'repeat_each': repeat_each,
'max_bagfile_size': max_bagfile_size_selected,
'compression': compression_selected,
'compression_threads': compression_threads_selected,
'compression_queue_size': compression_queue_size_selected,
'cache_data': cache_data_per_storage_conf
}
print('Results: ')
print('\tRepetitions: {}'.format(result['repeat_each']))
print('\tMax bagfile size: {}'.format(result['max_bagfile_size']))
print('\tCompression: {}'.format(
result['compression'] if result['compression'] else '<default>')
)
print('\tCompression threads: {}'.format(result['compression_threads']))
print('\tCompression queue size: {}'.format(result['compression_queue_size']))
print('\tRecorded messages for different caches and storage config:')
for storage_cfg, caches in result['cache_data'].items():
print('\t\tstorage config: {}:'.format(pathlib.Path(storage_cfg).name))
for cache, percent_recorded in caches.items():
print('\t\t\tcache {:,} - min: {:.2%}, average: {:.2%}, max: {:.2%}'.format(
int(cache),
percent_recorded['min'],
percent_recorded['avg'],
percent_recorded['max']))
[
__process_test(
compression_selected,
compression_queue_size_selected,
compression_threads_selected,
max_bagfile_size_selected)
for compression_selected in benchmark_parameters['compression']
for compression_queue_size_selected in benchmark_parameters['compression_queue_size']
for compression_threads_selected in benchmark_parameters['compression_threads']
for max_bagfile_size_selected in benchmark_parameters['max_bag_size']
]
class Report:
"""Report generator main class."""
def __init__(self, benchmark_dir):
"""Initialize with config and results data."""
self.__benchmark_dir = benchmark_dir
self.__load_configs()
self.__load_results()
def generate(self):
"""Handle data posprocesses."""
psc = PostprocessStorageConfig()
psc.process(
self.__results_data,
self.__benchmark_config,
self.__producers_config
)
def __load_configs(self):
producers_config_path = pathlib.Path(self.__benchmark_dir).joinpath('producers.yaml')
benchmark_config_path = pathlib.Path(self.__benchmark_dir).joinpath('benchmark.yaml')
with open(producers_config_path, 'r') as fp:
self.__producers_config = yaml.load(fp, Loader=yaml.FullLoader)
with open(benchmark_config_path, 'r') as fp:
self.__benchmark_config = yaml.load(fp, Loader=yaml.FullLoader)
def __load_results(self):
results_path = pathlib.Path(self.__benchmark_dir).joinpath('results.csv')
with open(results_path, mode='r') as fp:
reader = csv.DictReader(fp, delimiter=' ')
results = []
for result in reader:
results.append(result)
publishers_groups = (
self.__producers_config['rosbag2_performance_benchmarking_node']
['ros__parameters']
['publishers']
['publisher_groups'])
publishers_groups_num = len(publishers_groups)
# Group rows in results file, so that rows within same benchmark run are
# in one list
# Example: one benchmark run with two publisher groups returns two rows in results
# file. We want to group these.
results_grouped = [
results[i:i+(publishers_groups_num)]
for i in range(0, len(results), publishers_groups_num)
]
self.__results_data = results_grouped
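# Illustrative example (not part of the original file): with two publisher
# groups, six rows read from results.csv are grouped into three runs:
#     [[row0, row1], [row2, row3], [row4, row5]]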
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Benchmark results folder.')
args = parser.parse_args()
benchmark_dir = args.input
if benchmark_dir:
report = Report(benchmark_dir)
report.generate()
else:
parser.print_help()
| 9,694
| 41.336245
| 97
|
py
|
rosbag2
|
rosbag2-master/rosbag2_test_common/rosbag2_test_common/__init__.py
|
# Copyright 2022 Foxglove Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TESTED_STORAGE_IDS = ['sqlite3', 'mcap']
| 633
| 38.625
| 74
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_sequential_reader.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from common import get_rosbag_options
import pytest
from rcl_interfaces.msg import Log
from rclpy.serialization import deserialize_message
import rosbag2_py
from rosbag2_test_common import TESTED_STORAGE_IDS
from rosidl_runtime_py.utilities import get_message
from std_msgs.msg import String
RESOURCES_PATH = Path(os.environ['ROSBAG2_PY_TEST_RESOURCES_DIR'])
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_sequential_reader(storage_id):
bag_path = str(RESOURCES_PATH / storage_id / 'talker')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
topic_types = reader.get_all_topics_and_types()
# Create a map for quicker lookup
type_map = {topic_types[i].name: topic_types[i].type for i in range(len(topic_types))}
# Set filter for topic of string type
storage_filter = rosbag2_py.StorageFilter(topics=['/topic'])
reader.set_filter(storage_filter)
msg_counter = 0
while reader.has_next():
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, String)
assert msg.data == f'Hello, world! {msg_counter}'
msg_counter += 1
# No filter
reader.reset_filter()
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
msg_counter = 0
while reader.has_next():
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, Log) or isinstance(msg, String)
if isinstance(msg, String):
assert msg.data == f'Hello, world! {msg_counter}'
msg_counter += 1
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_sequential_reader_seek(storage_id):
bag_path = str(RESOURCES_PATH / storage_id / 'talker')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
topic_types = reader.get_all_topics_and_types()
# Create a map for quicker lookup
type_map = {topic_types[i].name: topic_types[i].type for i in range(len(topic_types))}
# Seek No Filter
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
reader.seek(1585866237113147888)
msg_counter = 5
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, Log)
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, String)
assert msg.data == f'Hello, world! {msg_counter}'
msg_counter += 1
# Set Filter will continue
storage_filter = rosbag2_py.StorageFilter(topics=['/topic'])
reader.set_filter(storage_filter)
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, String)
assert msg.data == f'Hello, world! {msg_counter}'
# Seek will keep filter
reader.seek(1585866239113147888)
msg_counter = 8
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, String)
assert msg.data == f'Hello, world! {msg_counter}'
msg_counter += 1
(topic, data, t) = reader.read_next()
msg_type = get_message(type_map[topic])
msg = deserialize_message(data, msg_type)
assert isinstance(msg, String)
assert msg.data == f'Hello, world! {msg_counter}'
def test_plugin_list():
reader_plugins = rosbag2_py.get_registered_readers()
assert 'my_read_only_test_plugin' in reader_plugins
| 4,644
| 30.385135
| 90
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_sequential_writer.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common import get_rosbag_options
import pytest
from rclpy.serialization import deserialize_message, serialize_message
import rosbag2_py
from rosbag2_test_common import TESTED_STORAGE_IDS
from rosidl_runtime_py.utilities import get_message
from std_msgs.msg import String
def create_topic(writer, topic_name, topic_type, serialization_format='cdr'):
"""
Create a new topic.
:param writer: writer instance
:param topic_name:
:param topic_type:
:param serialization_format:
:return:
"""
topic = rosbag2_py.TopicMetadata(name=topic_name, type=topic_type,
serialization_format=serialization_format)
writer.create_topic(topic)
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_sequential_writer(tmp_path, storage_id):
"""
Test for sequential writer.
:return:
"""
bag_path = str(tmp_path / 'tmp_write_test')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
writer = rosbag2_py.SequentialWriter()
writer.open(storage_options, converter_options)
# create topic
topic_name = '/chatter'
create_topic(writer, topic_name, 'std_msgs/msg/String')
for i in range(10):
msg = String()
msg.data = f'Hello, world! {str(i)}'
time_stamp = i * 100
writer.write(topic_name, serialize_message(msg), time_stamp)
# close bag and create new storage instance
del writer
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
topic_types = reader.get_all_topics_and_types()
# Create a map for quicker lookup
type_map = {topic_types[i].name: topic_types[i].type for i in range(len(topic_types))}
msg_counter = 0
while reader.has_next():
topic, data, t = reader.read_next()
msg_type = get_message(type_map[topic])
msg_deserialized = deserialize_message(data, msg_type)
assert isinstance(msg_deserialized, String)
assert msg_deserialized.data == f'Hello, world! {msg_counter}'
assert t == msg_counter * 100
msg_counter += 1
def test_plugin_list():
writer_plugins = rosbag2_py.get_registered_writers()
assert 'my_test_plugin' in writer_plugins
def test_compression_plugin_list():
"""
Testing retrieval of available compression format plugins.
:return:
"""
compression_formats = rosbag2_py.get_registered_compressors()
assert 'fake_comp' in compression_formats
def test_serialization_plugin_list():
"""
Testing retrieval of available serialization format plugins.
:return:
"""
serialization_formats = rosbag2_py.get_registered_serializers()
assert 's_converter' in serialization_formats, \
'get_registered_serializers should return SerializationFormatSerializer plugins'
assert 'a_converter' in serialization_formats, \
'get_registered_serializers should also return SerializationFormatConverter plugins'
| 3,720
| 30.268908
| 92
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_convert.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pytest
from rosbag2_py import bag_rewrite, StorageOptions
from rosbag2_test_common import TESTED_STORAGE_IDS
RESOURCES_PATH = Path(os.environ['ROSBAG2_PY_TEST_RESOURCES_DIR'])
def test_no_toplevel_key(tmpdir):
output_options_path = tmpdir / 'no_toplevel_key.yml'
output_options_content = '[{key: value}]'
with output_options_path.open('w') as f:
f.write(output_options_content)
with pytest.raises(RuntimeError):
bag_rewrite([], str(output_options_path))
def test_output_bags_not_a_list(tmpdir):
output_options_path = tmpdir / 'not_a_list.yml'
output_options_content = '{output_bags: {key: value}}'
with output_options_path.open('w') as f:
f.write(output_options_content)
with pytest.raises(RuntimeError):
bag_rewrite([], str(output_options_path))
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_basic_convert(tmpdir, storage_id):
# This test is just to test that the rosbag2_py wrapper parses input
# It is not a comprehensive test of bag_rewrite.
bag_a_path = RESOURCES_PATH / storage_id / 'convert_a'
bag_b_path = RESOURCES_PATH / storage_id / 'convert_b'
output_uri_1 = tmpdir / storage_id / 'converted_1'
output_uri_2 = tmpdir / storage_id / 'converted_2'
input_options = [
StorageOptions(uri=str(bag_a_path)),
StorageOptions(uri=str(bag_b_path)),
]
output_options_path = tmpdir / 'simple_convert.yml'
output_options_content = f"""
output_bags:
- uri: {output_uri_1}
storage_id: {storage_id}
topics: [a_empty]
- uri: {output_uri_2}
storage_id: {storage_id}
exclude: ".*empty.*"
"""
with output_options_path.open('w') as f:
f.write(output_options_content)
bag_rewrite(input_options, str(output_options_path))
assert output_uri_1.exists()
assert output_uri_1.isdir()
assert (output_uri_1 / 'metadata.yaml').exists()
assert output_uri_2.exists()
assert output_uri_2.isdir()
assert (output_uri_2 / 'metadata.yaml').exists()
| 2,685
| 34.342105
| 74
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_sequential_reader_multiple_files.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from common import get_rosbag_options
import pytest
import rosbag2_py
from rosbag2_test_common import TESTED_STORAGE_IDS
RESOURCES_PATH = Path(os.environ['ROSBAG2_PY_TEST_RESOURCES_DIR'])
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_reset_filter(storage_id):
bag_path = str(RESOURCES_PATH / storage_id / 'wbag')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id=storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
# Set filter for topic of string type
storage_filter = rosbag2_py.StorageFilter(topics=['AAA', 'CCC', 'DDD'])
reader.set_filter(storage_filter)
(topic, data, t) = reader.read_next()
assert topic == 'AAA'
assert t == 1001
(topic, data, t) = reader.read_next()
assert topic == 'CCC'
assert t == 1002
(topic, data, t) = reader.read_next()
assert topic == 'AAA'
assert t == 1004
# No filter and bag continues same location
reader.reset_filter()
(topic, data, t) = reader.read_next()
assert topic == 'FFF'
assert t == 1004
(topic, data, t) = reader.read_next()
assert topic == 'BBB'
assert t == 1004
(topic, data, t) = reader.read_next()
assert topic == 'EEE'
assert t == 1005
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_seek_forward(storage_id):
bag_path = str(RESOURCES_PATH / storage_id / 'wbag')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
# seek forward
reader.seek(1822)
(topic, data, t) = reader.read_next()
assert topic == 'CCC'
assert t == 1822
# set filter continues in same location
storage_filter = rosbag2_py.StorageFilter(topics=['BBB', 'GGG'])
reader.set_filter(storage_filter)
(topic, data, t) = reader.read_next()
assert topic == 'GGG'
assert t == 1822
(topic, data, t) = reader.read_next()
assert topic == 'GGG'
assert t == 1822
(topic, data, t) = reader.read_next()
assert topic == 'BBB'
assert t == 1826
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_seek_backward(storage_id):
bag_path = str(RESOURCES_PATH / storage_id / 'wbag')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
reader = rosbag2_py.SequentialReader()
reader.open(storage_options, converter_options)
# seek forward first
reader.seek(1822)
storage_filter = rosbag2_py.StorageFilter(topics=['BBB', 'GGG'])
reader.set_filter(storage_filter)
(topic, data, t) = reader.read_next()
# seek backwards & filter preserved
reader.seek(1408)
(topic, data, t) = reader.read_next()
assert topic == 'BBB'
assert t == 1408
(topic, data, t) = reader.read_next()
assert topic == 'GGG'
assert t == 1408
(topic, data, t) = reader.read_next()
assert topic == 'BBB'
assert t == 1413
| 3,704
| 25.091549
| 92
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_reindexer.py
|
# Copyright 2021 DCS Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DISTRIBUTION A. Approved for public release; distribution unlimited.
# OPSEC #4584.
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS
# Part 252.227-7013 or 7014 (Feb 2014).
#
# This notice must appear in all copies of this file and its derivatives.
import os
from pathlib import Path
from common import get_rosbag_options
import pytest
import rosbag2_py
from rosbag2_test_common import TESTED_STORAGE_IDS
RESOURCES_PATH = Path(os.environ['ROSBAG2_PY_TEST_RESOURCES_DIR'])
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_reindexer_multiple_files(storage_id):
bag_path = RESOURCES_PATH / storage_id / 'reindex_test_bags' / 'multiple_files'
result_path = bag_path / 'metadata.yaml'
storage_options, _ = get_rosbag_options(str(bag_path), storage_id=storage_id)
reindexer = rosbag2_py.Reindexer()
reindexer.reindex(storage_options)
assert(result_path.exists())
try:
result_path.unlink()
except FileNotFoundError:
pass
| 1,628
| 30.326923
| 83
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_storage.py
|
# Copyright 2022, Foxglove Technologies. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import common # noqa
from rclpy.duration import Duration
from rclpy.time import Time
from rosbag2_py import (
BagMetadata,
ConverterOptions,
FileInformation,
StorageFilter,
StorageOptions,
TopicInformation,
TopicMetadata,
)
class TestStorageStructs(unittest.TestCase):
def test_bag_metadata_default_ctor(self):
metadata = BagMetadata()
assert metadata
def test_converter_options_ctor(self):
converter_options = ConverterOptions()
assert converter_options
def test_file_information_ctor(self):
file_information = FileInformation(
path='test_path',
starting_time=Time(nanoseconds=1000),
duration=Duration(),
message_count=1234,
)
assert file_information
def test_storage_options_ctor(self):
storage_options = StorageOptions(uri='path')
assert storage_options
def test_storage_filter_ctor(self):
storage_filter = StorageFilter()
assert storage_filter
def test_topic_metadata_ctor(self):
topic_metadata = TopicMetadata(
name='topic',
type='msgs/Msg',
serialization_format='format'
)
assert topic_metadata
def test_topic_information_ctor(self):
topic_information = TopicInformation(
topic_metadata=TopicMetadata(
name='topic',
type='msgs/Msg',
serialization_format='format'),
message_count=10
)
assert topic_information
def test_bag_metadata_ctor_named_args(self):
duration = Duration(nanoseconds=200)
starting_time = Time(nanoseconds=100)
file_information = FileInformation(
path='something',
starting_time=starting_time,
duration=duration,
message_count=12)
topic_information = TopicInformation(
topic_metadata=TopicMetadata(
name='topic',
type='msgs/Msg',
serialization_format='format'),
message_count=10
)
metadata = BagMetadata(
version=1,
bag_size=2,
storage_identifier='foo',
relative_file_paths=['bar', 'baz'],
files=[file_information],
duration=duration,
starting_time=starting_time,
message_count=12,
topics_with_message_count=[topic_information],
compression_format='aaaa',
compression_mode='bbbbb',
custom_data={
'keya': 'valuea',
'keyb': 'valueb'
}
)
assert metadata
| 3,339
| 28.821429
| 74
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/test_transport.py
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from pathlib import Path
import threading
from common import get_rosbag_options, wait_for
import pytest
import rclpy
from rclpy.qos import QoSProfile
import rosbag2_py
from rosbag2_test_common import TESTED_STORAGE_IDS
from std_msgs.msg import String
def test_options_qos_conversion():
# Tests that the to-and-from C++ conversions are working properly in the pybind structs
simple_overrides = {
'/topic': QoSProfile(depth=10)
}
play_options = rosbag2_py.PlayOptions()
play_options.topic_qos_profile_overrides = simple_overrides
assert play_options.topic_qos_profile_overrides == simple_overrides
record_options = rosbag2_py.RecordOptions()
record_options.topic_qos_profile_overrides = simple_overrides
assert record_options.topic_qos_profile_overrides == simple_overrides
@pytest.mark.parametrize('storage_id', TESTED_STORAGE_IDS)
def test_record_cancel(tmp_path, storage_id):
bag_path = str(tmp_path / 'test_record_cancel')
storage_options, converter_options = get_rosbag_options(bag_path, storage_id)
recorder = rosbag2_py.Recorder()
record_options = rosbag2_py.RecordOptions()
record_options.all = True
record_options.is_discovery_disabled = False
record_options.topic_polling_interval = datetime.timedelta(milliseconds=100)
ctx = rclpy.Context()
ctx.init()
record_thread = threading.Thread(
target=recorder.record,
args=(storage_options, record_options),
daemon=True)
record_thread.start()
node = rclpy.create_node('test_record_cancel', context=ctx)
executor = rclpy.executors.SingleThreadedExecutor(context=ctx)
executor.add_node(node)
pub = node.create_publisher(String, 'chatter', 10)
i = 0
msg = String()
while rclpy.ok() and i < 10:
msg.data = 'Hello World: {0}'.format(i)
i += 1
pub.publish(msg)
recorder.cancel()
metadata_io = rosbag2_py.MetadataIo()
assert wait_for(lambda: metadata_io.metadata_file_exists(bag_path),
timeout=rclpy.duration.Duration(seconds=3))
metadata = metadata_io.read_metadata(bag_path)
assert(len(metadata.relative_file_paths))
storage_path = Path(metadata.relative_file_paths[0])
assert wait_for(lambda: storage_path.is_file(),
timeout=rclpy.duration.Duration(seconds=3))
| 2,995
| 32.662921
| 91
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/test/common.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from typing import Callable
if os.environ.get('ROSBAG2_PY_TEST_WITH_RTLD_GLOBAL', None) is not None:
# This is needed on Linux when compiling with clang/libc++.
# TL;DR This makes class_loader work when using a python extension compiled with libc++.
#
# For the fun RTTI ABI details, see https://whatofhow.wordpress.com/2015/03/17/odr-rtti-dso/.
sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
from rclpy.clock import Clock, ClockType # noqa
from rclpy.duration import Duration # noqa
import rosbag2_py # noqa
def get_rosbag_options(path, storage_id, serialization_format='cdr'):
storage_options = rosbag2_py.StorageOptions(
uri=path, storage_id=storage_id)
converter_options = rosbag2_py.ConverterOptions(
input_serialization_format=serialization_format,
output_serialization_format=serialization_format)
return storage_options, converter_options
def wait_for(
condition: Callable[[], bool],
timeout: Duration,
sleep_time: float = 0.1,
):
clock = Clock(clock_type=ClockType.STEADY_TIME)
start = clock.now()
while not condition():
if clock.now() - start > timeout:
return False
time.sleep(sleep_time)
return True
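# Usage sketch (illustrative, mirroring the tests above): poll a predicate until
# it holds or the timeout expires.
#
#     ok = wait_for(lambda: Path('metadata.yaml').exists(),
#                   timeout=Duration(seconds=3))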
| 1,877
| 33.145455
| 97
|
py
|
rosbag2
|
rosbag2-master/rosbag2_py/rosbag2_py/__init__.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rpyutils import add_dll_directories_from_env
# Since Python 3.8, on Windows we should ensure DLL directories are explicitly added
# to the search path.
# See https://docs.python.org/3/whatsnew/3.8.html#bpo-36085-whatsnew
with add_dll_directories_from_env('PATH'):
from rosbag2_py._reader import (
SequentialCompressionReader,
SequentialReader,
get_registered_readers,
)
from rosbag2_py._storage import (
BagMetadata,
ConverterOptions,
FileInformation,
MetadataIo,
ReadOrder,
ReadOrderSortBy,
StorageFilter,
StorageOptions,
TopicMetadata,
TopicInformation,
get_default_storage_id,
)
from rosbag2_py._writer import (
SequentialCompressionWriter,
SequentialWriter,
get_registered_writers,
get_registered_compressors,
get_registered_serializers,
)
from rosbag2_py._info import (
Info,
)
from rosbag2_py._transport import (
Player,
PlayOptions,
Recorder,
RecordOptions,
bag_rewrite,
)
from rosbag2_py._reindexer import (
Reindexer
)
__all__ = [
'bag_rewrite',
'ConverterOptions',
'FileInformation',
'get_default_storage_id',
'get_registered_readers',
'get_registered_writers',
'get_registered_compressors',
'get_registered_serializers',
'ReadOrder',
'ReadOrderSortBy',
'Reindexer',
'SequentialCompressionReader',
'SequentialCompressionWriter',
'SequentialReader',
'SequentialWriter',
'StorageFilter',
'StorageOptions',
'TopicMetadata',
'TopicInformation',
'BagMetadata',
'MetadataIo',
'Info',
'Player',
'PlayOptions',
'Recorder',
'RecordOptions',
]
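# Illustrative sketch (an assumption, not part of the upstream package): a
# minimal reader loop built from the classes re-exported above. The bag URI
# and storage id are placeholder values.
def _example_read_all(uri='/tmp/example_bag', storage_id='sqlite3'):
    reader = SequentialReader()
    reader.open(
        StorageOptions(uri=uri, storage_id=storage_id),
        ConverterOptions(
            input_serialization_format='cdr',
            output_serialization_format='cdr'))
    # Yields raw (topic name, serialized bytes, timestamp) tuples.
    while reader.has_next():
        yield reader.read_next()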
| 2,413
| 26.431818
| 84
|
py
|
rosbag2
|
rosbag2-master/ros2bag/setup.py
|
from setuptools import find_packages
from setuptools import setup
package_name = 'ros2bag'
setup(
name=package_name,
version='0.20.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['ros2cli'],
zip_safe=True,
author='Karsten Knese',
author_email='karsten@osrfoundation.org',
maintainer='Karsten Knese',
maintainer_email='karsten@osrfoundation.org',
keywords=[],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
description='Entry point for rosbag in ROS 2',
long_description="""\
The package provides the rosbag command for the ROS 2 command line tools.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'ros2cli.command': [
'bag = ros2bag.command.bag:BagCommand',
],
'ros2cli.extension_point': [
'ros2bag.verb = ros2bag.verb:VerbExtension',
],
'ros2bag.verb': [
'burst = ros2bag.verb.burst:BurstVerb',
'convert = ros2bag.verb.convert:ConvertVerb',
'info = ros2bag.verb.info:InfoVerb',
'list = ros2bag.verb.list:ListVerb',
'play = ros2bag.verb.play:PlayVerb',
'record = ros2bag.verb.record:RecordVerb',
'reindex = ros2bag.verb.reindex:ReindexVerb'
],
}
)
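# Illustrative sketch (an assumption, not shipped with this package): the shape
# of a verb that the 'ros2bag.verb' entry point group above expects. A real
# verb would live in ros2bag/verb/<name>.py and be added to entry_points; the
# class and option names below are hypothetical.
#
#     from ros2bag.verb import VerbExtension
#
#     class HelloVerb(VerbExtension):
#         """Print a greeting (hypothetical example verb)."""
#
#         def add_arguments(self, parser, cli_name):
#             parser.add_argument('--name', default='world')
#
#         def main(self, *, args):
#             print('hello {}'.format(args.name))
#             return 0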
| 1,654
| 31.45098
| 77
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_record.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import shutil
import tempfile
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
from launch_testing.asserts import EXIT_OK
import pytest
from rosbag2_test_common import TESTED_STORAGE_IDS
@pytest.mark.launch_test
@launch_testing.parametrize('storage_id', TESTED_STORAGE_IDS)
def generate_test_description(storage_id):
tmp_dir_name = tempfile.mkdtemp()
output_path = Path(tmp_dir_name) / 'ros2bag_test_record'
record_all_process = ExecuteProcess(
cmd=['ros2', 'bag', 'record', '-a', '-s', storage_id, '--output', output_path.as_posix()],
name='ros2bag-cli',
output='screen',
)
return LaunchDescription([
record_all_process,
launch_testing.actions.ReadyToTest()
]), locals()
class TestRecord(unittest.TestCase):
def test_output(self, record_all_process, proc_output):
proc_output.assertWaitFor(
'Listening for topics...',
process=record_all_process
)
proc_output.assertWaitFor(
"Subscribed to topic '/rosout'",
process=record_all_process
)
@launch_testing.post_shutdown_test()
class TestRecordAfterShutdown(unittest.TestCase):
def test_exit_code(self, tmp_dir_name, record_all_process, proc_info):
# Cleanup
shutil.rmtree(tmp_dir_name, ignore_errors=True)
# Check that the process exited with code 0
launch_testing.asserts.assertExitCodes(
proc_info,
# SIGINT (2) is the typical exit code we see coming from rclcpp
# On Windows, we get value '1'
allowable_exit_codes=[EXIT_OK, 2] if os.name != 'nt' else [EXIT_OK, 1, 2],
process=record_all_process
)
| 2,479
| 30
| 98
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_api.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from rclpy.qos import QoSDurabilityPolicy
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSReliabilityPolicy
from ros2bag.api import convert_yaml_to_qos_profile
from ros2bag.api import dict_to_duration
from ros2bag.api import interpret_dict_as_qos_profile
class TestRos2BagRecord(unittest.TestCase):
def test_dict_to_duration_valid(self):
expected_nanoseconds = 1000000002
duration_dict = {'sec': 1, 'nsec': 2}
duration = dict_to_duration(duration_dict)
assert duration.nanoseconds == expected_nanoseconds
def test_dict_to_duration_invalid(self):
duration_dict = {'sec': 1}
with self.assertRaises(ValueError):
dict_to_duration(duration_dict)
def test_interpret_dict_as_qos_profile_valid(self):
qos_dict = {'history': 'keep_last', 'depth': 10}
qos_profile = interpret_dict_as_qos_profile(qos_dict)
assert qos_profile.history == QoSHistoryPolicy.KEEP_LAST
expected_seconds = 1
expected_nanoseconds = int((expected_seconds * 1e9))
qos_dict = {'history': 'keep_all', 'deadline': {'sec': expected_seconds, 'nsec': 0}}
qos_profile = interpret_dict_as_qos_profile(qos_dict)
assert qos_profile.deadline.nanoseconds == expected_nanoseconds
expected_convention = False
qos_dict = {'history': 'keep_all', 'avoid_ros_namespace_conventions': expected_convention}
qos_profile = interpret_dict_as_qos_profile(qos_dict)
assert qos_profile.avoid_ros_namespace_conventions == expected_convention
def test_interpret_dict_as_qos_profile_invalid(self):
qos_dict = {'foo': 'bar'}
with self.assertRaises(ValueError):
interpret_dict_as_qos_profile(qos_dict)
def test_convert_yaml_to_qos_profile(self):
topic_name_1 = '/topic1'
topic_name_2 = '/topic2'
expected_convention = False
qos_dict = {
topic_name_1: {
'history': 'keep_all', 'durability': 'volatile', 'reliability': 'reliable'},
topic_name_2: {
'history': 'keep_all', 'avoid_ros_namespace_conventions': expected_convention}
}
qos_profiles = convert_yaml_to_qos_profile(qos_dict)
assert qos_profiles[topic_name_1].durability == \
QoSDurabilityPolicy.VOLATILE
assert qos_profiles[topic_name_1].reliability == \
QoSReliabilityPolicy.RELIABLE
assert qos_profiles[topic_name_1].history == \
QoSHistoryPolicy.KEEP_ALL
assert qos_profiles[topic_name_2].avoid_ros_namespace_conventions == expected_convention
assert qos_profiles[topic_name_2].history == \
QoSHistoryPolicy.KEEP_ALL
def test_interpret_dict_as_qos_profile_negative(self):
qos_dict = {'history': 'keep_all', 'depth': -1}
with self.assertRaises(ValueError):
interpret_dict_as_qos_profile(qos_dict)
qos_dict = {'history': 'keep_all', 'deadline': {'sec': -1, 'nsec': -1}}
with self.assertRaises(ValueError):
interpret_dict_as_qos_profile(qos_dict)
qos_dict = {'history': 'keep_all', 'lifespan': {'sec': -1, 'nsec': -1}}
with self.assertRaises(ValueError):
interpret_dict_as_qos_profile(qos_dict)
qos_dict = {'history': 'keep_all', 'liveliness_lease_duration': {'sec': -1, 'nsec': -1}}
with self.assertRaises(ValueError):
interpret_dict_as_qos_profile(qos_dict)
| 4,120
| 44.285714
| 98
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_cli_extension.py
|
# Copyright 2023 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import pytest
from rosbag2_test_common import TESTED_STORAGE_IDS
UNIQUE_PRESET_PROFILES = {
'mcap': ['zstd_small'],
'sqlite3': ['resilient'],
}
@pytest.mark.launch_test
@launch_testing.parametrize('storage_id', TESTED_STORAGE_IDS)
@launch_testing.markers.keep_alive
def generate_test_description(storage_id):
return LaunchDescription([
launch_testing.actions.ReadyToTest()
]), {'storage_id': storage_id}
class TestCLIExtension(unittest.TestCase):
def test_output(self, launch_service, proc_info, proc_output, storage_id):
unique_profiles = UNIQUE_PRESET_PROFILES[storage_id]
help_proc = ExecuteProcess(
cmd=['ros2', 'bag', 'record', '-s', storage_id, '--help'],
name='ros2bag-cli',
output='screen')
with launch_testing.tools.launch_process(
launch_service, help_proc, proc_info, proc_output
):
proc_info.assertWaitForShutdown(process=help_proc, timeout=4)
for prof in unique_profiles:
launch_testing.asserts.assertInStdout(
proc_output, prof, help_proc)
launch_testing.asserts.assertExitCodes(proc_info, process=help_proc)
| 1,988
| 32.711864
| 78
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_record_qos_profiles.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from pathlib import Path
import re
import sys
import tempfile
import time
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import pytest
PROFILE_PATH = Path(__file__).parent / 'resources'
TEST_NODE = 'ros2bag_record_qos_profile_test_node'
TEST_NAMESPACE = 'ros2bag_record_qos_profile'
ERROR_STRING_MSG = 'ros2bag CLI did not produce the expected output'\
'\n Expected output pattern: {}\n Actual output: {}'
OUTPUT_WAIT_TIMEOUT = 10
SHUTDOWN_TIMEOUT = 5
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
return LaunchDescription([launch_testing.actions.ReadyToTest()])
class TestRos2BagRecord(unittest.TestCase):
@classmethod
def setUpClass(cls, launch_service, proc_info, proc_output):
@contextlib.contextmanager
def launch_bag_command(self, arguments, **kwargs):
pkg_command_action = ExecuteProcess(
cmd=['ros2', 'bag', *arguments],
additional_env={'PYTHONUNBUFFERED': '1'},
name='ros2bag-cli',
output='screen',
**kwargs
)
with launch_testing.tools.launch_process(
launch_service, pkg_command_action, proc_info, proc_output
) as pkg_command:
yield pkg_command
cls.launch_bag_command = launch_bag_command
cls.tmpdir = tempfile.TemporaryDirectory()
@classmethod
def tearDownClass(cls):
try:
cls.tmpdir.cleanup()
except OSError:
if sys.platform != 'win32':
raise
# HACK to allow Windows to close pending file handles
time.sleep(3)
cls.tmpdir.cleanup()
def test_qos_simple(self):
profile_path = PROFILE_PATH / 'qos_profile.yaml'
output_path = Path(self.tmpdir.name) / 'ros2bag_test_basic'
arguments = ['record', '-a', '--qos-profile-overrides-path', profile_path.as_posix(),
'--output', output_path.as_posix()]
expected_output = 'Listening for topics...'
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_output(
condition=lambda output: expected_output in output,
timeout=OUTPUT_WAIT_TIMEOUT)
bag_command.wait_for_shutdown(timeout=SHUTDOWN_TIMEOUT)
assert bag_command.terminated
matches = expected_output in bag_command.output
assert matches, ERROR_STRING_MSG.format(expected_output, bag_command.output)
def test_incomplete_qos_profile(self):
profile_path = PROFILE_PATH / 'incomplete_qos_profile.yaml'
output_path = Path(self.tmpdir.name) / 'ros2bag_test_incomplete'
arguments = ['record', '-a', '--qos-profile-overrides-path', profile_path.as_posix(),
'--output', output_path.as_posix()]
expected_output = 'Listening for topics...'
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_output(
condition=lambda output: expected_output in output,
timeout=OUTPUT_WAIT_TIMEOUT)
bag_command.wait_for_shutdown(timeout=SHUTDOWN_TIMEOUT)
assert bag_command.terminated
matches = expected_output in bag_command.output
assert matches, ERROR_STRING_MSG.format(expected_output, bag_command.output)
def test_incomplete_qos_duration(self):
profile_path = PROFILE_PATH / 'incomplete_qos_duration.yaml'
output_path = Path(self.tmpdir.name) / 'ros2bag_test_incomplete_duration'
arguments = ['record', '-a', '--qos-profile-overrides-path', profile_path.as_posix(),
'--output', output_path.as_posix()]
expected_string_regex = re.compile(
r'\[ERROR] \[ros2bag]: Time overrides must include both')
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_output(
condition=lambda output: expected_string_regex.search(output) is not None,
timeout=OUTPUT_WAIT_TIMEOUT)
bag_command.wait_for_shutdown(timeout=SHUTDOWN_TIMEOUT)
assert bag_command.terminated
assert bag_command.exit_code != launch_testing.asserts.EXIT_OK
matches = expected_string_regex.search(bag_command.output)
assert matches, ERROR_STRING_MSG.format(expected_string_regex.pattern, bag_command.output)
def test_nonexistent_qos_profile(self):
profile_path = PROFILE_PATH / 'foobar.yaml'
output_path = Path(self.tmpdir.name) / 'ros2bag_test_nonexistent'
arguments = ['record', '-a', '--qos-profile-overrides-path', profile_path.as_posix(),
'--output', output_path.as_posix()]
expected_string_regex = re.compile(
r'ros2 bag record: error: argument --qos-profile-overrides-path: can\'t open')
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_output(
condition=lambda output: expected_string_regex.search(output) is not None,
timeout=OUTPUT_WAIT_TIMEOUT)
bag_command.wait_for_shutdown(timeout=SHUTDOWN_TIMEOUT)
assert bag_command.terminated
assert bag_command.exit_code != launch_testing.asserts.EXIT_OK
matches = expected_string_regex.search(bag_command.output)
assert matches, ERROR_STRING_MSG.format(expected_string_regex.pattern, bag_command.output)
| 6,331
| 42.668966
| 98
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_burst.py
|
# Copyright 2022 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from pathlib import Path
import re
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import pytest
RESOURCES_PATH = Path(__file__).parent / 'resources'
TEST_NODE = 'ros2bag_burst_test_node'
TEST_NAMESPACE = 'ros2bag_record_qos_profile'
ERROR_STRING = r'\[ERROR] \[ros2bag]:'
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
return LaunchDescription([launch_testing.actions.ReadyToTest()])
class TestRos2BagBurst(unittest.TestCase):
@classmethod
def setUpClass(cls, launch_service, proc_info, proc_output):
@contextlib.contextmanager
def launch_bag_command(self, arguments, **kwargs):
pkg_command_action = ExecuteProcess(
cmd=['ros2', 'bag', *arguments],
additional_env={'PYTHONUNBUFFERED': '1'},
name='ros2bag-cli',
output='screen',
**kwargs
)
with launch_testing.tools.launch_process(
launch_service, pkg_command_action, proc_info, proc_output
) as pkg_command:
yield pkg_command
cls.launch_bag_command = launch_bag_command
def test_burst(self):
"""Test the burst mode of playback."""
bag_path = RESOURCES_PATH / 'empty_bag'
arguments = ['burst', bag_path.as_posix()]
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_shutdown(timeout=5)
expected_string_regex = re.compile(ERROR_STRING)
matches = expected_string_regex.search(bag_command.output)
assert not matches, 'ros2bag CLI did not produce the expected output'
| 2,481
| 33.957746
| 78
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_info.py
|
# Copyright 2022 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from pathlib import Path
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
from launch_testing.tools.text import normalize_lineseps
import pytest
RESOURCES_PATH = Path(__file__).parent / 'resources'
EXPECTED_OUTPUT = """/parameter_events
/rosout
"""
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
return LaunchDescription([launch_testing.actions.ReadyToTest()])
class TestRos2BagInfo(unittest.TestCase):
@classmethod
def setUpClass(cls, launch_service, proc_info, proc_output):
@contextlib.contextmanager
def launch_bag_command(self, arguments, **kwargs):
pkg_command_action = ExecuteProcess(
cmd=['ros2', 'bag', *arguments],
additional_env={
'PYTHONUNBUFFERED': '1',
'TZ': 'UTC',
},
name='ros2bag-cli',
output='screen',
**kwargs
)
with launch_testing.tools.launch_process(
launch_service, pkg_command_action, proc_info, proc_output
) as pkg_command:
yield pkg_command
cls.launch_bag_command = launch_bag_command
def test_info_with_topic_name_option(self):
"""Test the output with --topic-name options."""
bag_path = RESOURCES_PATH / 'empty_bag'
arguments = ['info', '--topic-name', bag_path.as_posix()]
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_shutdown(timeout=5)
assert normalize_lineseps(bag_command.output) == EXPECTED_OUTPUT, \
'ros2bag CLI did not produce the expected output'
| 2,422
| 33.614286
| 78
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_copyright.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_copyright.main import main
import pytest
@pytest.mark.copyright
@pytest.mark.linter
def test_copyright():
rc = main(argv=['.', 'test'])
assert rc == 0, 'Found errors'
| 790
| 31.958333
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_play_qos_profiles.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from pathlib import Path
import re
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import pytest
RESOURCES_PATH = Path(__file__).parent / 'resources'
TEST_NODE = 'ros2bag_record_qos_profile_test_node'
TEST_NAMESPACE = 'ros2bag_record_qos_profile'
ERROR_STRING = r'\[ERROR] \[ros2bag]:'
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
return LaunchDescription([launch_testing.actions.ReadyToTest()])
class TestRos2BagPlay(unittest.TestCase):
@classmethod
def setUpClass(cls, launch_service, proc_info, proc_output):
@contextlib.contextmanager
def launch_bag_command(self, arguments, **kwargs):
pkg_command_action = ExecuteProcess(
cmd=['ros2', 'bag', *arguments],
additional_env={'PYTHONUNBUFFERED': '1'},
name='ros2bag-cli',
output='screen',
**kwargs
)
with launch_testing.tools.launch_process(
launch_service, pkg_command_action, proc_info, proc_output
) as pkg_command:
yield pkg_command
cls.launch_bag_command = launch_bag_command
def test_qos_simple(self):
"""Test with a full QoS profile override for a single topic."""
profile_path = RESOURCES_PATH / 'qos_profile.yaml'
bag_path = RESOURCES_PATH / 'empty_bag'
arguments = ['play', '--qos-profile-overrides-path', profile_path.as_posix(),
bag_path.as_posix()]
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_shutdown(timeout=5)
expected_string_regex = re.compile(ERROR_STRING)
matches = expected_string_regex.search(bag_command.output)
assert not matches, 'ros2bag CLI did not produce the expected output'
def test_qos_incomplete(self):
"""Test a partially filled QoS profile for a single topic."""
profile_path = RESOURCES_PATH / 'incomplete_qos_profile.yaml'
bag_path = RESOURCES_PATH / 'empty_bag'
arguments = ['play', '--qos-profile-overrides-path', profile_path.as_posix(),
bag_path.as_posix()]
with self.launch_bag_command(arguments=arguments) as bag_command:
bag_command.wait_for_shutdown(timeout=5)
expected_string_regex = re.compile(ERROR_STRING)
matches = expected_string_regex.search(bag_command.output)
assert not matches, 'ros2bag CLI did not produce the expected output'
| 3,359
| 38.529412
| 85
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_flake8.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
import pytest
@pytest.mark.flake8
@pytest.mark.linter
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, \
'Found %d code style errors / warnings:\n' % len(errors) + \
'\n'.join(errors)
| 884
| 33.038462
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/test/test_pep257.py
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_pep257.main import main
import pytest
@pytest.mark.linter
@pytest.mark.pep257
def test_pep257():
rc = main(argv=[])
assert rc == 0, 'Found code style errors / warnings'
| 792
| 32.041667
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/__init__.py
| 0
| 0
| 0
|
py
|
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/api/__init__.py
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import (
ArgumentParser,
ArgumentTypeError,
FileType,
HelpFormatter,
)
import os
from typing import Any
from typing import Dict
from typing import Optional
from rclpy.duration import Duration
from rclpy.qos import QoSDurabilityPolicy
from rclpy.qos import QoSHistoryPolicy
from rclpy.qos import QoSLivelinessPolicy
from rclpy.qos import QoSProfile
from rclpy.qos import QoSReliabilityPolicy
from ros2cli.entry_points import get_entry_points
import rosbag2_py
# This map needs to be updated when new policies are introduced
_QOS_POLICY_FROM_SHORT_NAME = {
'history': QoSHistoryPolicy.get_from_short_key,
'reliability': QoSReliabilityPolicy.get_from_short_key,
'durability': QoSDurabilityPolicy.get_from_short_key,
'liveliness': QoSLivelinessPolicy.get_from_short_key
}
_DURATION_KEYS = ['deadline', 'lifespan', 'liveliness_lease_duration']
_VALUE_KEYS = ['depth', 'avoid_ros_namespace_conventions']
class SplitLineFormatter(HelpFormatter):
"""Extend argparse HelpFormatter to allow for explicit newlines in help string."""
def _split_lines(self, text, width):
lines = text.splitlines()
result_lines = []
for line in lines:
result_lines.extend(HelpFormatter._split_lines(self, line, width))
return result_lines
def print_error(string: str) -> str:
return '[ERROR] [ros2bag]: {}'.format(string)
def dict_to_duration(time_dict: Optional[Dict[str, int]]) -> Duration:
"""Convert a QoS duration profile from YAML into an rclpy Duration."""
if time_dict:
try:
if (Duration(seconds=time_dict['sec'], nanoseconds=time_dict['nsec']) <
Duration(seconds=0)):
raise ValueError('Time duration may not be a negative value.')
return Duration(seconds=time_dict['sec'], nanoseconds=time_dict['nsec'])
except KeyError:
raise ValueError(
'Time overrides must include both seconds (sec) and nanoseconds (nsec).')
else:
return Duration()
def interpret_dict_as_qos_profile(qos_profile_dict: Dict) -> QoSProfile:
"""Sanitize a user provided dict of a QoS profile and verify all keys are valid."""
new_profile_dict = {}
for policy_key, policy_value in qos_profile_dict.items():
if policy_key in _DURATION_KEYS:
new_profile_dict[policy_key] = dict_to_duration(policy_value)
elif policy_key in _QOS_POLICY_FROM_SHORT_NAME:
new_profile_dict[policy_key] = _QOS_POLICY_FROM_SHORT_NAME[policy_key](policy_value)
elif policy_key in _VALUE_KEYS:
if policy_value < 0:
raise ValueError('`{}` may not be a negative value.'.format(policy_key))
new_profile_dict[policy_key] = policy_value
else:
raise ValueError('Unexpected key `{}` for QoS profile.'.format(policy_key))
return QoSProfile(**new_profile_dict)
def convert_yaml_to_qos_profile(qos_profile_dict: Dict) -> Dict[str, QoSProfile]:
"""Convert a YAML file to use rclpy's QoSProfile."""
topic_profile_dict = {}
for topic, profile in qos_profile_dict.items():
topic_profile_dict[topic] = interpret_dict_as_qos_profile(profile)
return topic_profile_dict
def create_bag_directory(uri: str) -> Optional[str]:
"""Create a directory."""
try:
os.makedirs(uri)
except OSError:
return print_error("Could not create bag folder '{}'.".format(uri))
def check_positive_float(value: Any) -> float:
"""Argparse validator to verify that a value is a float and positive."""
try:
fvalue = float(value)
if fvalue <= 0.0:
raise ArgumentTypeError('{} is not in the valid range (> 0.0)'.format(value))
return fvalue
except ValueError:
raise ArgumentTypeError('{} is not the valid type (float)'.format(value))
def check_path_exists(value: Any) -> str:
"""Argparse validator to verify a path exists."""
try:
if os.path.exists(value):
return value
raise ArgumentTypeError("Bag path '{}' does not exist!".format(value))
except ValueError:
raise ArgumentTypeError('{} is not the valid type (string)'.format(value))
def check_not_negative_int(arg: str) -> int:
    """Argparse validator to verify that a value is an int and not negative."""
    try:
        value = int(arg)
        if value < 0:
            raise ArgumentTypeError(f'Value {value} is less than zero.')
        return value
    except ValueError:
        raise ArgumentTypeError('{} is not the valid type (int)'.format(arg))
def add_standard_reader_args(parser: ArgumentParser) -> None:
reader_choices = rosbag2_py.get_registered_readers()
parser.add_argument(
'bag_path', type=check_path_exists, help='Bag to open')
parser.add_argument(
'-s', '--storage', default='', choices=reader_choices,
help='Storage implementation of bag. '
'By default attempts to detect automatically - use this argument to override.')
def _parse_cli_storage_plugin():
plugin_choices = set(rosbag2_py.get_registered_writers())
default_storage = rosbag2_py.get_default_storage_id()
if default_storage not in plugin_choices:
default_storage = next(iter(plugin_choices))
storage_parser = ArgumentParser(add_help=False)
storage_parser.add_argument(
'-s', '--storage',
default=default_storage,
choices=plugin_choices,
help='Storage implementation of bag. '
'By default attempts to detect automatically - use this argument to override.')
storage_parsed_args, _ = storage_parser.parse_known_args()
plugin_id = storage_parsed_args.storage
if plugin_id not in plugin_choices:
raise ValueError(f'No storage plugin found with ID "{plugin_id}". Found {plugin_choices}.')
return plugin_id
def add_writer_storage_plugin_extensions(parser: ArgumentParser) -> None:
plugin_id = _parse_cli_storage_plugin()
try:
extension = get_entry_points('ros2bag.storage_plugin_cli_extension')[plugin_id].load()
except KeyError:
print(f'No CLI extension module found for plugin name {plugin_id} '
'in entry_point group "ros2bag.storage_plugin_cli_extension".')
        # Command-line arguments should still be added when no extension is present
# None will throw AttributeError for all method calls
extension = None
parser.add_argument(
'--storage-config-file', type=FileType('r'),
help='Path to a yaml file defining storage specific configurations. '
f'See {plugin_id} plugin documentation for the format of this file.')
try:
preset_profiles = extension.get_preset_profiles() or \
[('none', 'Default writer configuration.')]
except AttributeError:
print(f'Storage plugin {plugin_id} does not provide function "get_preset_profiles".')
        preset_profiles = [('none', 'Default writer configuration.')]
default_preset_profile = preset_profiles[0][0]
parser.add_argument(
'--storage-preset-profile', type=str, default=default_preset_profile,
choices=[preset[0] for preset in preset_profiles],
help=f'R|Select a preset configuration for storage plugin "{plugin_id}". '
             'Settings in this profile can still be overridden by other explicit options '
'and --storage-config-file. Profiles:\n' +
'\n'.join([f'{preset[0]}: {preset[1]}' for preset in preset_profiles]))
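# Illustrative sketch (an assumption, not part of the upstream module): how a
# --qos-profile-overrides-path YAML file is typically turned into rclpy
# QoSProfile objects with the helpers above. The file name is a placeholder.
def _example_load_qos_overrides(path='qos_overrides.yaml'):
    import yaml
    with open(path, 'r') as f:
        qos_dict = yaml.safe_load(f)
    # Maps topic name -> rclpy.qos.QoSProfile, raising ValueError on bad keys.
    return convert_yaml_to_qos_profile(qos_dict)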
| 8,130
| 39.054187
| 99
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/command/bag.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.command import add_subparsers_on_demand
from ros2cli.command import CommandExtension
class BagCommand(CommandExtension):
"""Various rosbag related sub-commands."""
def add_arguments(self, parser, cli_name):
self._subparser = parser
# get verb extensions and let them add their arguments
add_subparsers_on_demand(
parser, cli_name, '_verb', 'ros2bag.verb', required=False)
def main(self, *, parser, args):
if not hasattr(args, '_verb'):
# in case no verb was passed
self._subparser.print_help()
return 0
extension = getattr(args, '_verb')
# call the verb's main method
return extension.main(args=args)
| 1,339
| 33.358974
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/command/__init__.py
| 0
| 0
| 0
|
py
|