code stringlengths 17 6.64M |
|---|
def get_optimizer(params, name, **kwargs):
    """Construct a torch optimizer over ``params`` selected by ``name``.

    Args:
        params: iterable of parameters (or param groups) to optimize.
        name: optimizer identifier; one of ``'adam'`` or ``'adamw'``.
        **kwargs: forwarded verbatim to the optimizer constructor
            (e.g. ``lr``, ``betas``, ``weight_decay``).

    Raises:
        NotImplementedError: for any unsupported ``name``.
    """
    if name == 'adam':
        from torch.optim import Adam as opt_cls
    elif name == 'adamw':
        from torch.optim import AdamW as opt_cls
    else:
        raise NotImplementedError(name)
    return opt_cls(params, **kwargs)
|
def customized_lr_scheduler(optimizer, warmup_steps=(- 1)):
    """Linear-warmup LR schedule wrapped in a ``LambdaLR``.

    With ``warmup_steps > 0`` the learning-rate multiplier ramps linearly
    from 0 to 1 over the first ``warmup_steps`` steps, then stays at 1.
    With the default (warmup disabled) the multiplier is a constant 1.
    """
    from torch.optim.lr_scheduler import LambdaLR

    def lr_lambda(step):
        # min(...) clamps the ramp at 1 once warmup has finished.
        return min(step / warmup_steps, 1) if warmup_steps > 0 else 1

    return LambdaLR(optimizer, lr_lambda)
|
def get_lr_scheduler(optimizer, name, **kwargs):
if (name == 'customized'):
return customized_lr_scheduler(optimizer, **kwargs)
elif (name == 'cosine'):
from torch.optim.lr_scheduler import CosineAnnealingLR
return CosineAnnealingLR(optimizer, **kwargs)
else:
raise NotImple... |
def ema(model_dest: nn.Module, model_src: nn.Module, rate):
    """Exponential-moving-average update of ``model_dest`` from ``model_src``.

    For every named parameter, performs in place:
        dest = rate * dest + (1 - rate) * src
    The two models must have matching parameter names and must not share
    parameter tensors.
    """
    src_params = dict(model_src.named_parameters())
    for name, dest_p in model_dest.named_parameters():
        src_p = src_params[name]
        # Sharing a tensor between the models would make the update a no-op/corrupt.
        assert src_p is not dest_p
        dest_p.data.mul_(rate).add_((1 - rate) * src_p.data)
|
class TrainState(object):
def __init__(self, optimizer, lr_scheduler, step, nnet=None, nnet_ema=None):
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.step = step
self.nnet = nnet
self.nnet_ema = nnet_ema
def ema_update(self, rate=0.9999):
if ... |
def cnt_params(model):
    """Return the total number of scalar parameters in ``model``."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total
|
def initialize_train_state(config, device):
params = []
nnet = get_nnet(**config.nnet)
params += nnet.parameters()
nnet_ema = get_nnet(**config.nnet)
nnet_ema.eval()
logging.info(f'nnet has {cnt_params(nnet)} parameters')
optimizer = get_optimizer(params, **config.optimizer)
lr_schedul... |
def amortize(n_samples, batch_size):
    """Split ``n_samples`` into a list of per-batch sizes.

    Returns ``k`` full batches of ``batch_size`` followed by one smaller
    remainder batch when ``n_samples`` is not an exact multiple.
    """
    full, remainder = divmod(n_samples, batch_size)
    sizes = [batch_size] * full
    if remainder:
        sizes.append(remainder)
    return sizes
|
def sample2dir(accelerator, path, n_samples, mini_batch_size, sample_fn, unpreprocess_fn=None):
os.makedirs(path, exist_ok=True)
idx = 0
batch_size = (mini_batch_size * accelerator.num_processes)
for _batch_size in tqdm(amortize(n_samples, batch_size), disable=(not accelerator.is_main_process), desc='... |
def grad_norm(model):
    """Return the global L2 norm of all parameter gradients in ``model``.

    Parameters whose ``.grad`` is ``None`` (frozen layers, or parameters
    unused in the last backward pass) are skipped; the original code
    raised ``AttributeError`` on ``p.grad.data`` in that case.
    """
    total_sq = 0.0
    for p in model.parameters():
        if p.grad is None:
            continue
        total_sq += p.grad.data.norm(2).item() ** 2
    return total_sq ** 0.5
|
def get_dataset(url: str, data_directory: str, file_name: str, unzip: bool):
if (not os.path.exists('data/')):
os.mkdir('data/')
print(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
if (not os.path.exists(data_directory)):
os.makedirs(data_directory)
if (no... |
class AttentionWithContext(tf.keras.layers.Layer):
'\n Attention operation, with a context/query vector, for temporal data.\n Supports Masking.\n Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]\n "Hierarchical Attention Networks for Document Classificat... |
def create_model(n_timesteps, n_features, n_outputs, _dff=512, d_model=128, nh=4, dropout_rate=0.2, use_pe=True):
inputs = tf.keras.layers.Input(shape=(n_timesteps, n_features))
(si, _) = SensorAttention(n_filters=128, kernel_size=3, dilation_rate=2)(inputs)
x = tf.keras.layers.Conv1D(d_model, 1, activati... |
def point_wise_feed_forward_network(d_model, dff):
    """Position-wise two-layer MLP: Dense(dff, relu) followed by Dense(d_model)."""
    hidden = tf.keras.layers.Dense(dff, activation='relu')
    projection = tf.keras.layers.Dense(d_model)
    return tf.keras.Sequential([hidden, projection])
|
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = point_wise_feed_forward_network(d_model, dff)
self.layernorm1 = tf.keras.layers.Layer... |
def scaled_dot_product_attention(q, k, v, mask):
'Calculate the attention weights.\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead)\n but it must b... |
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert ((d_model % self.num_heads) == 0)
self.depth = (d_model // self.num_heads)
s... |
class SensorAttention(tf.keras.layers.Layer):
def __init__(self, n_filters, kernel_size, dilation_rate):
super(SensorAttention, self).__init__()
self.conv_1 = tf.keras.layers.Conv2D(n_filters, kernel_size=kernel_size, dilation_rate=dilation_rate, padding='same', activation='relu')
self.co... |
class data_reader():
def __init__(self, train_test_split, cols):
(self.data, self.idToLabel) = self.readOpportunity(train_test_split, cols)
self.save_data()
def save_data(self):
f = h5py.File('data/processed/opportunity.h5')
for key in self.data:
f.create_group(ke... |
def windowz(data, size, use_overlap=True, overlap=0.5):
    """Yield (start, end) index pairs of sliding windows over ``data``.

    With ``use_overlap`` the stride is ``size - int(size * overlap)``;
    otherwise consecutive windows are disjoint. The final window's end
    may extend past ``len(data)``; callers are expected to clip it.
    """
    stride = size - int(size * overlap) if use_overlap else size
    start = 0
    while start < len(data):
        yield start, start + size
        start += stride
|
def segment_opp(x_train, y_train, window_size, n_sensor_val):
segments = np.zeros((((len(x_train) // (window_size // 2)) - 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_train) // (window_size // 2)) - 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_train, window_size):
... |
def segment_opp_test(x_test, y_test, window_size, n_sensor_val):
segments = np.zeros((((len(x_test) // window_size) + 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_test) // window_size) + 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_test, window_size, use_overlap=False):... |
def unsegment_opp_test(y_preds, total_length, window_size):
unsegmented_preds = np.zeros((total_length,))
start = 0
end = window_size
for element in y_preds:
if (end >= total_length):
end = total_length
for i in range(start, end):
unsegmented_preds[i] = element
... |
def get_opp_data():
data_config_file = open('configs/data.yaml', mode='r')
data_config = yaml.load(data_config_file, Loader=yaml.FullLoader)
config = data_config['opp']
cols = (np.array(config['feature_columns']) - 1)
train_test_split = {'train': config['train_files'], 'test': config['test_files']... |
def preprocess(n_sensor_val=77, verbose=False):
path = os.path.join('data/processed/opportunity.h5')
f = h5py.File(path, 'r')
x_train = f.get('train').get('inputs')[()]
y_train = f.get('train').get('targets')[()]
x_val = f.get('validation').get('inputs')[()]
y_val = f.get('validation').get('ta... |
class data_reader():
def __init__(self, train_test_files, use_columns, output_file_name):
if (not os.path.exists(output_file_name)):
(self.data, self.idToLabel) = self.readPamap2(train_test_files, use_columns)
self.save_data(output_file_name)
def save_data(self, output_file_n... |
def read_dataset(train_test_files, use_columns, output_file_name):
    """Parse the raw PAMAP2 files and cache them via ``data_reader``.

    Purely side-effecting: ``data_reader`` writes processed data to
    ``output_file_name`` (and skips the work when that file already
    exists); nothing is returned.
    """
    print('[Reading PAMAP2] ...')
    data_reader(train_test_files, use_columns, output_file_name)
    print('[Reading PAMAP2] : DONE')
|
def windowz(data, size, use_overlap=True):
    """Yield (start, start + size) window index pairs over ``data``.

    Overlapping windows advance by half a window (``size // 2``);
    otherwise each window starts where the previous one ended. The last
    window may run past the end of ``data``.
    """
    step = size // 2 if use_overlap else size
    pos = 0
    while pos < len(data):
        yield pos, pos + size
        pos += step
|
def segment_pa2_test(x_test, y_test, window_size, n_sensor_val):
segments = np.zeros((((len(x_test) // window_size) + 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_test) // window_size) + 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_test, window_size, use_overlap=False):... |
def segment_pa2(x_train, y_train, window_size, n_sensor_val):
segments = np.zeros((((len(x_train) // (window_size // 2)) - 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_train) // (window_size // 2)) - 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_train, window_size):
... |
def unsegment_pa2_test(y_preds, total_length, window_size):
unsegmented_preds = np.zeros((total_length,))
start = 0
end = window_size
for element in y_preds:
if (end >= total_length):
end = total_length
for i in range(start, end):
unsegmented_preds[i] = element
... |
def segment_window_all(x_train, y_train, window_size, n_sensor_val):
window_segments = np.zeros((len(x_train), window_size, n_sensor_val))
labels = np.zeros((len(y_train),))
total_len = len(x_train)
for i in range(total_len):
end = (i + window_size)
if (end > total_len):
pa... |
def get_skoda_data():
data_config_file = open('configs/data.yaml', mode='r')
data_config = yaml.load(data_config_file, Loader=yaml.FullLoader)
data_dict = sio.loadmat(file_name=data_config['skoda']['data_file'], squeeze_me=True)
all_data = data_dict[list(data_dict.keys())[3]]
(x_train, y_train, x_... |
def read_dir(directory):
subject = []
act_num = []
sensor_readings = []
for (path, subdirs, files) in os.walk(directory):
for name in files:
if name.endswith('.mat'):
mat = scipy.io.loadmat(os.path.join(path, name))
subject.extend(mat['subject'])
... |
def read_uschad(save_csv=False):
(subject, act_num, sensor_readings) = read_dir('data/raw/uschad/USC-HAD')
acc_x = []
acc_y = []
acc_z = []
gyr_x = []
gyr_y = []
gyr_z = []
act_label = []
subject_id = []
for i in range(840):
for j in sensor_readings[i]:
acc_... |
def windowz(data, size, use_overlap=True):
    """Generate (start, end) window index pairs over ``data``.

    Overlapping windows advance by half a window; non-overlapping ones by
    a full window. The final window may extend beyond ``len(data)``.
    """
    if use_overlap:
        step = size // 2
    else:
        step = size
    cursor = 0
    while cursor < len(data):
        yield (cursor, cursor + size)
        cursor += step
|
def segment_window_test(x_test, y_test, window_size, n_sensor_val):
segments = np.zeros((((len(x_test) // window_size) + 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_test) // window_size) + 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_test, window_size, use_overlap=Fals... |
def segment_window(x_train, y_train, window_size, n_sensor_val):
segments = np.zeros((((len(x_train) // (window_size // 2)) - 1), window_size, n_sensor_val))
labels = np.zeros(((len(y_train) // (window_size // 2)) - 1))
i_segment = 0
i_label = 0
for (start, end) in windowz(x_train, window_size):
... |
def unsegment_window_test(y_preds, total_length, window_size):
unsegmented_preds = np.zeros((total_length,))
start = 0
end = window_size
for element in y_preds:
if (end >= total_length):
end = total_length
for i in range(start, end):
unsegmented_preds[i] = eleme... |
def segment_window_all(x_train, y_train, window_size, n_sensor_val):
window_segments = np.zeros((len(x_train), window_size, n_sensor_val))
labels = np.zeros((len(y_train),))
total_len = len(x_train)
for i in range(total_len):
end = (i + window_size)
if (end > total_len):
pa... |
def sliding_window(x_train, y_train, x_validation, y_validation, x_test, y_test, window_size, n_sensor_val, shuffle=False, verbose=False):
input_width = window_size
if verbose:
print('Window Size :', input_width)
print('Segmenting Signal...')
(train_x, train_y) = segment_window(x_train, y_... |
def get_data(dataset: str):
print(f'[Loading {dataset} data]')
if (dataset == 'pamap2'):
((train_x, train_y), (val_x, val_y), (test_x, test_y), y_test) = get_pamap2_data()
return (train_x, train_y, val_x, val_y, test_x, test_y)
elif (dataset == 'skoda'):
((train_x, train_y), (val_x... |
def generate_result(dataset, ground_truth, prediction):
activity_map = json.load(open(os.path.join('configs', 'activity_maps', (dataset + '.json'))))
activity_names = list(activity_map.values())
print('\n[CLASSIFICATION REPORT]')
print(classification_report(np.argmax(ground_truth, axis=1), np.argmax(p... |
def test_model(dataset: str, model_config, test_x):
if os.path.exists(os.path.join(model_config['dirs']['saved_models'], dataset)):
model = tf.keras.models.load_model(os.path.join(model_config['dirs']['saved_models'], dataset))
else:
print('PLEASE, TRAIN THE MODEL FIRST OR PUT PRETRAINED MODEL... |
def train_model(dataset: str, model_config, train_x, train_y, val_x, val_y, epochs, save_model=True):
(n_timesteps, n_features, n_outputs) = (train_x.shape[1], train_x.shape[2], train_y.shape[1])
model = create_model(n_timesteps, n_features, n_outputs, d_model=model_config[dataset]['d_model'], nh=model_config... |
def _parse_args():
(args_config, remaining) = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
args = parser.parse_args(remaining)
args_text = yaml.safe_dump(args.__dict__... |
def load_init_checkpoint(model, checkpoint_path):
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):
_logger.info('Restoring model state from checkpoint...')
ne... |
def main():
setup_default_logging()
(args, args_text) = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. Metrics not being logged to... |
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, entropy_thr=None):
if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
if (args.prefetcher and loader.mixup... |
@torch.no_grad()
def concat_all_gather(tensor):
'\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n '
tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gathe... |
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = (len(loader) - 1)
with torch.no_grad():
for (batch... |
def _parse_args():
(args_config, remaining) = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
args = parser.parse_args(remaining)
args_text = yaml.safe_dump(args.__dict__... |
def main():
setup_default_logging()
(args, args_text) = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. Metrics not being logged to... |
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, entropy_thr=None):
if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
if (args.prefetcher and loader.mixup... |
@torch.no_grad()
def concat_all_gather(tensor):
'\n Performs all_gather operation on the provided tensors.\n *** Warning ***: torch.distributed.all_gather has no gradient.\n '
tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gathe... |
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = (len(loader) - 1)
with torch.no_grad():
for (batch... |
class Hourglass(nn.Module):
def __init__(self):
super(Hourglass, self).__init__()
self.leaky_relu = nn.LeakyReLU()
self.d_conv_1 = nn.Conv2d(2, 8, 5, stride=2, padding=2)
self.d_bn_1 = nn.BatchNorm2d(8)
self.d_conv_2 = nn.Conv2d(8, 16, 5, stride=2, padding=2)
self.... |
def timestamp(sync=False):
    """Return a high-resolution monotonic timestamp in seconds.

    ``sync`` is accepted only for signature parity with ``cuda_timestamp``
    and is ignored here — there is nothing to synchronize for CPU timing.
    """
    return time.perf_counter()
|
def cuda_timestamp(sync=False, device=None):
    """Return a perf_counter timestamp, optionally after a CUDA sync.

    When ``sync`` is true, ``torch.cuda.synchronize(device)`` is called
    first so pending GPU work is included in the measured interval.
    """
    if not sync:
        return time.perf_counter()
    torch.cuda.synchronize(device=device)
    return time.perf_counter()
|
def count_params(model: nn.Module):
    """Return the total element count across all parameters of ``model``."""
    return sum(p.numel() for p in model.parameters())
|
def resolve_precision(precision: str):
assert (precision in ('amp', 'float16', 'bfloat16', 'float32'))
use_amp = False
model_dtype = torch.float32
data_dtype = torch.float32
if (precision == 'amp'):
use_amp = True
elif (precision == 'float16'):
model_dtype = torch.float16
... |
def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
    """Profile ``model`` with deepspeed's flops profiler.

    Returns ``(macs, 0)``; the second element is a placeholder so the
    result shape matches other ``profile_*`` helpers.
    """
    profile = get_model_profile(
        model=model,
        input_shape=((batch_size,) + input_size),
        print_profile=detailed,
        detailed=detailed,
        warm_up=10,
        as_string=False,
        output_file=None,
        ignore_modules=None,
    )
    _, macs, _ = profile
    return (macs, 0)
|
def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False):
if force_cpu:
model = model.to('cpu')
(device, dtype) = (next(model.parameters()).device, next(model.parameters()).dtype)
example_input = torch.ones(((batch_size,) + input_size), device=device, dtyp... |
class BenchmarkRunner():
def __init__(self, model_name, detail=False, device='cuda', torchscript=False, aot_autograd=False, precision='float32', fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs):
self.model_name = model_name
self.detail = detail
self.device = ... |
class InferenceBenchmarkRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.eval()
def run(self):
def _step():
t_step_sta... |
class TrainBenchmarkRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.train()
self.loss = nn.CrossEntropyLoss().to(self.device)
s... |
class ProfileRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', profiler='', **kwargs):
super().__init__(model_name=model_name, device=device, **kwargs)
if (not profiler):
if has_deepspeed_profiling:
profiler = 'deepspeed'
elif has_fvcor... |
def _try_run(model_name, bench_fn, bench_kwargs, initial_batch_size, no_batch_size_retry=False):
batch_size = initial_batch_size
results = dict()
error_str = 'Unknown'
while batch_size:
try:
torch.cuda.empty_cache()
bench = bench_fn(model_name=model_name, batch_size=bat... |
def benchmark(args):
if args.amp:
_logger.warning("Overriding precision to 'amp' since --amp flag set.")
args.precision = 'amp'
_logger.info(f"Benchmarking in {args.precision} precision. {('NHWC' if args.channels_last else 'NCHW')} layout. torchscript {('enabled' if args.torchscript else 'disa... |
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if args.fast_norm:
set_fast_norm()
if args.model_list:
args.model = ''
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_... |
def write_results(results_file, results):
    """Write a list of result dicts to ``results_file`` as CSV.

    Column order follows the keys of the first result dict; all dicts are
    expected to share those keys.
    """
    # newline='' is required by the csv module; without it the writer
    # emits blank interleaved lines on Windows.
    with open(results_file, mode='w', newline='') as cf:
        dw = csv.DictWriter(cf, fieldnames=results[0].keys())
        dw.writeheader()
        for r in results:
            dw.writerow(r)
        cf.flush()
|
class InceptionDWConv2d(nn.Module):
' Inception depthweise convolution\n '
def __init__(self, in_channels, square_kernel_size=3, band_kernel_size=11, branch_ratio=0.125):
super().__init__()
gc = int((in_channels * branch_ratio))
self.dwconv_hw = nn.Conv2d(gc, gc, square_kernel_size... |
class ConvMlp(nn.Module):
' MLP using 1x1 convs that keeps spatial dims\n copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py\n '
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=Tr... |
class MlpHead(nn.Module):
' MLP classification head\n '
def __init__(self, dim, num_classes=1000, mlp_ratio=3, act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), drop=0.0, bias=True):
super().__init__()
hidden_features = int((mlp_ratio * dim))
self.fc1 = nn.Linear(dim,... |
class MetaNeXtBlock(nn.Module):
' MetaNeXtBlock Block\n Args:\n dim (int): Number of input channels.\n drop_path (float): Stochastic depth rate. Default: 0.0\n ls_init_value (float): Init value for Layer Scale. Default: 1e-6.\n '
def __init__(self, dim, token_mixer=nn.Identity, nor... |
class MetaNeXtStage(nn.Module):
def __init__(self, in_chs, out_chs, ds_stride=2, depth=2, drop_path_rates=None, ls_init_value=1.0, token_mixer=nn.Identity, act_layer=nn.GELU, norm_layer=None, mlp_ratio=4):
super().__init__()
self.grad_checkpointing = False
if (ds_stride > 1):
... |
class MetaNeXt(nn.Module):
' MetaNeXt\n A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/pdf/2203.xxxxx.pdf\n\n Args:\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default:... |
def _cfg(url='', **kwargs):
    """Build a default pretrained-model config dict; ``kwargs`` override defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 0.875,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'stem.0',
        'classifier': 'head.fc',
    }
    cfg.update(kwargs)
    return cfg
|
@register_model
def inceptionnext_tiny(pretrained=False, **kwargs):
model = MetaNeXt(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d, **kwargs)
model.default_cfg = default_cfgs['inceptionnext_tiny']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=mo... |
@register_model
def inceptionnext_small(pretrained=False, **kwargs):
model = MetaNeXt(depths=(3, 3, 27, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d, **kwargs)
model.default_cfg = default_cfgs['inceptionnext_small']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url... |
@register_model
def inceptionnext_base(pretrained=False, **kwargs):
model = MetaNeXt(depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024), token_mixers=InceptionDWConv2d, **kwargs)
model.default_cfg = default_cfgs['inceptionnext_base']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url... |
@register_model
def inceptionnext_base_384(pretrained=False, **kwargs):
model = MetaNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], mlp_ratios=[4, 4, 4, 3], token_mixers=InceptionDWConv2d, **kwargs)
model.default_cfg = default_cfgs['inceptionnext_base_384']
if pretrained:
state_dict = torch... |
def _parse_args():
(args_config, remaining) = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
args = parser.parse_args(remaining)
args_text = yaml.safe_dump(args.__dict__... |
def main():
utils.setup_default_logging()
(args, args_text) = _parse_args()
args.prefetcher = (not args.no_prefetcher)
args.distributed = False
if ('WORLD_SIZE' in os.environ):
args.distributed = (int(os.environ['WORLD_SIZE']) > 1)
args.device = 'cuda:0'
args.world_size = 1
arg... |
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None, grad_accum_steps=1, num_training_steps_per_epoch=None):
if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
... |
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = utils.AverageMeter()
losses_m = utils.AverageMeter()
top1_m = utils.AverageMeter()
top5_m = utils.AverageMeter()
model.eval()
end = time.time()
last_idx = (len(loader) - 1)
with torch.no_gr... |
class ApexScalerAccum():
state_dict_key = 'amp'
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, update_grad=True):
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(create_graph=create_graph)
if upd... |
class NativeScalerAccum():
state_dict_key = 'amp_scaler'
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_g... |
class Algo(abc.ABC):
'An Algo object corresponds to a learning agent which contains parameters and training steps.'
@abc.abstractmethod
def train(self, batch, **kwargs):
'Train step of an agent.'
@abc.abstractmethod
def _train_step(self, train_state, target_params, rng, batch, **kwargs):... |
class Trainer(abc.ABC):
'A Trainer object implements the training loop of an Algo.'
@abc.abstractmethod
def train(self):
'The training loop function.'
@abc.abstractmethod
def _setup(self):
'Set up the trainer, including logger, dataset samplers, networks, and the corresponding ag... |
class Dataset(object):
'Dataset.'
def __init__(self, data: dict) -> None:
self._data = data
self._keys = list(data.keys())
self._sampler = None
def size(self):
return len(self._data[self._keys[0]])
def retrieve(self, indices: np.ndarray):
'Get a batch of data... |
class RLUPDataset(object):
'RL Uplugged dataset.'
def __init__(self, task_class, task_name, dataset_path, num_threads=8, batch_size=256, num_shards=100, shuffle_buffer_size=100, action_clipping=1, sarsa=True) -> None:
self._batch_size = batch_size
self._num_shards = num_shards
self._s... |
class DM2Gym(object):
def __init__(self, env) -> None:
self._env = env
@property
def action_space(self):
action_spec = self._env.action_spec()
return spaces.Box(low=action_spec.minimum, high=action_spec.maximum, shape=action_spec.shape, dtype=action_spec.dtype)
@property
... |
def traj_fn(traj_length):
def step_proc_fn(batch):
obs = tf.concat(list(batch[rlds.OBSERVATION].values), axis=(- 1))
return {rlds.OBSERVATION: obs, rlds.REWARD: batch[rlds.REWARD], rlds.ACTION: batch[rlds.ACTION], rlds.IS_FIRST: batch[rlds.IS_FIRST], rlds.IS_LAST: batch[rlds.IS_LAST]}
def ma... |
class OfflineDataset():
def __init__(self, domain='rlu_control_suite', task='walker_walk', batch_size=256, episode_shuffle_size=10, traj_length=10, shuffle_num_steps=50000, buffer_size=10) -> None:
self._domain = domain
self._task = task
self._obs_keys = []
if ('control_suite' in ... |
class TransitionDataset(OfflineDataset):
def __init__(self, domain='rlu_control_suite', task='walker_walk', batch_size=256, episode_shuffle_size=10, shuffle_num_steps=50000) -> None:
super().__init__(domain, task, batch_size, episode_shuffle_size, 2, shuffle_num_steps)
def sample(self):
seq_... |
class RandSampler(object):
'A random sampler.'
def __init__(self, max_size: int, batch_size: int=1) -> None:
self._max_size = max_size
self._batch_size = batch_size
def sample(self):
'Return an array of sampled indices.'
return np.random.randint(self._max_size, size=self.... |
class ENV(IntEnum):
    """Environment-suite identifiers used to select task-specific handling.

    IntEnum so members compare and serialize as plain integers (e.g. in
    configs or command-line arguments).
    """
    Adroit = 1
    Kitchen = 2
    Mujoco = 3
    Antmaze = 4
|
class DATASET(IntEnum):
    """Offline-RL data-source identifiers (D4RL vs. RL Unplugged).

    IntEnum so members compare and serialize as plain integers.
    """
    D4RL = 1
    RLUP = 2
|
def mean_flat(tensor):
    """Reduce ``tensor`` by averaging over every axis except the batch axis (axis 0)."""
    non_batch_axes = list(range(1, len(tensor.shape)))
    return tensor.mean(axis=non_batch_axes)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.