code stringlengths 101 5.91M |
|---|
def bni_loss(pred, target, noise_var, bucket_centers, bucket_weights):
    """Balanced-MSE (bucket/BNI variant) regression loss.

    pred, target: (batch, 1) tensors. noise_var: scalar noise variance tensor.
    bucket_centers / bucket_weights: (n_buckets,) tensors describing the
    label-distribution buckets used for the balancing term.
    Returns the mean per-sample loss (scalar tensor).
    """
    n_buckets = bucket_centers.shape[0]
    batch = pred.shape[0]
    # Gaussian NLL data term per sample: ||pred - target||^2 / (2 * sigma^2).
    data_term = F.mse_loss(pred, target, reduction='none') / 2 / noise_var
    # Tile the bucket statistics across the batch: shapes become (batch, n_buckets).
    centers = bucket_centers.unsqueeze(0).repeat(batch, 1)
    weights = bucket_weights.unsqueeze(0).repeat(batch, 1)
    # log sum_k w_k * N(pred; center_k, sigma^2), up to a shared constant.
    diff_sq = (pred.expand(-1, n_buckets) - centers).pow(2)
    log_mix = ((-0.5) * diff_sq) / noise_var + weights.log()
    balance_term = torch.logsumexp(log_mix, dim=-1, keepdim=True)
    # Rescale by 2*sigma^2 (detached so the scale does not receive gradient).
    total = (data_term + balance_term) * (2 * noise_var).detach()
    return total.mean()
def run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode, impl='triton', warmup=20, rep=100):
    """Benchmark HyperAttention latency on CUDA with triton's do_bench.

    mode: 'fwd' times forward only; 'bwd' times backward only; any other
    value times both and returns the elementwise sum of the two timings.
    Returns the (20th percentile, median, 80th percentile) latencies.
    """
    (q, k, v) = get_tensors(batch_size, head_size, seq_len, dim)
    block_size = 256
    sample_size = 256
    cuda = (impl == 'cuda')
    attn = HyperAttention(input_dim=dim, block_size=block_size, sample_size=sample_size, min_seq_len=4096, cuda=cuda).to(device='cuda', dtype=q.dtype)
    fn = (lambda : attn(q, k, v, causal=causal))
    if (mode == 'fwd'):
        return triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
    elif (mode == 'bwd'):
        # Run the forward pass once outside the timed region, then time only
        # the backward pass (retain_graph so it can be replayed each rep).
        o = fn()
        do = torch.randn_like(o)
        fn = (lambda : o.backward(do, retain_graph=True))
        return triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
    else:
        # fwd+bwd: time each phase separately and sum per-percentile.
        (q20_fwd, median_fwd, q80_fwd) = triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
        o = fn()
        do = torch.randn_like(o)
        fn = (lambda : o.backward(do, retain_graph=True))
        (q20_bwd, median_bwd, q80_bwd) = triton.testing.do_bench(fn, warmup=warmup, rep=rep, percentiles=[0.2, 0.5, 0.8])
        return ((q20_fwd + q20_bwd), (median_fwd + median_bwd), (q80_fwd + q80_bwd))
def add_plot_parser(subparsers):
    """Register the 'plot_curve' sub-command for plotting metrics from JSON train logs."""
    parser_plt = subparsers.add_parser('plot_curve', help='parser for plotting curves')
    parser_plt.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
    parser_plt.add_argument('--keys', type=str, nargs='+', default=['bbox_mAP'], help='the metric that you want to plot')
    parser_plt.add_argument('--title', type=str, help='title of figure')
    parser_plt.add_argument('--legend', type=str, nargs='+', default=None, help='legend of each plot')
    parser_plt.add_argument('--backend', type=str, default=None, help='backend of plt')
    parser_plt.add_argument('--style', type=str, default='dark', help='style of plt')
    # Optional output path; when None the figure is shown instead of saved.
    parser_plt.add_argument('--out', type=str, default=None)
class INAdaptiveClient(AdaptiveClient):
    """Adaptive federated-learning client with SGD + StepLR configuration."""

    def init_optimizer(self):
        # SGD with module-level hyperparameter constants; the StepLR gamma is
        # chosen so the learning rate halves every LR_HALF_LIFE steps.
        self.optimizer = SGD(self.model.parameters(), lr=INIT_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
        self.optimizer_scheduler = lr_scheduler.StepLR(self.optimizer, step_size=STEP_SIZE, gamma=(0.5 ** (STEP_SIZE / LR_HALF_LIFE)))
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer, self.optimizer_scheduler)

    def init_train_loader(self, tl):
        # Inject an externally-constructed train DataLoader.
        self.train_loader = tl
def convert(file):
    """Convert a short video to .mp4 (audio stripped), deleting the original.

    Behavior (preserved from the original):
      - duration < 30s and extension in {.mov,.avi,.flv,.wmv}: transcode to
        .mp4 via ffmpeg, delete the source, return the new path;
      - duration < 30s and already .mp4: keep the file, return its path;
      - any other extension, or duration >= 30s: DELETE the file and return
        its (now nonexistent) path.

    Fix: the ffmpeg command previously interpolated raw paths into a shell
    string, which broke on filenames containing spaces and allowed shell
    injection; paths are now quoted with shlex.quote.
    """
    import shlex  # local import: only needed for quoting the shell command
    clip = VideoFileClip(file)
    duration = clip.duration
    if (duration < 30):
        if (file[(- 4):] in ['.mov', '.avi', '.flv', '.wmv']):
            filename = (file[0:(- 4)] + '.mp4')
            # -an drops the audio stream; paths are shell-quoted.
            os.system('ffmpeg -i %s -an %s' % (shlex.quote(file), shlex.quote(filename)))
            os.remove(file)
        elif (file[(- 4):] == '.mp4'):
            filename = file
        else:
            # Unsupported extension: discard the file (original behavior).
            filename = file
            os.remove(file)
    else:
        # Clips of 30s or longer are discarded entirely (original behavior).
        filename = file
        os.remove(file)
    return filename
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step learning-rate schedule with an initial warmup phase.

    During the first `warmup_iters` epochs the base LR is scaled by a warmup
    factor ('constant' uses `warmup_factor` directly; 'linear' interpolates
    from `warmup_factor` to 1). Afterwards the LR decays by `gamma` at each
    milestone, exactly like MultiStepLR.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=(1.0 / 3), warmup_iters=5, warmup_method='linear', last_epoch=(- 1)):
        if (not (milestones == sorted(milestones))):
            # Bug fix: the milestones were previously passed as a second
            # positional argument to ValueError instead of being formatted
            # into the message string.
            raise ValueError('Milestones should be a list of increasing integers. Got {}'.format(milestones))
        if (warmup_method not in ('constant', 'linear')):
            # Bug fix: the original message was missing a separator
            # ("acceptedgot").
            raise ValueError("Only 'constant' or 'linear' warmup_method accepted, got {}".format(warmup_method))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the per-group LR for the current `last_epoch`."""
        warmup_factor = 1
        if (self.last_epoch < self.warmup_iters):
            if (self.warmup_method == 'constant'):
                warmup_factor = self.warmup_factor
            elif (self.warmup_method == 'linear'):
                # Linearly interpolate warmup_factor -> 1 over warmup_iters.
                alpha = (float(self.last_epoch) / self.warmup_iters)
                warmup_factor = ((self.warmup_factor * (1 - alpha)) + alpha)
        # bisect_right counts how many milestones have been passed.
        return [((base_lr * warmup_factor) * (self.gamma ** bisect_right(self.milestones, self.last_epoch))) for base_lr in self.base_lrs]
def run_hps(cfg, uuid):
    """Sample hyperparameters log-uniformly and re-launch the target script.

    Each entry of cfg['hps_kwargs']['hp'] maps a (flattened) config key to a
    (low, high) base-10 exponent range; a value 10**U(low, high) is sampled
    and appended as a `cfg.<key>=<value>` CLI override.

    Bug fix: the argument list previously ALIASED sys.argv (argv_plus_hps =
    sys.argv), so the sampled overrides leaked into the process-wide argv;
    it is now an explicit copy.
    """
    print(cfg)
    argv_plus_hps = list(sys.argv)  # copy — do not mutate sys.argv itself
    script_name = argv_plus_hps[0]
    script_name = script_name.replace('.py', '').replace('/', '.')
    script_name = (script_name[1:] if script_name.startswith('.') else script_name)
    for (hp, hp_range) in flatten(cfg['hps_kwargs']['hp']).items():
        # Log-uniform sample: 10 ** Uniform(low, high).
        hp_val = np.power(10, np.random.uniform(*hp_range))
        argv_plus_hps.append(f'cfg.{hp}={hp_val}')
    # Redirect the invocation at the configured target script.
    argv_plus_hps = [a.replace('run_hps', cfg['hps_kwargs']['script_name']) for a in argv_plus_hps]
    argv_plus_hps.append(f'uuid={uuid}_hps_run')
    print(f"python -m {script_name} {LOG_DIR} {' '.join(argv_plus_hps[1:])}")
    ex.run_commandline(argv=argv_plus_hps)
class CWRU(object):
    """Case Western Reserve University bearing-fault dataset wrapper."""

    # 10 fault classes; the vibration signal has a single channel.
    num_classes = 10
    inputchannel = 1

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        # Normalization scheme name forwarded to data_transforms.
        # (Attribute keeps the project's original spelling.)
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        """Build dataset objects (method name keeps the original spelling).

        test=True returns a single untransformed test dataset; otherwise
        returns a (train, val) pair from a stratified 80/20 split.
        """
        list_data = get_files(self.data_dir, test)
        if test:
            test_dataset = dataset(list_data=list_data, test=True, transform=None)
            return test_dataset
        else:
            data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
            # Fixed random_state for a reproducible stratified split.
            (train_pd, val_pd) = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
            train_dataset = dataset(list_data=train_pd, transform=data_transforms('train', self.normlizetype))
            val_dataset = dataset(list_data=val_pd, transform=data_transforms('val', self.normlizetype))
            return (train_dataset, val_dataset)
def diapreresnet1202_cifar10(num_classes=10, **kwargs):
    """DIA-PreResNet-1202 for CIFAR-10 (basic blocks, no bottleneck)."""
    return get_diapreresnet_cifar(num_classes=num_classes, blocks=1202, bottleneck=False, model_name='diapreresnet1202_cifar10', **kwargs)
def torch2onnx(model: SymbolNet, exportable: Union[(str, BytesIO)], verbose=False, dummy_inputs=None, do_constant_folding=False) -> None:
    """Export a SymbolNet model to ONNX (opset 14).

    exportable: destination file path or writable BytesIO.
    dummy_inputs: optional tracing inputs; when None, tensors with values
    drawn uniformly from [1, 2) are synthesized from the model's declared
    input shapes/dtypes.
    Proxy gradients are disabled for the duration of the export and restored
    afterwards if they were enabled.
    """
    proxy_enabled = model.proxy_enabled
    if proxy_enabled:
        model.disable_proxy_grad()
    if (dummy_inputs is None):
        dummy_inputs = [torch.ones(size=svar.shape).uniform_(1, 2).to(dtype=svar.dtype.torch()) for (_, svar) in model.input_like.items()]
    input_names = list(model.input_like.keys())
    with torch.no_grad():
        with warnings.catch_warnings():
            # Tracer/user warnings are suppressed unless verbose was requested.
            warnings.simplefilter(('default' if verbose else 'ignore'), category=torch.jit.TracerWarning)
            warnings.simplefilter(('default' if verbose else 'ignore'), category=UserWarning, append=True)
            model.eval()
            torch.onnx.export(model, tuple(dummy_inputs), exportable, input_names=input_names, output_names=list(model.output_like.keys()), verbose=verbose, do_constant_folding=do_constant_folding, opset_version=14)
    if proxy_enabled:
        model.enable_proxy_grad()
def test_deflate():
    """Smoke-test deflation of near-singular solutions of a small 2-variable
    polynomial system in standard, double-double, and quad-double precision.

    The hard-coded `sols` strings are PHCpack solution records (one exact
    solution and three inaccurate copies of a clustered solution).
    """
    pols = ['x**2+y-3;', 'x+0.125*y**2-1.5;']
    sols = [((((('t : 1.E+00 0.E+00\n' + 'm : 1\n') + 'the solution for t :\n') + ' x : -3.E+00 0.E+00\n') + ' y : -6.E+00 0.E+00\n') + '== err : 0.000E+00 = rco : 1.965E-01 = res : 0.000E+00 =='), ((((('t : 1.E+00 0.E+00\n' + 'm : 1\n') + 'the solution for t :\n') + ' x : 9.E-01 2.E-08\n') + ' y : 2.E+00 -5.E-08\n') + '== err : 6.675E-06 = rco : 2.922E-12 = res : 7.423E-12 =='), ((((('t : 1.E+00 0.E+00\n' + 'm : 1\n') + 'the solution for t :\n') + ' x : 9.E-01 -5.E-06\n') + ' y : 2.E+00 1.E-05\n') + '== err : 3.885E-06 = rco : 9.307E-12 = res : 1.863E-12 =='), ((((('t : 1.E+00 0.E+00\n' + 'm : 1\n') + 'the solution for t :\n') + ' x : 9.E-01 -3.E-06\n') + ' y : 2.E+00 6.E-06\n') + '== err : 8.602E-08 = rco : 3.611E-12 = res : 7.957E-16 ==')]
    print('the system :')
    print(pols)
    print('the solutions before deflation :')
    for sol in sols:
        print(sol)
    # Deflate in each precision level and print the refined solutions.
    result = standard_deflate(pols, sols)
    print('the solutions after deflation in standard double precision:')
    for sol in result:
        print(sol)
    result = dobldobl_deflate(pols, sols)
    print('the solutions after deflation in double double precision:')
    for sol in result:
        print(sol)
    result = quaddobl_deflate(pols, sols)
    print('the solutions after deflation in quad double precision:')
    for sol in result:
        print(sol)
def fetch_requirements(path):
    """Return every line of the requirements file at *path*, whitespace-stripped."""
    with open(path, 'r') as req_file:
        return [entry.strip() for entry in req_file.readlines()]
class WhitespaceTokenizer(object):
    """Minimal spaCy-style tokenizer that splits on single spaces only."""

    def __init__(self, vocab):
        # Vocab shared with the pipeline that consumes the produced Doc objects.
        self.vocab = vocab

    def __call__(self, text):
        tokens = text.split(' ')
        # Mark every token as followed by a space, matching the split above.
        trailing = [True for _ in tokens]
        return Doc(self.vocab, words=tokens, spaces=trailing)
def index_to_mask(index, size):
    """Return a boolean mask of length *size* that is True at the positions in *index*."""
    out = torch.zeros(size, dtype=torch.bool)
    out[index] = True
    return out
def train(model, training_data, validation_data, device, opt):
    """Full training loop: BertAdam optimization, optional EMA weights,
    per-epoch validation with language metrics (METEOR/BLEU/CIDEr/re4),
    checkpointing ('all' or 'best' on CIDEr), and CIDEr-based early stopping.

    Side effects: writes TensorBoard scalars to opt.res_dir, optional CSV
    logs to opt.log + '.{train,valid}.log', checkpoints/config next to
    opt.save_model.
    """
    model = model.to(device)
    param_optimizer = list(model.named_parameters())
    # BERT-style weight decay: biases and LayerNorm parameters are exempt.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    # Optional exponential moving average of the weights (-1 disables it).
    if (opt.ema_decay != (- 1)):
        ema = EMA(opt.ema_decay)
        for (name, p) in model.named_parameters():
            if p.requires_grad:
                ema.register(name, p.data)
    else:
        ema = None
    num_train_optimization_steps = (len(training_data) * opt.n_epoch)
    optimizer = BertAdam(optimizer_grouped_parameters, lr=opt.lr, warmup=opt.lr_warmup_proportion, t_total=num_train_optimization_steps, schedule='warmup_linear')
    writer = SummaryWriter(opt.res_dir)
    log_train_file = None
    log_valid_file = None
    if opt.log:
        log_train_file = (opt.log + '.train.log')
        log_valid_file = (opt.log + '.valid.log')
        logger.info('Training performance will be written to file: {} and {}'.format(log_train_file, log_valid_file))
        # Write CSV headers once; rows are appended at the end of each epoch.
        with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
            log_tf.write('epoch,loss,ppl,accuracy\n')
            log_vf.write('epoch,loss,ppl,accuracy,METEOR,,CIDEr,re4\n')
    prev_best_score = 0.0
    es_cnt = 0
    for epoch_i in range(opt.n_epoch):
        logger.info('[Epoch {}]'.format(epoch_i))
        start = time.time()
        # Restore the raw (non-EMA) weights before training this epoch.
        if ((ema is not None) and (epoch_i != 0)):
            ema.resume(model)
        (train_loss, train_acc) = train_epoch(model, training_data, optimizer, ema, device, opt, writer, epoch_i)
        logger.info('[Training] ppl: {ppl: 8.5f}, accuracy: {acc:3.3f} %, elapse {elapse:3.3f} min'.format(ppl=math.exp(min(train_loss, 100)), acc=(100 * train_acc), elapse=((time.time() - start) / 60.0)))
        niter = ((epoch_i + 1) * len(training_data))
        writer.add_scalar('Train/Acc', train_acc, niter)
        writer.add_scalar('Train/Loss', train_loss, niter)
        start = time.time()
        # Validate with the EMA-smoothed weights assigned to the model.
        if (ema is not None):
            ema.assign(model)
        (val_loss, val_acc) = eval_epoch(model, validation_data, device, opt)
        logger.info('[Val] ppl: {ppl: 8.5f}, accuracy: {acc:3.3f} %, elapse {elapse:3.3f} min'.format(ppl=math.exp(min(val_loss, 100)), acc=(100 * val_acc), elapse=((time.time() - start) / 60.0)))
        writer.add_scalar('Val/Acc', val_acc, niter)
        writer.add_scalar('Val/Loss', val_loss, niter)
        checkpoint = {'model': model.state_dict(), 'model_cfg': model.config, 'opt': opt, 'epoch': epoch_i}
        (val_greedy_output, filepaths) = eval_language_metrics(checkpoint, validation_data, opt, eval_mode='val', model=model)
        cider = val_greedy_output['CIDEr']
        bleu4 = val_greedy_output['Bleu_4']
        meteor = val_greedy_output['METEOR']
        r4 = val_greedy_output['re4']
        logger.info('[Val] METEOR {m:.2f} {b:.2f} CIDEr {c:.2f} re4 {r:.2f}'.format(m=(val_greedy_output['METEOR'] * 100), b=(val_greedy_output['Bleu_4'] * 100), c=(val_greedy_output['CIDEr'] * 100), r=(val_greedy_output['re4'] * 100)))
        writer.add_scalar('Val/METEOR', (val_greedy_output['METEOR'] * 100), niter)
        writer.add_scalar('Val/Bleu_4', (val_greedy_output['Bleu_4'] * 100), niter)
        writer.add_scalar('Val/CIDEr', (val_greedy_output['CIDEr'] * 100), niter)
        writer.add_scalar('Val/Re4', (val_greedy_output['re4'] * 100), niter)
        if (opt.save_mode == 'all'):
            # Save every epoch, encoding the metric values in the filename.
            model_name = (opt.save_model + '_e{e}_b{b}_m{m}_c{c}_r{r}.chkpt'.format(e=epoch_i, b=round((bleu4 * 100), 2), m=round((meteor * 100), 2), c=round((cider * 100), 2), r=round((r4 * 100), 2)))
            torch.save(checkpoint, model_name)
        elif (opt.save_mode == 'best'):
            model_name = (opt.save_model + '.chkpt')
            # Keep only the best checkpoint by CIDEr; count non-improving
            # epochs towards early stopping otherwise.
            if (cider > prev_best_score):
                es_cnt = 0
                prev_best_score = cider
                torch.save(checkpoint, model_name)
                new_filepaths = [e.replace('tmp', 'best') for e in filepaths]
                for (src, tgt) in zip(filepaths, new_filepaths):
                    os.renames(src, tgt)
                logger.info('The checkpoint file has been updated.')
            else:
                es_cnt += 1
                if (es_cnt > opt.max_es_cnt):
                    logger.info('Early stop at {} with CIDEr {}'.format(epoch_i, prev_best_score))
                    break
        cfg_name = (opt.save_model + '.cfg.json')
        save_parsed_args_to_json(opt, cfg_name)
        if (log_train_file and log_valid_file):
            with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{acc:3.3f}\n'.format(epoch=epoch_i, loss=train_loss, ppl=math.exp(min(train_loss, 100)), acc=(100 * train_acc)))
                log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{acc:3.3f},{m:.2f},{b:.2f},{c:.2f},{r:.2f}\n'.format(epoch=epoch_i, loss=val_loss, ppl=math.exp(min(val_loss, 100)), acc=(100 * val_acc), m=(val_greedy_output['METEOR'] * 100), b=(val_greedy_output['Bleu_4'] * 100), c=(val_greedy_output['CIDEr'] * 100), r=(val_greedy_output['re4'] * 100)))
        if opt.debug:
            break
    writer.close()
def batched_index_select(x: torch.Tensor, dim: int, index: torch.LongTensor) -> torch.Tensor:
    """Gather entries of *x* along *dim* with a per-batch *index*, then squeeze that dim.

    The index is reshaped to (batch, ..., k, ..., 1) so it broadcasts over
    every axis except the batch axis (0) and the gather axis *dim*.
    """
    view_shape = [x.shape[0]]
    for axis in range(1, len(x.shape)):
        view_shape.append(-1 if axis == dim else 1)
    expand_shape = list(x.shape)
    expand_shape[0] = -1
    expand_shape[dim] = -1
    gather_index = index.view(view_shape).expand(expand_shape)
    return torch.gather(x, dim, gather_index).squeeze(dim)
def quaddobl_pade_vector(dim):
    """Return the quad-double Pade coefficients for components 1 through *dim*."""
    return [quaddobl_pade_coefficients(component) for component in range(1, dim + 1)]
def __dice_loss(input: torch.FloatTensor, target: torch.LongTensor, weights: torch.FloatTensor=None, k: int=0, eps: float=0.0001):
    """Generalized (optionally class-weighted) Dice loss.

    input/target: tensors whose first axis indexes classes. weights: optional
    per-class weights of length n_classes. k: sharpening constant (k=0 gives
    the plain 1 - dice form). eps: numerical-stability term.

    Bug fix: the weighted branch previously OVERWROTE intersection/union on
    every loop iteration, so only the last class contributed to the loss;
    the per-class terms are now accumulated (generalized Dice).
    """
    n_classes = input.size()[0]
    if (weights is not None):
        intersection = 0.0
        union = 0.0
        for c in range(n_classes):
            intersection = intersection + ((input[c] * target[c]) * weights[c]).sum()
            union = union + (weights[c] * (input[c] + target[c])).sum()
        union = union + eps
    else:
        # Unweighted case: flatten everything into one soft dice computation.
        intersection = torch.dot(input.view((- 1)), target.view((- 1)))
        union = ((torch.sum(input) + torch.sum(target)) + eps)
    gd = (((2 * intersection.float()) + eps) / union.float())
    return (1 - (gd / (1 + (k * (1 - gd)))))
_torch
_wandb
class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase):
    """Integration test: Trainer.hyperparameter_search with the wandb backend."""

    def setUp(self):
        args = TrainingArguments('..')
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {'a': 0, 'b': 0}

        def hp_space(trial):
            # wandb sweep config: random search over a (log-ish) LR-like 'a'
            # and an integer 'b'.
            return {'method': 'random', 'metric': {}, 'parameters': {'a': {'distribution': 'uniform', 'min': 1e-06, 'max': 0.0001}, 'b': {'distribution': 'int_uniform', 'min': 1, 'max': 6}}}

        def model_init(config):
            # config is None on the initial (non-sweep) model construction.
            if (config is None):
                a = 0
                b = 0
            else:
                a = config['a']
                b = config['b']
            model_config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(model_config)

        def hp_name(params):
            return MyTrialShortNamer.shortname(params)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir='runs', run_name='test', model_init=model_init)
            trainer.hyperparameter_search(direction='minimize', hp_space=hp_space, hp_name=hp_name, backend='wandb', n_trials=4, anonymous='must')
def preresnet56_cifar10(num_classes=10, **kwargs):
    """PreResNet-56 for CIFAR-10 (basic blocks, no bottleneck)."""
    return get_preresnet_cifar(num_classes=num_classes, blocks=56, bottleneck=False, model_name='preresnet56_cifar10', **kwargs)
def local_env_settings():
    """Return machine-local pytracking environment paths.

    Only the network checkpoint and result paths are configured here; the
    dataset paths are intentionally left empty for this machine.
    """
    settings = EnvSettings()
    settings.davis_dir = ''
    settings.got10k_path = ''
    settings.got_packed_results_path = ''
    settings.got_reports_path = ''
    settings.lasot_path = ''
    settings.network_path = '/data/zzy/ablation/V9_Swin_mon_02_accu_box_09/ltr/checkpoints/ltr/transt/transt/TransT_ep0063.pth.tar'
    settings.nfs_path = ''
    settings.otb_path = ''
    settings.result_plot_path = '/data/zhu_19/TransT-test/17-09-22/pytracking/result_plots/'
    settings.results_path = '/data/zzy/ablation/V9_Swin_mon_02_accu_box_09/pytracking/tracking_results_0063_new/'
    settings.tn_packed_results_path = ''
    settings.tpl_path = ''
    settings.trackingnet_path = ''
    settings.uav_path = ''
    settings.vot_path = ''
    settings.youtubevos_dir = ''
    return settings
class MSDInitLayer(nn.Module):
    """MSDNet initial layer: one block per output scale.

    Scale 0 uses a ResInitBlock; every further scale downsamples the previous
    scale's output with a stride-2 3x3 conv block.
    """

    def __init__(self, in_channels, out_channels):
        super(MSDInitLayer, self).__init__()
        self.scale_blocks = MultiOutputSequential()
        for (i, out_channels_per_scale) in enumerate(out_channels):
            if (i == 0):
                self.scale_blocks.add_module('scale_block{}'.format((i + 1)), ResInitBlock(in_channels=in_channels, out_channels=out_channels_per_scale))
            else:
                self.scale_blocks.add_module('scale_block{}'.format((i + 1)), conv3x3_block(in_channels=in_channels, out_channels=out_channels_per_scale, stride=2))
            # The next scale consumes the previous scale's channel count.
            in_channels = out_channels_per_scale

    def forward(self, x):
        # MultiOutputSequential collects one output per scale.
        y = self.scale_blocks(x)
        return y
def make_folder(folder_name):
    """Create *folder_name* (including parents) if it does not already exist.

    Uses makedirs(exist_ok=True) instead of the previous isdir() pre-check,
    which closes the race where another process creates the directory between
    the check and the creation. Still raises if the path exists as a file.
    """
    os.makedirs(folder_name, exist_ok=True)
def render_example(example_id, render_dir, input_dir, output_dir, texture_dir, csv_file, shape, views):
    """Render one example's OBJ model into grid and per-view images.

    Returns False when the example already has the full set of rendered
    views (skip), True after rendering. Raises ValueError when texture_dir
    is not a directory.

    Fixes: removed the dead bare try/except around os.path.join (join cannot
    raise, so the except-return-False branch was unreachable and the bare
    except a lint hazard); the texture directory is listed once instead of
    twice.
    """
    example_in_dir = os.path.join(input_dir, example_id)
    example_out_dir = os.path.join(output_dir, example_id)
    example_render_dir = os.path.join(render_dir, example_id)
    obj = os.path.join(example_in_dir, 'models', 'model_normalized.obj')
    # Skip examples that are already fully rendered; discard partial output.
    if os.path.isdir(example_out_dir):
        if (len(os.listdir(example_out_dir)) == views):
            return False
        else:
            shutil.rmtree(example_out_dir)
    if (not os.path.isdir(example_out_dir)):
        os.makedirs(example_out_dir)
    if (not os.path.isdir(example_render_dir)):
        os.makedirs(example_render_dir)
    if os.path.isdir(texture_dir):
        textures = [name for name in os.listdir(texture_dir)]
        textures.sort()
    else:
        raise ValueError('Invalid texture directory !')
    # Redirect file descriptor 1 (stdout) into a log file while the renderer
    # runs, so its C-level output does not spam the console.
    logfile = 'render.log'
    open(logfile, 'a').close()
    old = os.dup(1)
    sys.stdout.flush()
    os.close(1)
    os.open(logfile, os.O_WRONLY)
    # Pick a random texture from the (already sorted) listing.
    texture = textures[np.random.randint(0, len(textures))]
    texture_img = os.path.join(texture_dir, texture)
    render_obj_grid(obj, example_render_dir, [512, 512], 30, 5, 1, 2, False, None, None)
    render_obj_with_view(obj, example_out_dir, csv_file, texture_img, views, shape)
    # Restore the original stdout file descriptor.
    os.close(1)
    os.dup(old)
    os.close(old)
    return True
class CognateSet():
    """A set of cognate words grouped by language, with a global running index."""

    # Class-level counter: each new CognateSet receives the next index.
    IDX = 0

    def __init__(self):
        self._data = defaultdict(set)
        self.idx = CognateSet.IDX
        CognateSet.IDX += 1

    def add(self, lang, *words):
        """Record all non-placeholder words ('_' is the placeholder) for *lang*."""
        real_words = [word for word in words if word != '_']
        if real_words:
            self._data[lang].update(real_words)

    def is_in(self, word, lang):
        """Return True when *word* is recorded for *lang*."""
        return lang in self._data and word in self._data[lang]

    def __contains__(self, lang):
        return lang in self._data

    def items(self):
        return self._data.items()

    def __getitem__(self, lang):
        if lang in self:
            return self._data[lang]
        raise KeyError

    def to_df(self):
        """Flatten to a DataFrame with one (word, lang, idx) row per word."""
        rows = [(word, lang, self.idx) for (lang, wordset) in self._data.items() for word in wordset]
        return pd.DataFrame(rows, columns=['word', 'lang', 'idx'])
class Evaluator(Generic[S], ABC):
    """Abstract strategy for evaluating a list of candidate solutions."""

    def evaluate(self, solution_list: List[S], problem: Problem) -> List[S]:
        """Evaluate every solution against *problem*; subclasses implement this."""
        pass

    def evaluate_solution(solution: S, problem: Problem) -> None:
        # NOTE(review): no `self`/`cls` parameter and no @staticmethod
        # decorator — calling this on an instance would bind the instance as
        # `solution`. Presumably intended as a static helper called via the
        # class; confirm call sites before changing.
        problem.evaluate(solution)
def rotate_coordination(orig_x, orig_y, orig_d, coordi_rotate_d):
    """Express a point and heading in a frame rotated by *coordi_rotate_d* degrees.

    Returns (x, y, d) in the rotated frame, with the heading d normalized
    into the half-open interval (-180, 180].
    """
    theta = (coordi_rotate_d * math.pi) / 180
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    new_x = orig_x * cos_t + orig_y * sin_t
    new_y = (-orig_x) * sin_t + orig_y * cos_t
    new_d = orig_d - coordi_rotate_d
    # Wrap the heading into (-180, 180] by repeated 360-degree shifts.
    while new_d > 180:
        new_d = new_d - 360
    while new_d <= (-180):
        new_d = new_d + 360
    return (new_x, new_y, new_d)
class DisentangleLayer(nn.Module):
    """Graph layer that propagates features through n_latent learned edge
    "factor" channels and either concatenates or averages the results.

    A discrimination loss encourages the per-factor graph encodings to be
    distinguishable by a linear classifier.
    """

    def __init__(self, n_latent, in_dim, out_dim, cat=True):
        super(DisentangleLayer, self).__init__()
        self.g = None
        self.n_latent = n_latent
        # When concatenating, each latent channel gets an equal slice of out_dim.
        self.n_feat_latent = ((out_dim // self.n_latent) if cat else out_dim)
        self.cat = cat
        self.linear = nn.Linear(in_dim, self.n_feat_latent)
        # Per-latent left/right attention projections (GAT-style a_l + a_r).
        self.att_ls = nn.ModuleList()
        self.att_rs = nn.ModuleList()
        for latent_i in range(self.n_latent):
            self.att_ls.append(nn.Linear(self.n_feat_latent, 1))
            self.att_rs.append(nn.Linear(self.n_feat_latent, 1))
        self.graph_to_feat = GraphEncoder(self.n_feat_latent, (self.n_feat_latent // 2))
        self.classifier = nn.Linear(self.n_feat_latent, self.n_latent)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, g, inputs):
        """Run one disentangled propagation pass over graph *g*.

        Keeps self.g / self.hidden for the subsequent disentangle-loss call.
        """
        self.g = g.local_var()
        out_feats = []
        hidden = self.linear(inputs)
        self.hidden = hidden
        for latent_i in range(self.n_latent):
            a_l = self.att_ls[latent_i](hidden)
            a_r = self.att_rs[latent_i](hidden)
            self.g.ndata.update({f'feat_{latent_i}': hidden, f'a_l_{latent_i}': a_l, f'a_r_{latent_i}': a_r})
            # Edge factor = sigmoid(6 * (a_l[u] + a_r[v])) per edge (u, v).
            self.g.apply_edges(fn.u_add_v(f'a_l_{latent_i}', f'a_r_{latent_i}', f'factor_{latent_i}'))
            self.g.edata[f'factor_{latent_i}'] = torch.sigmoid((6.0 * self.g.edata[f'factor_{latent_i}']))
            feat = self.g.ndata[f'feat_{latent_i}']
            # GCN-style normalization by in-degree^-0.5 (clamped to avoid /0).
            norm = torch.pow(self.g.in_degrees().float().clamp(min=1), (- 0.5))
            shp = (norm.shape + ((1,) * (feat.dim() - 1)))
            norm = torch.reshape(norm, shp).to(feat.device)
            feat = (feat * norm)
            self.g.ndata['h'] = feat
            # Message passing weighted by this latent's edge factors.
            self.g.update_all(fn.u_mul_e('h', f'factor_{latent_i}', 'm'), fn.sum(msg='m', out='h'))
            out_feats.append(self.g.ndata['h'].unsqueeze((- 1)))
        if self.cat:
            return torch.cat(tuple([rst.squeeze((- 1)) for rst in out_feats]), (- 1))
        else:
            return torch.mean(torch.cat(tuple(out_feats), (- 1)), (- 1))

    def compute_disentangle_loss(self):
        """Classify per-factor graph encodings; returns [discrimination, distribution] losses."""
        assert (self.g is not None), 'compute disentangle loss need to be called after forward pass'
        factors_feat = [self.graph_to_feat(self.g, self.hidden, f'factor_{latent_i}').squeeze() for latent_i in range(self.n_latent)]
        # Label each encoding with its latent-channel id.
        labels = [(torch.ones(f.shape[0]) * i) for (i, f) in enumerate(factors_feat)]
        labels = torch.cat(tuple(labels), 0).long().cuda()
        factors_feat = torch.cat(tuple(factors_feat), 0)
        pred = self.classifier(factors_feat)
        discrimination_loss = self.loss_fn(pred, labels)
        # Distribution loss is currently unused (always 0).
        distribution_loss = 0
        return [discrimination_loss, distribution_loss]

    def get_factor(self):
        # Return a detached view of the last processed graph (with factors).
        g = self.g.local_var()
        return g
def extra_hidden_layer(hidden_dim):
    """Return one hidden block: Linear(hidden_dim -> hidden_dim) followed by in-place ReLU."""
    linear = nn.Linear(hidden_dim, hidden_dim)
    activation = nn.ReLU(True)
    return nn.Sequential(linear, activation)
class EarlyStopping():
    """Stop training when the loss has not improved by more than *epsilon*
    for *steps_to_wait* consecutive checks.
    """

    def __init__(self, steps_to_wait, epsilon=0, verbose=False):
        """steps_to_wait: patience in checks. epsilon: minimum improvement.

        verbose: print a message when stopping triggers. New keyword with a
        default, so existing callers are unaffected. (Bug fix: `verbose` was
        previously an undefined name inside should_stop, raising NameError at
        the exact moment early stopping triggered.)
        """
        assert (steps_to_wait >= 0)
        self.steps_to_wait = steps_to_wait
        self.epsilon = epsilon
        self.verbose = verbose
        self.best_loss = float('inf')
        self.waited_steps = 0

    def should_stop(self, current_loss, current_step):
        """Return True once patience is exhausted without sufficient improvement."""
        if ((self.best_loss - current_loss) > self.epsilon):
            # Improvement: record it and reset the patience counter.
            self.best_loss = current_loss
            self.waited_steps = 0
        elif (self.waited_steps < self.steps_to_wait):
            self.waited_steps += 1
        else:
            if self.verbose:
                print('Early stopping at time step {} after waiting {} steps'.format(current_step, self.steps_to_wait))
            return True
        return False
class AssignmentNode(TwoAddressNode):
    """C-code AST node emitting a simple assignment `res = (cast)var;`."""

    snippet = '{res_var} = {cast}{var1};\n'

    def __init__(self, res_var: TreeNode, var1: TreeNode, prev_node: Node=None):
        super().__init__(res_var, var1, prev_node)
        # Cast prefix inserted into the snippet; filled in by write_c when
        # the two sides have mismatching dimensions.
        self.cast = ''

    def write_c(self):
        res_var: Variable = self.get_node('res_var')
        var1: Variable = self.get_node('var1')
        # Emit an explicit cast only when both sides are concrete Variables
        # whose dimensions differ in any position.
        if ((type(var1) == Variable) and (type(res_var) == Variable) and np.any((var1.dim != res_var.dim))):
            self.cast = res_var.get_cast()
        super().write_c()
class Bool(Int):
    """Boolean hyperparameter: an Int restricted to the range [0, 1]."""

    def __init__(self, default=None, prefix=None):
        super(Bool, self).__init__(0, 1, default=default, prefix=prefix)
def build_detection_model(cfg, BBAM=False):
    """Instantiate the detection meta-architecture named by cfg.MODEL.META_ARCHITECTURE.

    BBAM: flag forwarded to the architecture constructor.
    """
    meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
    return meta_arch(cfg, BBAM=BBAM)
class MyOtherNewExpectedFlux(MyNewExpectedFlux):
    """Variant expected-flux class; currently adds nothing beyond its parent."""

    def __init__(self, config):
        # NOTE(review): `config` is accepted but NOT forwarded to the parent
        # initializer. If MyNewExpectedFlux.__init__ requires a config this
        # raises TypeError at construction — confirm the parent signature.
        super().__init__()
        pass
def main():
    """Evaluate a deep orientation-estimation model described by a YAML config.

    Loads the config, builds the model/data loader named in it, restores the
    checkpoint from config['test']['model_path'], and runs evaluation.
    Raises FileNotFoundError when the checkpoint is missing.
    """
    parser = argparse.ArgumentParser(description='Deep Orientation Estimation')
    parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str)
    args = parser.parse_args()
    config_file = args.config
    assert os.path.exists(args.config), 'Config file {} does not exist'.format(args.config)
    with open(config_file) as fp:
        # SECURITY: yaml.load with no explicit Loader can construct arbitrary
        # Python objects from the file; prefer yaml.safe_load unless custom
        # tags are genuinely required.
        config = yaml.load(fp)
    if ('loss_parameters' in config['test']):
        loss_parameters = config['test']['loss_parameters']
    else:
        loss_parameters = None
    # Fall back to CPU when the configured CUDA device is unavailable.
    device = torch.device((config['test']['device'] if torch.cuda.is_available() else 'cpu'))
    print('Using device: {}'.format(device))
    num_classes = config['test']['num_outputs']
    num_channels = config['test']['num_channels']
    model_name = config['test']['model']
    model = modules.network.get_model(name=model_name, pretrained=True, num_channels=num_channels, num_classes=num_classes)
    model.to(device)
    print('Model name: {}'.format(model_name))
    model_path = config['test']['model_path']
    if os.path.isfile(model_path):
        print('Loading model {}'.format(model_path))
        checkpoint = torch.load(model_path)
        model.load_state_dict(checkpoint['state_dict'])
    else:
        # Bug fix: this was `assert 'model not found'` — a no-op, since a
        # non-empty string is always truthy — which silently continued the
        # evaluation with untrained weights.
        raise FileNotFoundError('Model checkpoint not found: {}'.format(model_path))
    batch_size = 32
    test_loader = get_data_loader(config, batch_size)
    loss_function_name = config['test']['loss_function']
    dataset_name = config['data_loader']['name']
    if loss_parameters:
        criterion = LOSS_FUNCTIONS[loss_function_name](**loss_parameters)
    else:
        criterion = LOSS_FUNCTIONS[loss_function_name]()
    if ('floating_point_type' in config['test']):
        floating_point_type = config['test']['floating_point_type']
    else:
        floating_point_type = 'float'
    if (floating_point_type == 'double'):
        model.double()
    run_evaluation(model, test_loader, criterion, device, floating_point_type)
def wait_for_server_started(ip, port, timeout=60):
    """Block until a TCP connection to (ip, port) succeeds.

    Retries once per second, up to *timeout* attempts; raises TimeoutError on
    failure. Bug fix: the socket is now closed on the failure path too — it
    previously leaked when TimeoutError was raised.
    """
    s = socket.socket()
    num_attempts = 0
    try:
        while True:
            if (num_attempts == timeout):
                raise TimeoutError('Failed to connect to {} after waiting for {} s'.format((ip, port), timeout))
            try:
                s.connect((ip, port))
                break
            except socket.error:
                time.sleep(1)
                num_attempts += 1
    finally:
        # Close whether we connected, timed out, or were interrupted.
        s.close()
def validate(val_loader, model, train_labels=None, prefix='Val'):
    """Evaluate *model* on *val_loader* and print overall, per-shot, and
    balanced regression metrics.

    train_labels: training-set label distribution used by the shot metrics
    (many/median/low-shot breakdown).
    Returns (avg MSE, avg L1, geometric-mean L1, balanced MSE, balanced L1).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses_mse = AverageMeter('Loss (MSE)', ':.3f')
    losses_l1 = AverageMeter('Loss (L1)', ':.3f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses_mse, losses_l1], prefix=f'{prefix}: ')
    criterion_mse = nn.MSELoss()
    criterion_l1 = nn.L1Loss()
    # Per-sample L1 (reduction='none') feeds the geometric-mean metric.
    criterion_gmean = nn.L1Loss(reduction='none')
    model.eval()
    losses_all = []
    (preds, labels) = ([], [])
    with torch.no_grad():
        end = time.time()
        for (idx, (inputs, targets, _)) in enumerate(val_loader):
            (inputs, targets) = (inputs.cuda(non_blocking=True), targets.cuda(non_blocking=True))
            outputs = model(inputs)
            preds.extend(outputs.data.cpu().numpy())
            labels.extend(targets.data.cpu().numpy())
            loss_mse = criterion_mse(outputs, targets)
            loss_l1 = criterion_l1(outputs, targets)
            loss_all = criterion_gmean(outputs, targets)
            losses_all.extend(loss_all.cpu().numpy())
            losses_mse.update(loss_mse.item(), inputs.size(0))
            losses_l1.update(loss_l1.item(), inputs.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((idx % args.print_freq) == 0):
                progress.display(idx)
        # Aggregate metrics over the full validation set.
        (mean_MSE, mean_L1) = balanced_metrics(np.hstack(preds), np.hstack(labels))
        shot_dict = shot_metrics(np.hstack(preds), np.hstack(labels), train_labels)
        shot_dict_balanced = shot_metrics_balanced(np.hstack(preds), np.hstack(labels), train_labels)
        loss_gmean = gmean(np.hstack(losses_all), axis=None).astype(float)
        print(f' * Overall: MSE {losses_mse.avg:.3f} L1 {losses_l1.avg:.3f} G-Mean {loss_gmean:.3f}')
        print(('-' * 40))
        print(f" * Many: MSE {shot_dict['many']['mse']:.3f} L1 {shot_dict['many']['l1']:.3f} G-Mean {shot_dict['many']['gmean']:.3f}")
        print(f" * Median: MSE {shot_dict['median']['mse']:.3f} L1 {shot_dict['median']['l1']:.3f} G-Mean {shot_dict['median']['gmean']:.3f}")
        print(f" * Low: MSE {shot_dict['low']['mse']:.3f} L1 {shot_dict['low']['l1']:.3f} G-Mean {shot_dict['low']['gmean']:.3f}")
        print(('=' * 40))
        print(f' * bMSE {mean_MSE:.3f} bMAE {mean_L1:.3f}')
        print(('-' * 40))
        print(f" * Many: bMSE {shot_dict_balanced['many']['mse']:.3f} bMAE {shot_dict_balanced['many']['l1']:.3f} G-Mean {shot_dict_balanced['many']['gmean']:.3f}")
        print(f" * Median: bMSE {shot_dict_balanced['median']['mse']:.3f} bMAE {shot_dict_balanced['median']['l1']:.3f} G-Mean {shot_dict_balanced['median']['gmean']:.3f}")
        print(f" * Low: bMSE {shot_dict_balanced['low']['mse']:.3f} bMAE {shot_dict_balanced['low']['l1']:.3f} G-Mean {shot_dict_balanced['low']['gmean']:.3f}")
    return (losses_mse.avg, losses_l1.avg, loss_gmean, mean_MSE, mean_L1)
def ReadFileWithAbort(tthread, batchInterval):
    """Aggregate the 6-part time breakdown for six scheduling algorithms and
    normalize each algorithm's column to percentages.

    Returns y, a 6x6 matrix where y[i][j] is breakdown component i's share
    (in percent) of algorithm j's total time, for tthread threads and
    tthread * batchInterval total input events.

    Refactor: the six copy-pasted per-algorithm read/accumulate loops are
    collapsed into a single loop over the algorithm folder names; the paths,
    parsing, and accumulation are otherwise unchanged.
    """
    (w, h) = (6, 6)
    y = [[0 for x in range(w)] for y in range(h)]
    y_sum = [0 for x in range(w)]
    inputEvents = (tthread * batchInterval)
    # Column j of y / y_sum corresponds to the j-th algorithm below.
    algorithms = ['GSA', 'BFSA', 'DFSA', 'OPGSA', 'OPBFSA', 'OPDFSA']
    for (j, algo) in enumerate(algorithms):
        path = (FILE_FOLER + '/{}/threads = {}/totalEvents = {}'.format(algo, tthread, inputEvents))
        lines = open(path).readlines()
        idx = locateIdx(lines)
        for line in lines[idx:]:
            breakdown_value = line.split('\t')
            print(breakdown_value)
            # Fields 1..6 of each data row hold the six breakdown components.
            for i in range(0, 6):
                y[i][j] += float(breakdown_value[(i + 1)])
                y_sum[j] += float(breakdown_value[(i + 1)])
    # Convert each column to percentages of that algorithm's total.
    for i in range(h):
        for j in range(w):
            if (y_sum[j] != 0):
                y[i][j] = ((y[i][j] / y_sum[j]) * 100)
    print(y)
    return y
def gen_rosette_code(invocations: list, o_id: int) -> str:
    """Generate a Rosette program that synthesizes a 32-bit arithmetic
    expression consistent with the observed input/output pairs.

    invocations: list of (input_list, output_list) example pairs.
    o_id: index of the output component to synthesize against.
    Returns the complete Rosette source as a string; the synthesized term is
    allowed to disagree only where the database returns the sentinel -1.
    """
    argCount = len(invocations[0][0])
    arglist = ' '.join([f's{i}' for i in range(argCount)])
    header = f'''#lang rosette
(require rosette/lib/synthax)
(define int32? (bitvector 32))
(define (int32 i) (bv i int32?))
'''
    definitions = f'''(define-symbolic {arglist} int32?)
'''
    # The database encodes the example table as a cond over exact input matches.
    database = f'''(define (database {arglist})
    (cond
'''
    for (in_list, out_list) in invocations:
        entry = '        [(and'
        for i in range(argCount):
            entry += f' (eq? s{i} (int32 {in_list[i]}))'
        entry += f''') => (int32 {out_list[o_id]})]
'''
        database += entry
    database += '        [else (int32 -1)]\n    ))\n'
    # Equality check that tolerates the -1 "unknown" sentinel.
    checker = f'''(define (check-equal impl std {arglist})
    (define out (impl {arglist}))
    (define ans (std {arglist}))
    (assert (or (eq? out ans) (eq? (int32 -1) ans)))
)
'''
    # Depth-3 grammar over the inputs, constants 1/2, and five bitvector ops.
    grammar = f'''(define-grammar (ArithExpTree {arglist})
    [expr
        (choose {arglist} (int32 1) (int32 2)
            ((bop) (expr) (expr)))]
    [bop
        (choose bvadd bvsub bvmul bvudiv bvurem)])
(define (myterm {arglist})
    (ArithExpTree {arglist} #:depth 3))
'''
    synthesis = f'''(define sol
    (synthesize
        #:forall (list {arglist})
        #:guarantee
        (check-equal myterm database {arglist})
    ))
'''
    output = f'(if (sat? sol) (print-forms sol) (println "UNSAT"))'
    return ((((((header + definitions) + database) + checker) + grammar) + synthesis) + output)
('Please use `bigdl.chronos.autots.AutoTSEstimator` instead.')
class AutoTSTrainer():
    """Deprecated automated time-series trainer.

    Thin wrapper around TimeSequencePredictor; superseded by
    `bigdl.chronos.autots.AutoTSEstimator`.
    """

    def __init__(self, horizon=1, dt_col='datetime', target_col='value', logs_dir='~/bigdl_automl_logs', extra_features_col=None, search_alg=None, search_alg_params=None, scheduler=None, scheduler_params=None, name='automl'):
        # A single column name is normalised into a one-element list so the
        # internal predictor always receives a list of targets.
        targets = [target_col] if isinstance(target_col, str) else target_col
        self.internal = TimeSequencePredictor(dt_col=dt_col, target_col=targets, logs_dir=logs_dir, future_seq_len=horizon, extra_features_col=extra_features_col, search_alg=search_alg, search_alg_params=search_alg_params, scheduler=scheduler, scheduler_params=scheduler_params, name=name)

    def fit(self, train_df, validation_df=None, metric='mse', recipe: Recipe=SmokeRecipe(), uncertainty: bool=False, upload_dir=None):
        """Run the automated search and return the result wrapped in a TSPipeline."""
        pipeline = TSPipeline()
        pipeline.internal = self.internal.fit(train_df, validation_df, metric, recipe, mc=uncertainty, upload_dir=upload_dir)
        return pipeline
def test_microboone_fale_report_numbr():
    """A bare arXiv id at the end of a reference must be normalised to an 'arXiv:' report number."""
    ref_line = u'[40] MicroBooNE, LAr1-ND, ICARUS-WA104 collaboration, M. Antonello et al., A Proposal for a Three Detector Short-Baseline Neutrino Oscillation Program in the Fermilab Booster Neutrino Beam, 1503.01520.'
    expected = [{'author': [u'M. Antonello et al.'], 'linemarker': [u'40'], 'misc': [u'MicroBooNE, LAr1-ND, ICARUS-WA104 collaboration', u'A Proposal', u'for a Three Detector Short-Baseline Neutrino Oscillation Program in the Fermilab Booster Neutrino Beam'], 'raw_ref': [ref_line], 'reportnumber': [u'arXiv:1503.01520']}]
    references = get_references(ref_line)[0]
    assert (references == expected)
def validate_item_buffer_args(max_length: int, min_length: int, sample_batch_size: int):
    """Sanity-check item-buffer construction arguments.

    Delegates to the individual validators, in order: the sample batch size
    must be compatible with the buffer capacity, and the minimum length may
    not exceed the maximum length.
    """
    # Order matters only for which error surfaces first; kept as-is.
    validate_sample_batch_size(sample_batch_size, max_length)
    validate_min_length(min_length, max_length)
class PB4D(Instance, ABC):
    """Dataset adapter for the BP4D corpus under the NFC scratch layout.

    Exposes per-actor image samples, FLAME parameter archives, registrations
    and scan meshes, and normalises loaded meshes (0.01 scaling, centred at
    the vertex mean).
    """

    def __init__(self):
        super(PB4D, self).__init__()
        self.dst = '/scratch/NFC/OnFlame/BP4D/'
        self.src = '/scratch/NFC/BP4D/'

    def get_images(self):
        """Return {actor_name: up to 100 randomly sampled frame paths}."""
        images = {}
        for actor in sorted(glob((self.get_src() + 'images/*'))):
            # BUGFIX: the old pattern f'/{actor}/*.jpg' prefixed the already
            # absolute actor path with a second '/', producing '//...' paths.
            imgs = sorted(glob(f'{actor}/*.jpg'))
            # Robustness: never request more samples than frames available
            # (np.random.choice raises when the sample exceeds the population).
            n_sample = min(100, len(imgs))
            indecies = np.random.choice(len(imgs), n_sample, replace=False)
            images[Path(actor).name] = [imgs[i] for i in indecies]
        return images

    def get_flame_params(self):
        """Return {file_stem: [path]} for every FLAME parameter archive (.npz)."""
        prams = {}
        for file in sorted(glob((self.get_src() + 'FLAME_parameters/*.npz'))):
            prams[Path(file).stem] = [file]
        return prams

    def get_registrations(self):
        """Return {file_stem: [path]} for every registration entry."""
        registrations = {}
        for file in sorted(glob((self.get_src() + 'registrations/*'))):
            registrations[Path(file).stem] = [file]
        return registrations

    def get_meshes(self):
        """Return {file_stem: [path]} for every scan mesh (.obj)."""
        meshes = {}
        for file in sorted(glob((self.get_src() + 'scans/*.obj'))):
            meshes[Path(file).stem] = [file]
        return meshes

    def transform_mesh(self, path):
        """Load an .obj mesh, scale it by 0.01 and centre it at the origin.

        NOTE(review): reaches into the private `_verts_list` of the loaded
        Meshes object — depends on that internal layout staying stable.
        """
        mesh = load_objs_as_meshes(path, device=self.device)
        mesh.scale_verts_(0.01)
        vertices = mesh._verts_list[0]
        center = vertices.mean(0)
        mesh._verts_list = [(vertices - center)]
        return mesh.clone()
def gauss_peak(maxpos, width, weight, wgrid):
    """Antisymmetrised Gaussian profile evaluated on `wgrid`.

    A normalised Gaussian of integrated weight `weight` centred at +maxpos,
    minus its mirror image centred at -maxpos (so the profile vanishes at
    wgrid = 0 and is odd in wgrid).
    """
    norm = (weight / (np.sqrt((2.0 * np.pi)) * width))
    peak = (norm * np.exp((((- 0.5) * ((wgrid - maxpos) ** 2)) / (width ** 2))))
    mirror = (norm * np.exp((((- 0.5) * ((wgrid + maxpos) ** 2)) / (width ** 2))))
    return (peak - mirror)
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """Load ImageNet-pretrained weights into `model` from the URL map.

    Args:
        model: network to populate (EfficientNet-style state-dict keys).
        model_name: key into the URL map, e.g. 'efficientnet-b0'.
        load_fc: when False, drop the final classifier weights so a model
            with a different number of classes can reuse the backbone.
        advprop: select the AdvProp checkpoint URLs instead of the default.
    """
    url_map_ = (url_map_advprop if advprop else url_map)
    state_dict = model_zoo.load_url(url_map_[model_name])
    if not load_fc:
        # BUGFIX: `load_fc` was previously accepted but ignored. Skip the
        # classifier head so only backbone weights are loaded.
        # NOTE(review): assumes EfficientNet's '_fc.weight'/'_fc.bias'
        # naming — confirm against the target model's state dict.
        state_dict.pop('_fc.weight', None)
        state_dict.pop('_fc.bias', None)
    # strict=False preserves the previous lenient behaviour for callers
    # whose models only partially match the checkpoint.
    model.load_state_dict(state_dict, strict=False)
def rotation_matrix(axis, theta):
    """Return the 3x3 matrix rotating by `theta` radians about `axis`.

    Degenerate inputs (near-zero axis or near-zero angle) yield the
    identity. Otherwise the axis is normalised and the matrix is built from
    quaternion components (Euler–Rodrigues formula).
    """
    # Guard: no meaningful rotation requested.
    if np.abs(axis).sum() < 1e-06 or np.abs(theta) < 1e-06:
        return np.eye(3)
    axis = np.asarray(axis)
    axis = axis / math.sqrt(np.dot(axis, axis))
    half = theta / 2.0
    a = math.cos(half)
    b, c, d = (- axis) * math.sin(half)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([
        [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
        [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
        [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],
    ])
def export_entry_point():
    """CLI entry point: bundle a trained nnU-Net task into a shareable zip."""
    import argparse
    parser = argparse.ArgumentParser(description='Use this script to export models to a zip file for sharing with others. You can upload the zip file and then either share the url for usage with nnUNet_download_pretrained_model_by_url, or share the zip for usage with nnUNet_install_pretrained_model_from_zip')
    parser.add_argument('-t', type=str, help='task name or task id')
    parser.add_argument('-o', type=str, help='output file name. Should end with .zip')
    parser.add_argument('-m', nargs='+', help='list of model configurations. Default: 2d 3d_lowres 3d_fullres 3d_cascade_fullres. Must be adapted to fit the available models of a task', default=('2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'), required=False)
    parser.add_argument('-tr', type=str, help=('trainer class used for 2d 3d_lowres and 3d_fullres. Default: %s' % default_trainer), required=False, default=default_trainer)
    parser.add_argument('-trc', type=str, help=('trainer class used for 3d_cascade_fullres. Default: %s' % default_cascade_trainer), required=False, default=default_cascade_trainer)
    parser.add_argument('-pl', type=str, help=('nnunet plans identifier. Default: %s' % default_plans_identifier), required=False, default=default_plans_identifier)
    parser.add_argument('--disable_strict', action='store_true', help='set this if you want to allow skipping missing things', required=False)
    parser.add_argument('-f', nargs='+', help='Folds. Default: 0 1 2 3 4', required=False, default=[0, 1, 2, 3, 4])
    args = parser.parse_args()
    # 'all' is a legal fold specifier; anything else must parse as an int.
    folds = [fold if fold == 'all' else int(fold) for fold in args.f]
    taskname = args.t
    if not taskname.startswith('Task'):
        # Not a full task name: treat as a numeric task id and resolve it.
        try:
            task_id = int(taskname)
        except Exception as e:
            print('-t must be either a Task name (TaskXXX_YYY) or a task id (integer)')
            raise e
        taskname = convert_id_to_task_name(task_id)
    export_pretrained_model(taskname, args.o, args.m, args.tr, args.trc, args.pl, strict=(not args.disable_strict), folds=folds)
def reshape_features(features):
    """Reshape every feature tensor to a column vector of shape (-1, 1).

    Returns a new dict keyed by the original feature names.
    """
    return {name: tf.reshape(tensor, ((- 1), 1)) for (name, tensor) in features.items()}
def stop_recording():
    """Finish the current recording: close the pipe, wait for ffmpeg to exit, clear the handle."""
    global ffmpeg
    proc = ffmpeg
    # Closing stdin signals EOF so ffmpeg finalises the output file.
    proc.stdin.close()
    proc.wait()
    ffmpeg = None
def detect_monitor_files(training_dir):
    """Return full paths of files in `training_dir` named '<FILE_PREFIX>.*'."""
    prefix = FILE_PREFIX + '.'
    return [os.path.join(training_dir, name) for name in os.listdir(training_dir) if name.startswith(prefix)]
def transform_pos(mtx, pos):
    """Apply a 4x4 transform `mtx` to (N, 3) positions `pos`.

    Positions are lifted to homogeneous coordinates, right-multiplied by the
    transposed matrix, and returned with a leading batch axis.
    NOTE(review): hard-codes CUDA tensors — will fail on CPU-only setups.
    """
    matrix = torch.from_numpy(mtx).cuda() if isinstance(mtx, np.ndarray) else mtx
    ones = torch.ones([pos.shape[0], 1]).cuda()
    homogeneous = torch.cat([pos, ones], axis=1)
    return torch.matmul(homogeneous, matrix.t())[None, ...]
def train_model(model, fields, optim, data_type, model_opt, train_part):
    """Run the full epoch loop: train, validate, log, and checkpoint.

    NOTE(review): in addition to its arguments this relies on the
    module-level `opt` (and `experiment`/`writer` when logging is enabled),
    mirroring the OpenNMT-style training script it comes from.
    """
    train_loss = make_loss_compute(model, fields['tgt'].vocab, opt)
    valid_loss = make_loss_compute(model, fields['tgt'].vocab, opt, train=False)
    trunc_size = opt.truncated_decoder
    shard_size = opt.max_generator_batches
    norm_method = opt.normalization
    grad_accum_count = opt.accum_count
    trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size, shard_size, data_type, norm_method, grad_accum_count)
    print('\nStart training...')
    print((' * number of epochs: %d, starting from Epoch %d' % (((opt.epochs + 1) - opt.start_epoch), opt.start_epoch)))
    print((' * batch size: %d' % opt.batch_size))
    for epoch in range(opt.start_epoch, (opt.epochs + 1)):
        print('')
        # 1. Train on the lazily-loaded training set.
        train_iter = make_dataset_iter(lazily_load_dataset('train'), fields, opt)
        train_stats = trainer.train(train_iter, epoch, report_func, train_part, model_opt, fields)
        print(('Train perplexity: %g' % train_stats.ppl()))
        print(('Train accuracy: %g' % train_stats.accuracy()))
        train_iter = None  # drop the iterator before building the next one
        # 2. Validate.
        valid_iter = make_dataset_iter(lazily_load_dataset('valid'), fields, opt, is_train=False)
        valid_stats = trainer.validate(valid_iter, train_part)
        print(('Validation perplexity: %g' % valid_stats.ppl()))
        print(('Validation accuracy: %g' % valid_stats.accuracy()))
        # 3. Log to the remote experiment server / tensorboard.
        if opt.exp_host:
            train_stats.log('train', experiment, optim.lr)
            valid_stats.log('valid', experiment, optim.lr)
        if opt.tensorboard:
            train_stats.log_tensorboard('train', writer, optim.lr, epoch)
            # BUGFIX: validation metrics were previously logged from
            # train_stats, duplicating the training curve under 'valid'.
            valid_stats.log_tensorboard('valid', writer, optim.lr, epoch)
        # 4. Update the learning-rate schedule and checkpoint if due.
        trainer.epoch_step(valid_stats.ppl(), epoch)
        if (epoch >= opt.start_checkpoint_at):
            trainer.drop_checkpoint(model_opt, epoch, fields, valid_stats)
class UnetGeneratorShiftTriple(nn.Module):
    """U-Net generator with a shift-connection block inserted three levels
    from the output (shift-net style architecture)."""

    def __init__(self, input_nc, output_nc, num_downs, opt, innerCos_list, shift_list, mask_global, ngf=64, norm_layer=nn.BatchNorm2d, use_spectral_norm=False):
        super(UnetGeneratorShiftTriple, self).__init__()
        # Innermost bottleneck block.
        block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True, use_spectral_norm=use_spectral_norm)
        print(block)  # kept from the original: dumps the innermost block
        # Extra blocks at the deepest resolution for larger num_downs.
        for _ in range((num_downs - 5)):
            block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodule=block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        block = UnetSkipConnectionBlock((ngf * 4), (ngf * 8), input_nc=None, submodule=block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        # Shift-connection block, third layer from the output side.
        block = UnetSkipConnectionShiftBlock((ngf * 2), (ngf * 4), opt, innerCos_list, shift_list, mask_global, input_nc=None, submodule=block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm, layer_to_last=3)
        block = UnetSkipConnectionBlock(ngf, (ngf * 2), input_nc=None, submodule=block, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer, use_spectral_norm=use_spectral_norm)
        self.model = block

    def forward(self, input):
        """Run the assembled U-Net on `input`."""
        return self.model(input)
class RandomPerspective(object):
    """Apply a random perspective distortion to a PIL Image with probability p.

    Args:
        distortion_scale: fraction of half the image size by which each
            corner may be displaced (0 = no distortion).
        p: probability of applying the transform.
        interpolation: PIL resampling filter passed to F.perspective.
    """

    def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC):
        self.p = p
        self.interpolation = interpolation
        self.distortion_scale = distortion_scale

    def __call__(self, img):
        """Return `img` perspectively distorted with probability p, else unchanged."""
        if (not F._is_pil_image(img)):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        if (random.random() < self.p):
            (width, height) = img.size
            (startpoints, endpoints) = self.get_params(width, height, self.distortion_scale)
            return F.perspective(img, startpoints, endpoints, self.interpolation)
        return img

    @staticmethod
    def get_params(width, height, distortion_scale):
        """Sample the source/destination corner pairs for the warp.

        BUGFIX: declared @staticmethod — previously the instance call
        self.get_params(w, h, scale) implicitly passed `self` as `width`
        and raised TypeError every time the transform fired.
        """
        half_height = int((height / 2))
        half_width = int((width / 2))
        # Each destination corner is jittered inwards by up to
        # distortion_scale * half-size pixels.
        topleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(0, int((distortion_scale * half_height))))
        topright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(0, int((distortion_scale * half_height))))
        botright = (random.randint(((width - int((distortion_scale * half_width))) - 1), (width - 1)), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1)))
        botleft = (random.randint(0, int((distortion_scale * half_width))), random.randint(((height - int((distortion_scale * half_height))) - 1), (height - 1)))
        startpoints = [(0, 0), ((width - 1), 0), ((width - 1), (height - 1)), (0, (height - 1))]
        endpoints = [topleft, topright, botright, botleft]
        return (startpoints, endpoints)

    def __repr__(self):
        return (self.__class__.__name__ + '(p={})'.format(self.p))
class BlipTextModel(metaclass=DummyObject):
    # Auto-generated placeholder for BlipTextModel used when its backend is
    # absent. NOTE(review): follows transformers' dummy-object pattern —
    # presumably DummyObject makes attribute access raise a helpful error;
    # confirm against the generating utility.
    _backends = ['torch']  # backends the real implementation requires

    def __init__(self, *args, **kwargs):
        # Raises immediately (with install instructions) if torch is missing.
        requires_backends(self, ['torch'])
def load_video(video_path, n_frms=MAX_INT, height=(- 1), width=(- 1), sampling='uniform'):
    """Decode up to `n_frms` frames from a video as a float tensor (C, T, H, W).

    sampling='uniform' spreads frame indices evenly over the clip;
    'headtail' draws half at random from the first half of the clip and
    half from the second. A height/width of -1 keeps the native resolution.
    """
    reader = VideoReader(uri=video_path, height=height, width=width)
    total = len(reader)
    (start, end) = (0, total)
    # Never ask for more frames than the clip contains.
    n_frms = min(n_frms, total)
    if (sampling == 'uniform'):
        indices = np.arange(start, end, (total / n_frms)).astype(int)
    elif (sampling == 'headtail'):
        head = sorted(rnd.sample(range((total // 2)), (n_frms // 2)))
        tail = sorted(rnd.sample(range((total // 2), total), (n_frms // 2)))
        indices = (head + tail)
    else:
        raise NotImplementedError
    batch = reader.get_batch(indices)
    # get_batch may hand back a torch tensor or a decord array depending on
    # the active bridge; normalise both to (C, T, H, W) float.
    if isinstance(batch, torch.Tensor):
        frms = batch.permute(3, 0, 1, 2).float()
    else:
        frms = torch.tensor(batch.asnumpy()).permute(3, 0, 1, 2).float()
    return frms
def prepare_doc_data(input_folder, output_folder):
    """Build the docnade vocabulary and per-split CSV files.

    Reads training/validation/test.txt from `input_folder`, builds a
    filtered vocabulary over all documents, writes it to
    vocab_docnade.vocab, and emits one *_nvdm_docs_non_replicated.csv per
    split into `output_folder`.
    """
    split_names = ('training', 'validation', 'test')
    # Extract (labels, docs) per split, preserving the original read order.
    splits = {}
    for split in split_names:
        splits[split] = extract_docs(os.path.join(input_folder, (split + '.txt')), output_folder)
    all_doc = ((splits['training'][1] + splits['validation'][1]) + splits['test'][1])
    (vocab_list, vocab_dict, ignore_words) = gen_vocab_docnade(['_bos_', '_eos_', '_unk_'], all_doc, cachedStopWords, 10, 0.001, True)
    vocab_to_id = dict(zip(vocab_list, range(len(vocab_list))))
    # Drop the ignored entries to form the docnade vocabulary.
    vocab_list_docnade = [word for (index, word) in enumerate(vocab_list) if (not (index in ignore_words))]
    vocab_to_id_docnade = dict(zip(vocab_list_docnade, range(len(vocab_list_docnade))))
    docnade_vocab_filename = os.path.join(output_folder, 'vocab_docnade.vocab')
    with open(docnade_vocab_filename, 'w') as f:
        f.write('\n'.join(vocab_list_docnade))
    print(('Size of the topic vocab: ' + str(len(vocab_to_id_docnade))))
    # Write one CSV per split with documents mapped through the vocab.
    for split in split_names:
        (labels, docs) = splits[split]
        file_name = os.path.join(output_folder, (split + '_nvdm_docs_non_replicated.csv'))
        write_topic_data(docs, labels, vocab_to_id_docnade, file_name)
class FCConfig(object):
    """Static configuration for a SiamFC-style tracker.

    `update` overrides attributes from a dict and refreshes the values
    derived from `instance_size` via `renew`.
    """
    # multi-scale search parameters
    num_scale = 3
    scale_step = 1.0375
    scale_penalty = 0.9745
    scale_lr = 0.59
    # response-map post-processing
    response_up = 16
    windowing = 'cosine'
    w_influence = 0.35
    # crop / output geometry
    exemplar_size = 128
    instance_size = 256
    score_size = 27
    total_stride = 8
    context_amount = 0.5

    def update(self, newparam=None):
        """Set each (name, value) pair from `newparam` and refresh derived sizes."""
        if not newparam:
            # None or empty dict: nothing to apply, leave state untouched.
            return
        for (name, val) in newparam.items():
            setattr(self, name, val)
        self.renew()

    def renew(self):
        """Recompute values derived from instance_size."""
        self.exemplar_size = (self.instance_size - 128)
        self.score_size = 27  # kept fixed, matching the original config
_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite specialisation for GPT-2 (slow and fast tokenizers).

    NOTE(review): the 'G' character prefixing many tokens below looks like a
    mis-encoded 'Ġ' (the GPT-2 byte-level BPE space marker) — confirm
    against the original fixture before editing these literals.
    """
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    # Extra kwargs applied whenever the mixin instantiates a pretrained tokenizer.
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False

    def setUp(self):
        """Write a tiny vocab/merges BPE fixture into the temp dir."""
        super().setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>', '<|endoftext|>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        # Slow (Python) tokenizer built from the fixture files.
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        # Fast (Rust-backed) tokenizer built from the same fixture files.
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # Round-trip sample used by the mixin's encode/decode checks.
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        """Tokenisation and id conversion with the tiny fixture vocab."""
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        # Unknown token must map to the <unk> id (19 in the fixture vocab).
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        input_tokens = (tokens + [rust_tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Intentionally disabled for GPT-2 (overrides the mixin test).
        pass

    def test_padding(self, max_length=15):
        """Padding must be rejected: GPT-2 defines no padding token."""
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')]
                # Every padded call — single, batch, and pair — must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length')

    def test_padding_different_model_input_name(self):
        # Not applicable to GPT-2 (no pad token); overrides the mixin test.
        pass
.register('DetNASNet-RCNN')
def build_detnasnet_fpn_backbone(cfg):
    """Assemble a DetNASNet body plus an FPN-style scaler into one backbone.

    Input channel counts are derived from the HNASNET filter and stride
    multipliers in `cfg`, with a leading 0 entry prepended to the list.
    """
    base_channels = cfg.MODEL.HNASNET.FILTER_MULTIPLIER
    in_channels_list = [0] + [(base_channels * s) for s in cfg.MODEL.HNASNET.STRIDE_MULTIPLIER[1:]]
    out_channels = cfg.MODEL.HNASNET.BACKBONE_OUT_CHANNELS
    body = DetNASNet(cfg)
    fpn = fpn_module.Scaler(in_channels_list=in_channels_list, out_channels=out_channels, conv_block=conv_with_kaiming_uniform(cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU, cfg.MODEL.FPN.USE_DEFORMABLE, cfg.MODEL.FPN.USE_BN), top_blocks=fpn_module.LastLevelMaxPool())
    model = nn.Sequential(OrderedDict([('body', body), ('fpn', fpn)]))
    model.out_channels = out_channels
    return model
def train(rank, world_size, opt):
torch.manual_seed(0)
setup(rank, world_size, opt.port)
torch.cuda.set_device(rank)
device = torch.device(rank)
curriculum = getattr(curriculums, opt.curriculum)
metadata = curriculums.extract_metadata(curriculum, 0)
fixed_z = z_sampler((25, 256), device='cpu', dist=metadata['z_dist'])
ldist = LSampler(device=device, dataset=metadata['dataset'], mvn_path=(metadata['mvn_path'] if ('mvn_path' in metadata) else None))
fixed_l = ldist.sample(25)
SIREN = getattr(siren, metadata['model'])
scaler = torch.cuda.amp.GradScaler()
generator = getattr(generators, metadata['generator'])(SIREN, metadata['latent_dim'], metadata['shading'], metadata['view_condition'], metadata['light_condition'], metadata['surf_track'], ldist=ldist).to(device)
discriminator = getattr(discriminators, metadata['discriminator'])().to(device)
ema = ExponentialMovingAverage(generator.parameters(), decay=0.999)
ema2 = ExponentialMovingAverage(generator.parameters(), decay=0.9999)
if (opt.load_dir != ''):
generator.load_state_dict(torch.load((opt.load_dir + 'generator.pth'), map_location=device), strict=False)
discriminator.load_state_dict(torch.load((opt.load_dir + 'discriminator.pth'), map_location=device), strict=False)
if os.path.isfile((opt.load_dir + 'ema.pth')):
ema = torch.load((opt.load_dir + 'ema.pth'), map_location=device)
ema2 = torch.load((opt.load_dir + 'ema2.pth'), map_location=device)
generator_ddp = DDP(generator, device_ids=[rank], find_unused_parameters=True)
discriminator_ddp = DDP(discriminator, device_ids=[rank], find_unused_parameters=True, broadcast_buffers=False)
generator = generator_ddp.module
discriminator = discriminator_ddp.module
if metadata['surf_track']:
perceptual_loss = PerceptualLoss(model='net-lin', net='vgg', use_gpu=True, gpu_ids=[device])
if metadata.get('unique_lr', False):
mapping_network_param_names = [name for (name, _) in generator_ddp.module.siren.mapping_network.named_parameters()]
mapping_network_parameters = [p for (n, p) in generator_ddp.named_parameters() if (n in mapping_network_param_names)]
if metadata['surf_track']:
surfacenet_param_names = [name for (name, _) in generator_ddp.module.surfacenet.named_parameters()]
surfacenet_parameters = [p for (n, p) in generator_ddp.named_parameters() if (n in surfacenet_param_names)]
generator_parameters = [p for (n, p) in generator_ddp.named_parameters() if (n not in [(mapping_network_param_names + surfacenet_param_names)])]
optimizer_G = torch.optim.Adam([{'params': generator_parameters, 'name': 'generator'}, {'params': surfacenet_parameters, 'name': 'surfacenet', 'lr': (metadata['gen_lr'] * 10)}, {'params': mapping_network_parameters, 'name': 'mapping_network', 'lr': (metadata['gen_lr'] * 0.05)}], lr=metadata['gen_lr'], betas=metadata['betas'], weight_decay=metadata['weight_decay'])
else:
generator_parameters = [p for (n, p) in generator_ddp.named_parameters() if (n not in mapping_network_param_names)]
optimizer_G = torch.optim.Adam([{'params': generator_parameters, 'name': 'generator'}, {'params': mapping_network_parameters, 'name': 'mapping_network', 'lr': (metadata['gen_lr'] * 0.05)}], lr=metadata['gen_lr'], betas=metadata['betas'], weight_decay=metadata['weight_decay'])
else:
optimizer_G = torch.optim.Adam(generator_ddp.parameters(), lr=metadata['gen_lr'], betas=metadata['betas'], weight_decay=metadata['weight_decay'])
optimizer_D = torch.optim.Adam(discriminator_ddp.parameters(), lr=metadata['disc_lr'], betas=metadata['betas'], weight_decay=metadata['weight_decay'])
if (opt.load_dir != ''):
if os.path.isfile((opt.load_dir + 'optimizer_G.pth')):
optimizer_G.load_state_dict(torch.load((opt.load_dir + 'optimizer_G.pth'), map_location=device))
optimizer_D.load_state_dict(torch.load((opt.load_dir + 'optimizer_D.pth'), map_location=device))
if (not metadata.get('disable_scaler', False)):
scaler.load_state_dict(torch.load((opt.load_dir + 'scaler.pth'), map_location=device))
generator_losses = []
discriminator_losses = []
if (opt.set_step != None):
generator.step = opt.set_step
discriminator.step = opt.set_step
if metadata.get('disable_scaler', False):
scaler = torch.cuda.amp.GradScaler(enabled=False)
generator.set_device(device)
with open(os.path.join(opt.output_dir, 'options.txt'), 'w') as f:
f.write(str(opt))
f.write('\n\n')
f.write(str(generator))
f.write('\n\n')
f.write(str(discriminator))
f.write('\n\n')
f.write(str(curriculum))
torch.manual_seed(rank)
dataloader = None
total_progress_bar = tqdm(total=opt.n_epochs, desc='Total progress', dynamic_ncols=True)
total_progress_bar.update(discriminator.epoch)
interior_step_bar = tqdm(dynamic_ncols=True)
t0 = time.time()
for _ in range(opt.n_epochs):
total_progress_bar.update(1)
metadata = curriculums.extract_metadata(curriculum, discriminator.step)
if ((not dataloader) or (dataloader.batch_size != metadata['batch_size'])):
(dataloader, CHANNELS) = datasets.get_dataset_distributed(metadata['dataset'], world_size, rank, **metadata)
step_next_upsample = curriculums.next_upsample_step(curriculum, discriminator.step)
step_last_upsample = curriculums.last_upsample_step(curriculum, discriminator.step)
interior_step_bar.reset(total=(step_next_upsample - step_last_upsample))
interior_step_bar.set_description(f'Progress to next stage')
interior_step_bar.update((discriminator.step - step_last_upsample))
for (i, (imgs, _)) in enumerate(dataloader):
if (((discriminator.step % opt.model_save_interval) == 0) and (rank == 0)):
now = datetime.now()
now = now.strftime('%d--%H:%M--')
torch.save(ema, os.path.join(opt.output_dir, (now + 'ema.pth')))
torch.save(ema2, os.path.join(opt.output_dir, (now + 'ema2.pth')))
torch.save(generator_ddp.module.state_dict(), os.path.join(opt.output_dir, (now + 'generator.pth')))
torch.save(discriminator_ddp.module.state_dict(), os.path.join(opt.output_dir, (now + 'discriminator.pth')))
torch.save(optimizer_G.state_dict(), os.path.join(opt.output_dir, (now + 'optimizer_G.pth')))
torch.save(optimizer_D.state_dict(), os.path.join(opt.output_dir, (now + 'optimizer_D.pth')))
torch.save(scaler.state_dict(), os.path.join(opt.output_dir, (now + 'scaler.pth')))
metadata = curriculums.extract_metadata(curriculum, discriminator.step)
if (dataloader.batch_size != metadata['batch_size']):
break
if metadata['surf_track']:
(metadata['delta'], metadata['num_steps']) = rendering_scheduler(discriminator.step, **metadata)
lr_ratio = (metadata['num_steps'] / metadata['num_steps_max'])
else:
metadata['delta'] = (- 1)
lr_ratio = 1
for param_group in optimizer_G.param_groups:
if (param_group.get('name', None) == 'mapping_network'):
param_group['lr'] = ((metadata['gen_lr'] * 0.05) * lr_ratio)
elif (param_group.get('name', None) == 'surfacenet'):
param_group['lr'] = (metadata['gen_lr'] * 10)
else:
param_group['lr'] = (metadata['gen_lr'] * lr_ratio)
param_group['betas'] = metadata['betas']
param_group['weight_decay'] = metadata['weight_decay']
for param_group in optimizer_D.param_groups:
param_group['lr'] = metadata['disc_lr']
param_group['betas'] = metadata['betas']
param_group['weight_decay'] = metadata['weight_decay']
if (scaler.get_scale() < 1):
scaler.update(1.0)
generator_ddp.train()
discriminator_ddp.train()
alpha = min(1, ((discriminator.step - step_last_upsample) / metadata['fade_steps']))
real_imgs = imgs.to(device, non_blocking=True)
metadata['nerf_noise'] = max(0, (1.0 - (discriminator.step / 5000.0)))
metadata['l_ratio'] = act_scheduler(discriminator.step)
with torch.cuda.amp.autocast():
with torch.no_grad():
z = z_sampler((real_imgs.shape[0], metadata['latent_dim']), device=device, dist=metadata['z_dist'])
l = ldist.sample(real_imgs.shape[0])
split_batch_size = (z.shape[0] // metadata['batch_split'])
gen_imgs = []
gen_positions = []
for split in range(metadata['batch_split']):
subset_z = z[(split * split_batch_size):((split + 1) * split_batch_size)]
subset_l = l[(split * split_batch_size):((split + 1) * split_batch_size)]
results = generator_ddp(subset_z, subset_l, **metadata)
(g_imgs, g_pos) = (results['rgb'], results['pose'])
gen_imgs.append(g_imgs)
gen_positions.append(g_pos)
gen_imgs = torch.cat(gen_imgs, axis=0)
gen_positions = torch.cat(gen_positions, axis=0)
real_imgs.requires_grad = True
(r_preds, _, _) = discriminator_ddp(real_imgs, alpha, **metadata)
if (metadata['r1_lambda'] > 0):
grad_real = torch.autograd.grad(outputs=scaler.scale(r_preds.sum()), inputs=real_imgs, create_graph=True)
inv_scale = (1.0 / scaler.get_scale())
grad_real = [(p * inv_scale) for p in grad_real][0]
with torch.cuda.amp.autocast():
if (metadata['r1_lambda'] > 0):
grad_penalty = (grad_real.view(grad_real.size(0), (- 1)).norm(2, dim=1) ** 2).mean()
grad_penalty = ((0.5 * metadata['r1_lambda']) * grad_penalty)
else:
grad_penalty = 0
(g_preds, g_pred_latent, g_pred_position) = discriminator_ddp(gen_imgs, alpha, **metadata)
if ((metadata['z_lambda'] > 0) or (metadata['pos_lambda'] > 0)):
latent_penalty = (torch.nn.MSELoss()(g_pred_latent, z) * metadata['z_lambda'])
position_penalty = (torch.nn.MSELoss()(g_pred_position, gen_positions) * metadata['pos_lambda'])
identity_penalty = (latent_penalty + position_penalty)
else:
identity_penalty = 0
d_loss = (((torch.nn.functional.softplus(g_preds).mean() + torch.nn.functional.softplus((- r_preds)).mean()) + grad_penalty) + identity_penalty)
discriminator_losses.append(d_loss.item())
optimizer_D.zero_grad()
scaler.scale(d_loss).backward()
scaler.unscale_(optimizer_D)
torch.nn.utils.clip_grad_norm_(discriminator_ddp.parameters(), metadata['grad_clip'])
scaler.step(optimizer_D)
z = z_sampler((imgs.shape[0], metadata['latent_dim']), device=device, dist=metadata['z_dist'])
l = ldist.sample(imgs.shape[0])
split_batch_size = (z.shape[0] // metadata['batch_split'])
for split in range(metadata['batch_split']):
with torch.cuda.amp.autocast():
subset_z = z[(split * split_batch_size):((split + 1) * split_batch_size)]
subset_l = l[(split * split_batch_size):((split + 1) * split_batch_size)]
results = generator_ddp(subset_z, subset_l, **metadata)
(gen_imgs, gen_positions) = (results['rgb'], results['pose'])
(depth_pred, depth) = (results['depth_pred'], results['depth'].detach())
(g_preds, g_pred_latent, g_pred_position) = discriminator_ddp(gen_imgs, alpha, **metadata)
topk_percentage = (max((0.99 ** (discriminator.step / metadata['topk_interval'])), metadata['topk_v']) if (('topk_interval' in metadata) and ('topk_v' in metadata)) else 1)
topk_num = math.ceil((topk_percentage * g_preds.shape[0]))
g_preds = torch.topk(g_preds, topk_num, dim=0).values
if ((metadata['z_lambda'] > 0) or (metadata['pos_lambda'] > 0)):
latent_penalty = (torch.nn.MSELoss()(g_pred_latent, subset_z) * metadata['z_lambda'])
position_penalty = (torch.nn.MSELoss()(g_pred_position, gen_positions) * metadata['pos_lambda'])
identity_penalty = (latent_penalty + position_penalty)
else:
identity_penalty = 0
if metadata['surf_track']:
depth_pred_norm = ((((depth_pred - metadata['ray_start']) / (metadata['ray_end'] - metadata['ray_start'])) * 2) - 1)
depth_norm = ((((depth - metadata['ray_start']) / (metadata['ray_end'] - metadata['ray_start'])) * 2) - 1)
depth_loss = (F.l1_loss(depth_pred_norm, depth_norm) + torch.mean(perceptual_loss(depth_pred_norm.unsqueeze(1).repeat(1, 3, 1, 1), depth_norm.unsqueeze(1).repeat(1, 3, 1, 1))))
else:
depth_loss = torch.zeros(1).to(device)
g_loss = (torch.nn.functional.softplus((- g_preds)).mean() + identity_penalty)
generator_losses.append(g_loss.item())
scaler.scale((g_loss + depth_loss)).backward()
scaler.unscale_(optimizer_G)
torch.nn.utils.clip_grad_norm_(generator_ddp.parameters(), metadata.get('grad_clip', 0.3))
scaler.step(optimizer_G)
scaler.update()
optimizer_G.zero_grad()
ema.update(generator_ddp.parameters())
ema2.update(generator_ddp.parameters())
if (rank == 0):
interior_step_bar.update(1)
if ((i % 10) == 0):
tqdm.write(f"Epoch: {discriminator.epoch} Step: {discriminator.step} D: {d_loss.item():.3f} G: {g_loss.item():.3f} Depth: {depth_loss.item():.3f} STD_r: {results['depth_std'].mean().item():.4f} Delta: {metadata['delta']:.4f} Num step: {metadata['num_steps']} Alpha: {alpha:.2f} Img: {metadata['img_size']} Batch: {metadata['batch_size']} TopK: {topk_num} Scale: {scaler.get_scale()}")
if ((discriminator.step % opt.sample_interval) == 0):
generator_ddp.eval()
with torch.no_grad():
with torch.cuda.amp.autocast():
copied_metadata = copy.deepcopy(metadata)
copied_metadata['h_stddev'] = copied_metadata['v_stddev'] = 0
copied_metadata['img_size'] = 128
results = generator_ddp.module.staged_forward(fixed_z.to(device), fixed_l, **copied_metadata)
save_image(((results['rgb'][:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_fixed.jpg'), nrow=5, normalize=False)
save_image(results['depth'][:25].unsqueeze(1), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_depth.jpg'), nrow=5, normalize=True)
if metadata['surf_track']:
save_image(results['depth_pred'][:25].unsqueeze(1), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_depth_pred.jpg'), nrow=5, normalize=True)
if metadata['shading']:
save_image(((results['normal'][:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_normal.jpg'), nrow=5, normalize=False)
save_image(results['shading'][:25].unsqueeze(1), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_shading.jpg'), nrow=5, normalize=False)
save_image(results['shading'][:25].unsqueeze(1), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_shading2.jpg'), nrow=5, normalize=True)
save_image(((results['albedo'][:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_albedo.jpg'), nrow=5, normalize=False)
with torch.no_grad():
with torch.cuda.amp.autocast():
copied_metadata = copy.deepcopy(metadata)
copied_metadata['h_stddev'] = copied_metadata['v_stddev'] = 0
copied_metadata['h_mean'] += 0.5
copied_metadata['img_size'] = 128
gen_imgs = generator_ddp.module.staged_forward(fixed_z.to(device), fixed_l, **copied_metadata)['rgb']
save_image(((gen_imgs[:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_tilted.jpg'), nrow=5, normalize=False)
ema.store(generator_ddp.parameters())
ema.copy_to(generator_ddp.parameters())
generator_ddp.eval()
with torch.no_grad():
with torch.cuda.amp.autocast():
copied_metadata = copy.deepcopy(metadata)
copied_metadata['h_stddev'] = copied_metadata['v_stddev'] = 0
copied_metadata['img_size'] = 128
gen_imgs = generator_ddp.module.staged_forward(fixed_z.to(device), fixed_l, **copied_metadata)['rgb']
save_image(((gen_imgs[:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_fixed_ema.jpg'), nrow=5, normalize=False)
with torch.no_grad():
with torch.cuda.amp.autocast():
copied_metadata = copy.deepcopy(metadata)
copied_metadata['h_stddev'] = copied_metadata['v_stddev'] = 0
copied_metadata['h_mean'] += 0.5
copied_metadata['img_size'] = 128
gen_imgs = generator_ddp.module.staged_forward(fixed_z.to(device), fixed_l, **copied_metadata)['rgb']
save_image(((gen_imgs[:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_tilted_ema.jpg'), nrow=5, normalize=False)
with torch.no_grad():
with torch.cuda.amp.autocast():
copied_metadata = copy.deepcopy(metadata)
copied_metadata['img_size'] = 128
copied_metadata['h_stddev'] = copied_metadata['v_stddev'] = 0
copied_metadata['psi'] = 0.7
gen_imgs = generator_ddp.module.staged_forward(torch.randn_like(fixed_z).to(device), fixed_l, **copied_metadata)['rgb']
save_image(((gen_imgs[:25] / 2) + 0.5), os.path.join(opt.output_dir, f'{discriminator.step}_random.jpg'), nrow=5, normalize=False)
ema.restore(generator_ddp.parameters())
if ((discriminator.step % opt.sample_interval) == 0):
torch.save(ema, os.path.join(opt.output_dir, 'ema.pth'))
torch.save(ema2, os.path.join(opt.output_dir, 'ema2.pth'))
torch.save(generator_ddp.module.state_dict(), os.path.join(opt.output_dir, 'generator.pth'))
torch.save(discriminator_ddp.module.state_dict(), os.path.join(opt.output_dir, 'discriminator.pth'))
torch.save(optimizer_G.state_dict(), os.path.join(opt.output_dir, 'optimizer_G.pth'))
torch.save(optimizer_D.state_dict(), os.path.join(opt.output_dir, 'optimizer_D.pth'))
torch.save(scaler.state_dict(), os.path.join(opt.output_dir, 'scaler.pth'))
torch.save(generator_losses, os.path.join(opt.output_dir, 'generator.losses'))
torch.save(discriminator_losses, os.path.join(opt.output_dir, 'discriminator.losses'))
if ((opt.eval_freq > 0) and ((discriminator.step % opt.eval_freq) == 0)):
generated_dir = os.path.join(opt.output_dir, 'evaluation/generated')
if (rank == 0):
fid_evaluation.setup_evaluation(metadata['dataset'], metadata['dataset_path'], generated_dir, target_size=128)
dist.barrier()
ema.store(generator_ddp.parameters())
ema.copy_to(generator_ddp.parameters())
generator_ddp.eval()
fid_evaluation.output_images(generator_ddp, metadata, rank, world_size, generated_dir)
ema.restore(generator_ddp.parameters())
dist.barrier()
if (rank == 0):
fid = fid_evaluation.calculate_fid(metadata['dataset'], generated_dir, target_size=128)
with open(os.path.join(opt.output_dir, f'fid.txt'), 'a') as f:
f.write(f'''
{discriminator.step}:{fid}''')
with open(os.path.join(opt.output_dir, f'time.txt'), 'a') as f:
f.write(f'''
{discriminator.step}:{((time.time() - t0) / 3600)}''')
with open(os.path.join(opt.output_dir, f'render.txt'), 'a') as f:
(delta, numstep) = (metadata['delta'], metadata['num_steps'])
f.write(f'''
{discriminator.step}:{delta} {numstep}''')
torch.cuda.empty_cache()
discriminator.step += 1
generator.step += 1
discriminator.epoch += 1
generator.epoch += 1
cleanup() |
def load_images(images_file_path, batch_size, resize_size=256, is_train=True, crop_size=224, is_cen=False, num_workers=4):
    """Build a DataLoader over an image-list file.

    Parameters
    ----------
    images_file_path : str
        Text file with one image entry per line (format expected by ``ImageList``).
    batch_size : int
    resize_size : int
        Edge size images are resized to before cropping.
    is_train : bool
        Training mode uses random crop/flip and shuffling; eval mode uses a
        deterministic crop and no shuffling.
    crop_size : int
    is_cen : bool
        Training-only: use a center crop instead of a random resized crop.
    num_workers : int

    Returns
    -------
    DataLoader with ``drop_last=True`` in both modes.
    """
    # ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if not is_train:
        # Deterministic crop anchored near the image centre.
        start_center = ((resize_size - crop_size) - 1) / 2
        transformer = transforms.Compose([
            ResizeImage(resize_size),
            PlaceCrop(crop_size, start_center, start_center),
            transforms.ToTensor(),
            normalize,
        ])
        shuffle = False
    else:
        if is_cen:
            # NOTE(review): transforms.Scale is a deprecated alias of Resize and
            # is redundant after ResizeImage -- kept for behavioural parity.
            transformer = transforms.Compose([
                ResizeImage(resize_size),
                transforms.Scale(resize_size),
                transforms.RandomHorizontalFlip(),
                transforms.CenterCrop(crop_size),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            transformer = transforms.Compose([
                ResizeImage(resize_size),
                transforms.RandomResizedCrop(crop_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        shuffle = True
    # Fix: close the list file instead of leaking the handle from a bare
    # open(...).readlines().
    with open(images_file_path) as f:
        image_lines = f.readlines()
    images = ImageList(image_lines, transform=transformer)
    return util_data.DataLoader(images, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, drop_last=True)
class CutPaste(object):
    """CutPaste-style self-supervised augmentation: cut a patch from the image
    and paste it back at a random location, optionally rotated (the 'scar'
    variant).

    type='binary' returns (image, one random augmentation);
    type='3way' returns (image, cutpaste, scar).
    """

    def __init__(self, transform=True, type='binary'):
        self.type = type
        # Optional colour jitter applied to the cut patch before pasting.
        if transform:
            self.transform = transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1)
        else:
            self.transform = None

    @staticmethod
    def crop_and_paste_patch(image, patch_w, patch_h, transform, rotation=False):
        """Cut a patch_w x patch_h patch at a random spot and paste it at
        another random spot; return the augmented copy.

        Fix: this was defined without `self` yet called as an instance method
        (self.crop_and_paste_patch(...)), which shifted every argument by one
        position -- `image` received the CutPaste instance. @staticmethod
        restores the intended binding without changing any call site.
        """
        (org_w, org_h) = image.size
        mask = None
        (patch_left, patch_top) = (random.randint(0, (org_w - patch_w)), random.randint(0, (org_h - patch_h)))
        (patch_right, patch_bottom) = ((patch_left + patch_w), (patch_top + patch_h))
        patch = image.crop((patch_left, patch_top, patch_right, patch_bottom))
        if transform:
            patch = transform(patch)
        if rotation:
            random_rotate = random.uniform(*rotation)
            # Rotate with an alpha channel so the paste mask follows the
            # rotated patch outline.
            patch = patch.convert('RGBA').rotate(random_rotate, expand=True)
            mask = patch.split()[(- 1)]
        (paste_left, paste_top) = (random.randint(0, (org_w - patch_w)), random.randint(0, (org_h - patch_h)))
        aug_image = image.copy()
        aug_image.paste(patch, (paste_left, paste_top), mask=mask)
        return aug_image

    def cutpaste(self, image, area_ratio=(0.02, 0.15), aspect_ratio=((0.3, 1), (1, 3.3))):
        """Rectangular cut-paste: patch area is a random fraction of the image
        area, with an aspect ratio drawn from one of two ranges."""
        img_area = (image.size[0] * image.size[1])
        patch_area = (random.uniform(*area_ratio) * img_area)
        patch_aspect = random.choice([random.uniform(*aspect_ratio[0]), random.uniform(*aspect_ratio[1])])
        patch_w = int(np.sqrt((patch_area * patch_aspect)))
        patch_h = int(np.sqrt((patch_area / patch_aspect)))
        cutpaste = self.crop_and_paste_patch(image, patch_w, patch_h, self.transform, rotation=False)
        return cutpaste

    def cutpaste_scar(self, image, width=[1, 8], length=[5, 13], rotation=((- 45), 45)):
        """Thin rotated 'scar' variant of cut-paste."""
        # NOTE: the mutable default arguments are only read, never mutated,
        # so the shared-default pitfall does not apply; kept for parity.
        (patch_w, patch_h) = (random.randint(*width), random.randint(*length))
        cutpaste_scar = self.crop_and_paste_patch(image, patch_w, patch_h, self.transform, rotation=rotation)
        return cutpaste_scar

    def __call__(self, image):
        if (self.type == 'binary'):
            aug = random.choice([self.cutpaste, self.cutpaste_scar])
            return (image, aug(image))
        elif (self.type == '3way'):
            cutpaste = self.cutpaste(image)
            scar = self.cutpaste_scar(image)
            return (image, cutpaste, scar)
        # Any other `type` falls through and returns None (original behaviour).
def get_transformers_submodules(root=None):
    """Collect importable submodule names under a source tree.

    Parameters
    ----------
    root : str | os.PathLike | None
        Directory to scan; defaults to the module-level PATH_TO_TRANSFORMERS
        (backward-compatible generalization for testing/reuse).

    Returns
    -------
    list[str]
        Dotted names: every non-underscore directory containing at least one
        .py file, plus every top-level .py module (excluding __init__.py).
    """
    if root is None:
        root = PATH_TO_TRANSFORMERS
    root = Path(root)
    submodules = []
    for (path, directories, files) in os.walk(root):
        # Fix: the original removed entries from `directories` while iterating
        # it, which makes the loop skip the sibling following each pruned
        # entry. Prune in place via slice assignment so os.walk also skips
        # descending into underscore-prefixed directories.
        directories[:] = [d for d in directories if not d.startswith('_')]
        for folder in directories:
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue  # directory holds no Python sources
            short_path = str((Path(path) / folder).relative_to(root))
            submodules.append(short_path.replace(os.path.sep, '.'))
        for fname in files:
            if fname == '__init__.py':
                continue
            short_path = str((Path(path) / fname).relative_to(root))
            submodule = short_path.replace(os.path.sep, '.').replace('.py', '')
            # Only record top-level modules (no dots); nested files are
            # already represented by their package directory above.
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
def f(x):
    """Sine of the pairwise distance from x to the identity element of `space`.

    NOTE(review): relies on a module-level `space` object exposing
    `pairwise_dist` and `id` -- confirm its contract at the definition site.
    """
    identity = space.id.view((- 1), *space.id.shape)
    return torch.sin(space.pairwise_dist(x, identity).squeeze())
def main(base_model='ise-uiuc/Magicoder-S-DS-6.7B', device='cuda:0', port=8080):
    """Launch a Gradio playground around a Magicoder text-generation pipeline.

    Parameters
    ----------
    base_model : str
        Hugging Face model id to load.
    device : str
        Device string for the transformers pipeline.
    port : int
        Gradio server port.
    """
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    pipeline = transformers.pipeline('text-generation', model=base_model, torch_dtype=torch.float16, device=device)

    def evaluate_magicoder(instruction, temperature=1, max_new_tokens=2048):
        """Format the Magicoder prompt, sample a completion, return it."""
        # Magicoder instruction template. Fix: the template and the
        # gr.Interface call below were garbled in the original source (an
        # unterminated string literal swallowed the launch call); restored to
        # the published "@@ Instruction"/"@@ Response" format.
        MAGICODER_PROMPT = (
            'You are an exceptionally intelligent coding assistant that '
            'consistently delivers accurate and reliable responses to user '
            'instructions.\n\n@@ Instruction\n{instruction}\n\n@@ Response\n'
        )
        prompt = MAGICODER_PROMPT.format(instruction=instruction)
        if (temperature > 0):
            sequences = pipeline(prompt, do_sample=True, temperature=temperature, max_new_tokens=max_new_tokens)
        else:
            sequences = pipeline(prompt, max_new_tokens=max_new_tokens)
        generated_text = ''
        for seq in sequences:
            print('question')
            print(prompt)
            generated_text = seq['generated_text'].replace(prompt, '')
            print('answer')
            print(generated_text)
        return generated_text

    gr.Interface(
        fn=evaluate_magicoder,
        inputs=[
            gr.components.Textbox(lines=3, label='Instruction', placeholder='Anything you want to ask Magicoder ?'),
            gr.components.Slider(minimum=0, maximum=1, value=1, label='Temperature'),
            gr.components.Slider(minimum=1, maximum=2048, step=1, value=512, label='Max tokens'),
        ],
        outputs=[gr.components.Textbox(lines=30, label='Output')],
        title='Magicoder',
        description='This is a LLM playground for Magicoder!',
    ).queue().launch(server_port=port)
def json_to_dataframe(path: str, layer_name: str, max_C: int=128, prefix: str='') -> pd.DataFrame:
    """Aggregate per-layer benchmark JSON files into a single DataFrame.

    Scans ``path`` for files named ``<layer_name>_*.json``, normalizes the
    learned-parameter columns, drops runs with ``C > max_C``, and for each C
    in {16, 32, 64} keeps the single best run by top-1 accuracy.

    Parameters
    ----------
    path : str
        Directory containing the result JSON files.
    layer_name : str
        Layer identifier used both in file names and as the column prefix.
    max_C : int
        Runs with a larger C are discarded.
    prefix : str
        Prepended to the C value to build the ``hue_string`` plot key.

    Returns
    -------
    pd.DataFrame
        One row per retained C, with the ``<layer_name>.`` prefix stripped
        from column names.
    """
    files = glob.glob(path + '/*.json')
    # Fix: escape the layer name and use a raw pattern so '.' before 'json'
    # is matched literally (the original '\.json' also raised an invalid
    # escape warning in a non-raw f-string).
    pattern = re.compile(rf'{re.escape(layer_name)}_.+\.json')
    files_res = [x for x in files if pattern.search(x)]
    dfs = []
    for file in files_res:
        data = pd.read_json(file)
        if (layer_name + '.learned_n') not in data.columns:
            # Older result files store the (n, d) shape in learned_a_shape
            # rows instead of dedicated columns; reconstruct them.
            data[layer_name + '.learned_n'] = data.iloc[0][layer_name + '.learned_a_shape']
            data[layer_name + '.learned_d'] = data.iloc[1][layer_name + '.learned_a_shape']
            K = data.iloc[0][layer_name + '.K']
            C = data.iloc[0][layer_name + '.C']
            # L_size counts float32 bytes: m = L_size / (4 * K * C).
            data[layer_name + '.learned_m'] = int(data.iloc[0][layer_name + '.L_size'] / ((4 * K) * C))
        C = data.iloc[0][layer_name + '.C']
        if C > max_C:
            continue
        if (layer_name + '.learned_a_shape') in data.columns:
            # Row 1 only carried the second shape element; remove it together
            # with the raw shape columns.
            data = data.drop([1])
            data = data.drop(columns=[layer_name + '.learned_a_shape', layer_name + '.learned_b_shape'])
        data['hue_string'] = prefix + str(C)
        data['test_name'] = (layer_name + '-') + str(data.iloc[0][layer_name + '.C'])
        data['layer_name'] = layer_name + (' (3x3)' if ('conv2' in layer_name) else ' (1x1)')
        data['row_name'] = layer_name.split('.')[0]
        data['col_name'] = layer_name[(len(layer_name.split('.')[0]) + 1):]
        dfs.append(data)
    df = pd.concat(dfs, ignore_index=True)
    df = df.drop(columns='halut_layers')
    df['top_1_accuracy_100'] = df['top_1_accuracy'] * 100
    final_dfs = []
    for C in [16, 32, 64]:
        # Fix: operate on an explicit copy (the original inplace sort on a
        # boolean-indexed slice triggered SettingWithCopyWarning) and skip C
        # values with no surviving rows instead of raising IndexError.
        df_C = df[df[layer_name + '.C'] == C].copy()
        if df_C.empty:
            continue
        df_C = df_C.sort_values(by=['top_1_accuracy'], ignore_index=True, ascending=False)
        final_dfs.append(df_C.iloc[[0]])
    df = pd.concat(final_dfs, ignore_index=True)
    # Fix: strip the literal prefix; regex=False keeps the '.' literal instead
    # of matching any character.
    df.columns = df.columns.str.replace(layer_name + '.', '', regex=False)
    return df
def main():
    """Build a tiny LSTM graph with named input/output/loss tensors and pass
    the tensors selected on the command line to run_model.

    Uses the module-level `argv`:
        argv[1] -- identifier forwarded to run_model
        argv[2] -- comma-separated tensor names to fetch
        argv[3] -- 'True'/'False' flag forwarded to run_model
    """
    tf.set_random_seed(10)
    with tf.Session() as sess:
        rnn_cell = tf.nn.rnn_cell.LSTMCell(10)
        initial_state = rnn_cell.zero_state(4, dtype=tf.float32)
        inputs = tf.Variable(tf.random_uniform(shape=(4, 30, 100)), name='input')
        inputs = tf.identity(inputs, 'input_node')
        (outputs, state) = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=initial_state, dtype=tf.float32)
        y1 = tf.identity(outputs, 'outputs')
        y2 = tf.identity(state, 'state')
        # Sum-of-squares regression loss against constant all-ones targets.
        t1 = tf.ones([4, 30, 10])
        t2 = tf.ones([4, 10])
        loss = (tf.reduce_sum(((y1 - t1) * (y1 - t1))) + tf.reduce_sum(((y2 - t2) * (y2 - t2))))
        tf.identity(loss, name='lstm_loss')
        # Fix: map() is a lazy iterator in Python 3, so run_model could only
        # consume the tensors once; materialize the list up front.
        net_outputs = [tf.get_default_graph().get_tensor_by_name(x) for x in argv[2].split(',')]
        run_model(net_outputs, argv[1], None, (argv[3] == 'True'))
class TextSearchSolver(object):
    """Answer multiple-choice questions via Elasticsearch full-text retrieval:
    each choice is scored by the summed relevance of its top-n hits."""

    def __init__(self, host: str='localhost', port: int=9200, index_name: str='knowledge', field_name: str='body', topn: int=1) -> None:
        self.client = Elasticsearch([host], port=port)
        print(self.client)
        self.fields = [field_name]
        self.index_name = index_name
        self.topn = topn

    def score(self, question_stem: str, choice_text: str):
        """Return (summed ES relevance score, retrieved passages) for the
        concatenation of question stem and choice text."""
        query_text = '{0} {1}'.format(question_stem, choice_text)
        multi_match = Q('multi_match', query=query_text, fields=self.fields)
        hits = Search(using=self.client, index=self.index_name).query(multi_match)[:self.topn].execute()
        total_score = sum(hit.meta.score for hit in hits)
        passages = [
            self.client.get(index=self.index_name, doc_type='sentence', id=hit.meta.id)['_source'][self.fields[0]]
            for hit in hits
        ]
        return (total_score, passages)

    def solver_info(self) -> str:
        return 'text_search'

    def answer_question(self, question: MultipleChoiceQuestion) -> MultipleChoiceAnswerwithContext:
        """Score every choice and wrap the results with their contexts."""
        scored_choices = [
            ChoiceConfidenceContext(choice, *self.score(question.stem, choice.text))
            for choice in question.choices
        ]
        return MultipleChoiceAnswerwithContext(scored_choices)
def conv1x1_block(in_channels, out_channels, stride=1, padding=0, groups=1, bias=False, use_bn=True, bn_eps=1e-05, activation=(lambda : nn.ReLU(inplace=True))):
    """Factory for a pointwise (1x1) ConvBlock.

    All options are forwarded unchanged; only kernel_size is fixed to 1.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,  # pointwise convolution
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation,
    )
def preprocess_opts(parser):
    """Register all data-preprocessing command-line options on `parser`.

    Option groups: Data (paths and layout), CoQG (feature counts), Vocab,
    Pruning (length/case filters), Random (shuffle/seed) and Logging.
    """
    # --- Data: input/output locations and corpus layout ---
    data_group = parser.add_argument_group('Data')
    data_group.add_argument('-train_dir', required=True, help='Path to the training data')
    data_group.add_argument('-valid_dir', required=True, help='Path to the validation data')
    data_group.add_argument('-data_type', choices=['concat', 'query', 'hier', 'concat_sent'], help='concat: p <q> q <a> a <q> q <a> a <a> current_answer,\n query: p <q> q <a> a <q> q <a> a <a>, current_answer,\n hier: p, q..., a..., current_answer,\n concat_sent: sent q <a> a <q> q <a> a <a> current_answer,\n ')
    data_group.add_argument('-save_data', required=True, help='Output file for the prepared data')
    # --- CoQG: side-feature counts ---
    coqg_group = parser.add_argument_group('CoQG')
    coqg_group.add_argument('-src_nfeats', type=int, default=0, help='Num. of src features')
    coqg_group.add_argument('-tgt_nfeats', type=int, default=0, help='Num. of tgt features')
    # --- Vocab: vocabulary files, sizes and sharing ---
    vocab_group = parser.add_argument_group('Vocab')
    vocab_group.add_argument('-src_vocab', default='', help='Path to an existing source vocabulary. Format:\n one word per line.')
    vocab_group.add_argument('-tgt_vocab', default='', help='Path to an existing target vocabulary. Format:\n one word per line.')
    vocab_group.add_argument('-features_vocabs_prefix', type=str, default='', help='Path prefix to existing features vocabularies')
    vocab_group.add_argument('-src_vocab_size', type=int, default=50000, help='Size of the source vocabulary')
    vocab_group.add_argument('-tgt_vocab_size', type=int, default=50000, help='Size of the target vocabulary')
    vocab_group.add_argument('-src_words_min_frequency', type=int, default=0)
    vocab_group.add_argument('-tgt_words_min_frequency', type=int, default=0)
    vocab_group.add_argument('-dynamic_dict', action='store_true', help='Create dynamic dictionaries')
    vocab_group.add_argument('-share_vocab', action='store_true', help='Share source and target vocabulary')
    # --- Pruning: sequence-length and casing filters ---
    pruning_group = parser.add_argument_group('Pruning')
    pruning_group.add_argument('-seq_length', type=int, default=50, help='Maximum source sequence length')
    pruning_group.add_argument('-seq_length_trunc', type=int, default=0, help='Truncate source sequence length.')
    pruning_group.add_argument('-lower', action='store_true', help='lowercase data')
    # --- Random: shuffling and reproducibility ---
    random_group = parser.add_argument_group('Random')
    random_group.add_argument('-shuffle', type=int, default=1, help='Shuffle data')
    random_group.add_argument('-seed', type=int, default=3435, help='Random seed')
    # --- Logging ---
    logging_group = parser.add_argument_group('Logging')
    logging_group.add_argument('-report_every', type=int, default=100000, help='Report status every this many sentences')
    logging_group.add_argument('-log_file', type=str, default='', help='Output logs to a file under this path.')
def env_from_checkpoint(ckpt_path=None, ckpt_dict=None, env_name=None, render=False, render_offscreen=False, verbose=False, bddl_file_name=None):
    """Rebuild the simulation environment described by a checkpoint.

    Exactly one of ckpt_path / ckpt_dict must identify the checkpoint
    (resolution is delegated to maybe_dict_from_checkpoint). Returns the
    constructed environment together with the loaded checkpoint dict.
    Note: `env_name` is accepted for signature compatibility but unused here.
    """
    ckpt_dict = maybe_dict_from_checkpoint(ckpt_path=ckpt_path, ckpt_dict=ckpt_dict)
    env_metadata = ckpt_dict['env_metadata']
    shape_metadata = ckpt_dict['shape_metadata']
    # Optional task override: point the environment at a different BDDL file.
    if bddl_file_name is not None:
        env_metadata['env_kwargs']['bddl_file_name'] = bddl_file_name
    env = EnvUtils.create_env_from_metadata(
        env_meta=env_metadata,
        render=render,
        render_offscreen=render_offscreen,
        use_image_obs=shape_metadata['use_images'],
    )
    if verbose:
        print(' Loaded Environment ')
        print(env)
    return (env, ckpt_dict)
def test_call_marginalizevlos():
    """Check evolveddiskdf(..., marginalizeVlos=True) against an explicit
    numerical marginalization over one velocity component.

    Two configurations are exercised: (1) an axisymmetric potential with a
    grid over vR, comparing in log space; (2) the same df with an added
    elliptical perturbation and a grid over vT, comparing in linear space
    with a wider nsigma window.
    """
    from galpy.orbit import Orbit
    idf = dehnendf(beta=0.0)
    pot = [LogarithmicHaloPotential(normalize=1.0), EllipticalDiskPotential(twophio=0.001)]
    # Case 1: axisymmetric potential only (pot[0]).
    edf = evolveddiskdf(idf, pot=pot[0], to=(- 10.0))
    (R, phi, vT) = (0.8, 0.0, 0.7)
    vrs = numpy.linspace((- 1.0), 1.0, 101)
    # Evaluate the df on a uniform vR grid at fixed (R, vT, phi).
    pvrs = numpy.array([edf(Orbit([R, vr, vT, phi]), integrate_method='rk6_c') for vr in vrs])
    # Riemann sum over the grid must match the marginalized log-density to ~1e-4.
    assert (numpy.fabs((numpy.log((numpy.sum(pvrs) * (vrs[1] - vrs[0]))) - edf(Orbit([R, 0.0, vT, phi]), marginalizeVlos=True, integrate_method='rk6_c', log=True))) < (10.0 ** (- 4.0))), 'diskdf call w/ marginalizeVlos does not work'
    # Case 2: include the elliptical disk perturbation (full pot list).
    edf = evolveddiskdf(idf, pot=pot, to=(- 10.0))
    (R, phi, vR) = (numpy.sin((numpy.pi / 6.0)), ((- numpy.pi) / 3.0), 0.4)
    vts = numpy.linspace(0.3, 1.5, 101)
    # Grid over vT at fixed (R, vR, phi).
    pvts = numpy.array([edf(Orbit([R, vR, vt, phi]), integrate_method='rk6_c') for vt in vts])
    # Linear-space comparison, nsigma=4 integration window, tolerance ~3e-4.
    assert (numpy.fabs(((numpy.sum(pvts) * (vts[1] - vts[0])) - edf(Orbit([R, vR, 0.0, phi]), marginalizeVlos=True, integrate_method='rk6_c', nsigma=4))) < (10.0 ** (- 3.5))), 'diskdf call w/ marginalizeVlos does not work'
    return None
class Node():
    """Binary-tree node carrying a probability-weighted symbol.

    Presumably used for prefix-code (Huffman-style) construction, given the
    prob/symbol/code fields -- confirm against the tree-building code.
    """

    def __init__(self, prob, symbol, left=None, right=None):
        # Probability mass associated with this node/subtree.
        self.prob = prob
        # Symbol carried by the node.
        self.symbol = symbol
        # Child links; leaves keep the None defaults.
        self.left, self.right = left, right
        # Code bit(s) assigned later while walking the tree; starts empty.
        self.code = ''
class SymbolicEncoder(nn.Module):
    """Three-layer MLP that embeds a flat (symbolic) observation vector.

    The activation is resolved by name from torch.nn.functional, applied
    after the first two layers; the final layer is a plain linear projection.
    """

    def __init__(self, observation_size, embedding_size, activation_function='relu'):
        super().__init__()
        # Look up the activation (e.g. F.relu) by its functional name.
        self.act_fn = getattr(F, activation_function)
        self.fc1 = nn.Linear(observation_size, embedding_size)
        self.fc2 = nn.Linear(embedding_size, embedding_size)
        self.fc3 = nn.Linear(embedding_size, embedding_size)

    def forward(self, observation):
        """Map (..., observation_size) -> (..., embedding_size)."""
        h = self.act_fn(self.fc1(observation))
        h = self.act_fn(self.fc2(h))
        return self.fc3(h)
class ToyGPT2Model(GPT2Model):
    """Small configurable GPT-2 for experiments.

    Wraps GPT2Model with size overrides (width, heads, layers, context) and
    an untied linear LM head, returning raw vocabulary logits from forward().
    """

    def __init__(self, hidden_size=256, head_num=4, layer_num=3, seq_length=512):
        # Build a default GPT2Config and override only the size-related fields.
        config = GPT2Config()
        c_s = f'n_embd={hidden_size},n_head={head_num},n_layer={layer_num},n_positions={seq_length}'
        config.update_from_string(c_s)
        super().__init__(config)
        self.config = config
        # Untied projection from hidden states to vocabulary logits (bias-free).
        self.lm_head = torch.nn.Linear(config.n_embd, config.vocab_size, bias=False)

    def forward(self, input_ids=None, past_key_values=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Run the GPT-2 backbone and return LM logits.

        Note: unlike GPT2LMHeadModel, `labels` is accepted for signature
        compatibility but ignored -- no loss is computed, only logits are
        returned.
        """
        transformer_outputs = super().forward(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # First element of the backbone output is the last hidden state.
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        return lm_logits

    def vocab_size(self):
        # Accessor for the configured vocabulary size.
        return self.config.vocab_size
def build_tracker(name='max_box', *args, **kwargs):
    """Factory for tracker objects.

    Parameters
    ----------
    name : str
        Tracker identifier; currently only 'max_box' is supported.
    *args, **kwargs
        Forwarded to the tracker constructor.

    Raises
    ------
    ValueError
        If `name` is not a known tracker (listed in VALID_TRACKERS).
    """
    if (name == 'max_box'):
        from .max_box_tracker import MaxBoxTracker
        # Fix: the extra positional/keyword arguments were accepted but
        # silently dropped; forward them to the constructor as the factory
        # signature implies. NOTE(review): confirm MaxBoxTracker's __init__
        # accepts them at all call sites.
        tracker = MaxBoxTracker(*args, **kwargs)
    else:
        raise ValueError(f'{name} is not valid, currently it only supports {VALID_TRACKERS}')
    return tracker
def test_amuse_MiyamotoNagaiPotential():
    """Compare galpy vs AMUSE orbit integration in a Miyamoto-Nagai potential."""
    # Physical scaling for the comparison.
    (vo, ro) = (220.0, 8.0)
    disk_pot = potential.MiyamotoNagaiPotential(normalize=1.0, a=0.5, b=0.1)
    test_orbit = Orbit([1.0, 0.1, 1.1, 0.3, 0.1, 0.4], ro=ro, vo=vo)
    # Integrate to t=4 (internal units) in both frameworks and compare.
    run_orbitIntegration_comparison(test_orbit, disk_pot, 4.0, vo, ro)
    return None
def apply_model_ema_and_restore(model, state=None):
    """Temporarily swap the model's weights for its EMA weights.

    Generator-based context helper: yields the snapshot of the original
    weights while the EMA state is applied, then restores the originals.
    NOTE(review): this is a plain generator that yields once -- presumably
    intended to be used via contextlib.contextmanager; confirm the decorator
    was not lost from the definition site.
    """
    model = _remove_ddp(model)  # unwrap DistributedDataParallel if present
    if (state is None):
        state = get_model_ema_state(model)
    # Snapshot current weights so they can be restored after the yield.
    old_state = EMAState.FromModel(model, state.device)
    state.apply_to(model)
    (yield old_state)
    # Caller resumed: put the original weights back.
    old_state.apply_to(model)
def trades_loss(model, x_natural, y, optimizer, device, step_size=0.003, epsilon=0.031, perturb_steps=10, beta=1.0, distance='l_inf'):
    """TRADES loss (Zhang et al., 2019): cross-entropy on clean inputs plus a
    KL robustness term on an adversarial example crafted to maximise the KL
    divergence from the clean prediction.

    Parameters
    ----------
    model, x_natural, y, optimizer, device : usual training objects.
    step_size, epsilon, perturb_steps : PGD attack hyper-parameters.
    beta : weight of the robustness (KL) term.
    distance : 'l_inf' or 'l_2' perturbation set; anything else skips the
        attack and only clamps the random start.

    Returns
    -------
    (loss, loss_natural, loss_robust, clean_acc, trades_acc)
        `loss` is the differentiable total; the rest are Python floats.
    """
    # Fix: size_average=False is deprecated; reduction='sum' is the modern
    # equivalent (summed over the batch, averaged manually below).
    criterion_kl = nn.KLDivLoss(reduction='sum')
    model.eval()  # freeze BN/dropout statistics while crafting the attack
    batch_size = len(x_natural)
    # Random start around the clean input.
    # Fix: the original called .cuda() before .to(device), which crashes on
    # CPU-only machines; randn_like inherits device and dtype directly.
    x_adv = (x_natural.detach() + (0.001 * torch.randn_like(x_natural).detach()))
    if (distance == 'l_inf'):
        # PGD ascent on the KL objective inside an l_inf ball of radius epsilon.
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1), F.softmax(model(x_natural), dim=1))
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            x_adv = (x_adv.detach() + (step_size * torch.sign(grad.detach())))
            # Project back into the epsilon-ball, then into valid pixel range.
            x_adv = torch.min(torch.max(x_adv, (x_natural - epsilon)), (x_natural + epsilon))
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    elif (distance == 'l_2'):
        # Normalized-gradient ascent with per-sample projection onto the l_2 ball.
        for _ in range(perturb_steps):
            x_adv.requires_grad_()
            with torch.enable_grad():
                loss_kl = criterion_kl(F.log_softmax(model(x_adv), dim=1), F.softmax(model(x_natural), dim=1))
            grad = torch.autograd.grad(loss_kl, [x_adv])[0]
            for idx_batch in range(batch_size):
                grad_idx = grad[idx_batch]
                grad_idx_norm = l2_norm(grad_idx)
                grad_idx /= (grad_idx_norm + 1e-08)
                x_adv[idx_batch] = (x_adv[idx_batch].detach() + (step_size * grad_idx))
                eta_x_adv = (x_adv[idx_batch] - x_natural[idx_batch])
                norm_eta = l2_norm(eta_x_adv)
                if (norm_eta > epsilon):
                    # Rescale the perturbation back onto the epsilon-sphere.
                    eta_x_adv = ((eta_x_adv * epsilon) / l2_norm(eta_x_adv))
                    x_adv[idx_batch] = (x_natural[idx_batch] + eta_x_adv)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
    else:
        # Unknown distance: no attack, just keep the clamped random start.
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
    model.train()
    # Fix: torch.autograd.Variable is deprecated; detach() yields the same
    # requires_grad=False tensor.
    x_adv = torch.clamp(x_adv, 0.0, 1.0).detach()
    optimizer.zero_grad()
    logits = model(x_natural)
    adv_logits = model(x_adv)
    loss_natural = F.cross_entropy(logits, y)
    # KL summed over the batch by the criterion, averaged here.
    loss_robust = ((1.0 / batch_size) * criterion_kl(F.log_softmax(adv_logits, dim=1), F.softmax(logits, dim=1)))
    loss = (loss_natural + (beta * loss_robust))
    cleanacc = torch_accuracy(logits, y, (1,))[0].item()
    tradesacc = torch_accuracy(adv_logits, y, (1,))[0].item()
    return (loss, loss_natural.item(), loss_robust.item(), cleanacc, tradesacc)
class iCNN(FlowNetwork):
    """Invertible CNN flow with a linear classifier head.

    The invertible body pads the 3 input channels up to k, applies three
    coordinate-SELU stages at each of three resolutions (width k, 4k, 16k,
    separated by invertible downsampling), and ends with an invertible conv.
    """

    def __init__(self, num_classes=10, k=16):
        super().__init__()
        self.num_classes = num_classes
        self.k = k
        k4 = 4 * k
        k16 = 16 * k
        self.body = iSequential(
            RandomPadChannels(k - 3),  # lift RGB input to k channels
            *iCoordSelu(k), *iCoordSelu(k), *iCoordSelu(k),
            NNdownsample(),
            *iCoordSelu(k4), *iCoordSelu(k4), *iCoordSelu(k4),
            NNdownsample(),
            *iCoordSelu(k16), *iCoordSelu(k16),
            iConv2d(k16, k16),
        )
        # Non-invertible head: BN -> global average pool -> linear logits.
        self.classifier_head = nn.Sequential(
            nn.BatchNorm2d(k16),
            Expression(lambda u: u.mean(-1).mean(-1)),
            nn.Linear(k16, num_classes),
        )
        self.flow = iSequential(self.body, Flatten())
        self.prior = StandardNormal(k * 32 * 32)
class BaselineUNet(nn.Module):
    """3D U-Net with three pooling stages.

    Encoder doubles the channel count per stage (f, 2f, 4f, 8f); the decoder
    upsamples with transposed convolutions and concatenates the matching
    encoder features. Binary problems (n_cls == 2) collapse to a single
    sigmoid channel; otherwise a softmax over n_cls channels is returned.
    """

    def __init__(self, in_channels, n_cls, n_filters):
        super(BaselineUNet, self).__init__()
        self.in_channels = in_channels
        # Binary segmentation uses one logit channel + sigmoid.
        self.n_cls = 1 if n_cls == 2 else n_cls
        self.n_filters = n_filters
        f = n_filters
        # Shorthand factories; attribute names are kept identical to preserve
        # state_dict compatibility.
        conv = lambda cin, cout: BasicConv3d(cin, cout, kernel_size=3, stride=1, padding=1)
        up = lambda cin, cout: nn.ConvTranspose3d(cin, cout, kernel_size=3, stride=2, padding=1, output_padding=1)
        # --- Encoder ---
        self.block_1_1_left = conv(in_channels, f)
        self.block_1_2_left = conv(f, f)
        self.pool_1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.block_2_1_left = conv(f, 2 * f)
        self.block_2_2_left = conv(2 * f, 2 * f)
        self.pool_2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.block_3_1_left = conv(2 * f, 4 * f)
        self.block_3_2_left = conv(4 * f, 4 * f)
        self.pool_3 = nn.MaxPool3d(kernel_size=2, stride=2)
        # --- Bottleneck ---
        self.block_4_1_left = conv(4 * f, 8 * f)
        self.block_4_2_left = conv(8 * f, 8 * f)
        # --- Decoder (skip concatenation doubles the input channels) ---
        self.upconv_3 = up(8 * f, 4 * f)
        self.block_3_1_right = conv((4 + 4) * f, 4 * f)
        self.block_3_2_right = conv(4 * f, 4 * f)
        self.upconv_2 = up(4 * f, 2 * f)
        self.block_2_1_right = conv((2 + 2) * f, 2 * f)
        self.block_2_2_right = conv(2 * f, 2 * f)
        self.upconv_1 = up(2 * f, f)
        self.block_1_1_right = conv((1 + 1) * f, f)
        self.block_1_2_right = conv(f, f)
        self.conv1x1 = nn.Conv3d(f, self.n_cls, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder path, keeping skip features at each resolution.
        skip1 = self.block_1_2_left(self.block_1_1_left(x))
        skip2 = self.block_2_2_left(self.block_2_1_left(self.pool_1(skip1)))
        skip3 = self.block_3_2_left(self.block_3_1_left(self.pool_2(skip2)))
        out = self.block_4_2_left(self.block_4_1_left(self.pool_3(skip3)))
        # Decoder path with channel-wise concatenation of skip features.
        out = self.block_3_2_right(self.block_3_1_right(torch.cat([self.upconv_3(out), skip3], 1)))
        out = self.block_2_2_right(self.block_2_1_right(torch.cat([self.upconv_2(out), skip2], 1)))
        out = self.block_1_2_right(self.block_1_1_right(torch.cat([self.upconv_1(out), skip1], 1)))
        logits = self.conv1x1(out)
        if self.n_cls == 1:
            return torch.sigmoid(logits)
        return F.softmax(logits, dim=1)
class meta_ops():
    """Grid metadata operations against the Plotly v2 REST API."""

    @classmethod
    def upload(cls, meta, grid=None, grid_url=None):
        """PATCH `meta` onto an existing grid identified by object or URL.

        Fix: the method took `cls` as its first parameter but was not
        declared @classmethod, so calling it through the class bound the
        first positional argument (the metadata) to `cls`.

        Parameters
        ----------
        meta : dict
            Metadata payload, JSON-encoded with PlotlyJSONEncoder.
        grid, grid_url
            Exactly one identifies the target grid (resolved by
            _api_v2.parse_grid_id_args).

        Returns
        -------
        Whatever _api_v2.response_handler produces for the PATCH response.
        """
        grid_id = _api_v2.parse_grid_id_args(grid, grid_url)
        payload = {'metadata': json.dumps(meta, cls=utils.PlotlyJSONEncoder)}
        api_url = (_api_v2.api_url('grids') + '/{grid_id}'.format(grid_id=grid_id))
        res = requests.patch(api_url, data=payload, headers=_api_v2.headers(), verify=get_config()['plotly_ssl_verification'])
        return _api_v2.response_handler(res)
class StrikerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo 'Striker' task: an arm strikes a ball toward a sampled goal.

    Reward combines (negative) minimum ball-goal distance seen so far,
    control effort, and arm proximity to the ball (or, after contact, to the
    recorded strike position).
    """

    def __init__(self):
        utils.EzPickle.__init__(self)
        # Whether the arm has already contacted the ball this episode.
        self._striked = False
        # Closest ball-goal distance observed so far (drives reward_dist).
        self._min_strike_dist = np.inf
        # Arm-to-ball distance below which a strike is registered.
        self.strike_threshold = 0.1
        mujoco_env.MujocoEnv.__init__(self, 'striker.xml', 5)

    def step(self, a):
        vec_1 = (self.get_body_com('object') - self.get_body_com('tips_arm'))  # arm tip -> ball
        vec_2 = (self.get_body_com('object') - self.get_body_com('goal'))  # ball -> goal
        # Track the best (smallest) ball-goal distance reached so far.
        self._min_strike_dist = min(self._min_strike_dist, np.linalg.norm(vec_2))
        if (np.linalg.norm(vec_1) < self.strike_threshold):
            # Register the strike and remember where the arm tip was.
            self._striked = True
            self._strike_pos = self.get_body_com('tips_arm')
        if self._striked:
            # After contact, reward keeping the ball near the strike position.
            vec_3 = (self.get_body_com('object') - self._strike_pos)
            reward_near = (- np.linalg.norm(vec_3))
        else:
            reward_near = (- np.linalg.norm(vec_1))
        reward_dist = (- np.linalg.norm(self._min_strike_dist))
        reward_ctrl = (- np.square(a).sum())  # quadratic action penalty
        reward = (((3 * reward_dist) + (0.1 * reward_ctrl)) + (0.5 * reward_near))
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        # Never terminates from within the env -- presumably a time-limit
        # wrapper ends episodes; confirm.
        done = False
        return (ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl))

    def viewer_setup(self):
        # Camera tracks body 0 from a fixed distance.
        self.viewer.cam.trackbodyid = 0
        self.viewer.cam.distance = 4.0

    def reset_model(self):
        # Reset per-episode strike bookkeeping.
        self._min_strike_dist = np.inf
        self._striked = False
        self._strike_pos = None
        qpos = self.init_qpos
        # Fixed ball position; goal is resampled until sufficiently far away.
        self.ball = np.array([0.5, (- 0.175)])
        while True:
            self.goal = np.concatenate([self.np_random.uniform(low=0.15, high=0.7, size=1), self.np_random.uniform(low=0.1, high=1.0, size=1)])
            if (np.linalg.norm((self.ball - self.goal)) > 0.17):
                break
        # Write ball (note the swapped component order) and goal into the
        # tail of qpos.
        qpos[(- 9):(- 7)] = [self.ball[1], self.ball[0]]
        qpos[(- 7):(- 5)] = self.goal
        diff = (self.ball - self.goal)
        # Initial angle derived from the ball-goal direction -- presumably
        # orients the striker toward the goal; confirm against striker.xml.
        angle = (- np.arctan((diff[0] / (diff[1] + 1e-08))))
        qpos[(- 1)] = (angle / 3.14)
        qvel = (self.init_qvel + self.np_random.uniform(low=(- 0.1), high=0.1, size=self.model.nv))
        qvel[7:] = 0  # only the first 7 joints keep the random velocity
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        # [first 7 joint positions, first 7 joint velocities,
        #  arm-tip / ball / goal world positions]
        return np.concatenate([self.sim.data.qpos.flat[:7], self.sim.data.qvel.flat[:7], self.get_body_com('tips_arm'), self.get_body_com('object'), self.get_body_com('goal')])
class TestIMSATLoss(TestCase):
    """Sanity check for the IMSAT mutual-information criterion."""

    def setUp(self) -> None:
        super().setUp()
        # Unnormalized scores for 200 samples over 10 classes.
        self.pred_log = torch.randn(200, 10)

    def test_multinformation_imsat(self):
        # Mutual information must be strictly positive for random predictions.
        (MI, _) = MultualInformaton_IMSAT(mu=1.0)(self.pred_log)
        assert (MI > 0), f'MI should be aways positive, given {MI.item()}'
@_config
def scratch():
    """Config function: Habitat 'scratch map' experiment settings.

    Fix: the original source had a bare `_config` expression on the line
    above the def (a NameError at import time) -- almost certainly a
    decorator whose '@' was lost during flattening; restored as `@_config`.
    NOTE(review): confirm the intended decorator name at the definition site
    (e.g. a sacred experiment's config registrar).
    """
    uuid = 'habitat_scratch_map'
    cfg = {}
    cfg['learner'] = {'perception_network': 'AtariNet'}
    cfg['env'] = {'env_specific_kwargs': {'target_dim': 9}, 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'rgb_filled': 'rescale_centercrop_resize((3,84,84))'}}}
def run_generate(verbose=True):
    """CLI entry point: generate summaries/translations and optionally score them.

    Parses known args, forwards any extra ``--key value`` pairs to the
    generation function, writes outputs to ``save_path``, and -- when a
    reference file is supplied -- computes BLEU/ROUGE and dumps the metrics
    to ``score_path``.

    Returns
    -------
    dict
        The scores dict, or {} when no reference path was given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')
    parser.add_argument('--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument('--n_obs', type=int, default=(- 1), required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument('--info', nargs='?', type=str, const=datetime_now(), help="use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.")
    # Unrecognized --key value pairs become generate() kwargs.
    (args, rest) = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if (parsed_args and verbose):
        print(f'parsed the following generate kwargs: {parsed_args}')
    # Fix: read files via context managers instead of leaking handles from
    # bare open(...).readlines().
    with open(args.input_path) as f:
        input_lns = [x.rstrip() for x in f.readlines()]
    # T5 checkpoints expect a leading space before the source text.
    examples = [((' ' + x) if ('t5' in args.model_name) else x) for x in input_lns]
    if (args.n_obs > 0):
        examples = examples[:args.n_obs]
    # Fix: create intermediate directories too (parents=True).
    Path(args.save_path).parent.mkdir(parents=True, exist_ok=True)
    if ((args.reference_path is None) and Path(args.score_path).exists()):
        warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.')
    runtime_metrics = generate_summaries_or_translations(examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args)
    if (args.reference_path is None):
        return {}
    score_fn = (calculate_bleu if ('translation' in args.task) else calculate_rouge)
    with open(args.save_path) as f:
        output_lns = [x.rstrip() for x in f.readlines()]
    with open(args.reference_path) as f:
        # Trim the references to however many outputs were produced.
        reference_lns = [x.rstrip() for x in f.readlines()][:len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores['info'] = args.info
    if verbose:
        print(scores)
    if (args.score_path is not None):
        # Fix: close the metrics file after dumping.
        with open(args.score_path, 'w') as f:
            json.dump(scores, f)
    return scores
def train(rank, model, criterion, optimizer, scheduler, batch_meter, comm_meter, train_loader_list, test_loader_list, epoch, device, ue_list_epoches, G, user_weight_diff_array):
    """Run one epoch of distributed (semi-)supervised training for worker `rank`.

    Each iteration this rank impersonates one user (from `ue_list_epoches`),
    trains on that user's loader, and every `args.cp` iterations performs a
    communication round (all-reduce / group averaging) plus checkpointing.
    Relies on module-level `args` and helpers defined elsewhere in this file
    (get_groups, Get_group_num, SyncAllreduce, SyncAllreduce_1, Grouping_Avg,
    util_1.*, Save_model_grad_checkpoint, Save_train_state).

    NOTE(review): `batch_meter`, `comm_meter`, `criterion` (in some branches),
    `test_loader` and `top1` are set up but not all are used below — confirm
    whether they are vestigial.

    Returns:
        (user_id, WD_list, user_weight_diff_array): last user id served by this
        rank plus the (unmodified here) weight-difference bookkeeping.
    """
    # Snapshots of averaged weights, saved around communication rounds.
    average_model_weights = copy.deepcopy(model.state_dict())
    average_group_model_weights = copy.deepcopy(model.state_dict())
    model.train()
    WD_list = []
    top1 = util.Meter(ptag='')  # NOTE(review): never updated in this function
    iter_time = time.time()
    accum_steps = 1
    iteration = 0
    while (iteration < args.iteration):
        # Schedule: which user each rank plays at this iteration.
        ue_list = ue_list_epoches[epoch][iteration]
        user_id = ue_list[rank]
        (groups, server_list) = get_groups(args)
        if args.user_semi:
            # Per-user semi-supervised: paired labeled/unlabeled loaders.
            loader = zip(train_loader_list[user_id][0], train_loader_list[user_id][1])
            test_loader = test_loader_list[0]
            if (args.eval_grad and ((epoch % args.epoch_interval) == 0)):
                # Gradient-evaluation mode: restart from the saved group-averaged model.
                group_id = Get_group_num(args, groups, rank)
                checkpoint_weights = util_1.Load_Avg_model_checkpoint(args.experiment_folder, args.experiment_name, epoch, prefix=f'after_g{(group_id + 1)}')
                model.load_state_dict(checkpoint_weights, strict=False)
        elif args.H:
            # Hierarchical setup: server ranks use the labeled loader (index 0),
            # ordinary users use their own loader.
            if (user_id in set(server_list)):
                loader = train_loader_list[0]
                test_loader = test_loader_list[0]
            else:
                loader = train_loader_list[user_id]
                test_loader = test_loader_list[0]
            if (args.eval_grad and ((epoch % args.epoch_interval) == 0)):
                group_id = Get_group_num(args, groups, rank)
                checkpoint_weights = util_1.Load_Avg_model_checkpoint(args.experiment_folder, args.experiment_name, epoch, prefix=f'after_g{(group_id + 1)}')
                model.load_state_dict(checkpoint_weights, strict=False)
        else:
            # Flat setup: every rank trains on its own user's data.
            loader = train_loader_list[user_id]
            test_loader = test_loader_list[0]
            if (args.eval_grad and ((epoch % args.epoch_interval) == 0)):
                group_id = Get_group_num(args, groups, rank)
                checkpoint_weights = util_1.Load_Avg_model_checkpoint(args.experiment_folder, args.experiment_name, epoch, prefix=f'before')
                model.load_state_dict(checkpoint_weights, strict=False)
        # Re-iterate the (possibly short) loader until a communication round
        # (break_flag) or, in eval_grad mode, a single pass completes.
        while 1:
            break_flag = False
            train_loss = 0
            loss_steps = 0
            train_mask = 0  # count of confident pseudo-labels (prob >= 0.95)
            for (batch_idx, data) in enumerate(loader):
                if args.user_semi:
                    # FixMatch-style objective: supervised Lx + masked unsupervised Lu.
                    (data_x, data_u) = data
                    (inputs_x, targets_x) = data_x
                    ((inputs_u_w, inputs_u_s), _) = data_u
                    batch_size = inputs_x.shape[0]
                    # One forward pass over labeled + weak-aug + strong-aug inputs.
                    inputs = torch.cat((inputs_x, inputs_u_w, inputs_u_s)).to(device)
                    targets_x = targets_x.to(device)
                    logits = model(inputs)
                    logits_x = logits[:batch_size]
                    logits_u_w = logits[batch_size:]
                    del logits
                    Lx = F.cross_entropy(logits_x, targets_x, reduction='mean')
                    # NOTE(review): pseudo-labels AND the unsupervised loss both use
                    # logits_u_w, which spans the weak- and strong-augmented halves —
                    # confirm this is intended rather than using the strong half only.
                    pseudo_label = torch.softmax(logits_u_w.detach(), dim=(- 1))
                    (max_probs, targets_u) = torch.max(pseudo_label, dim=(- 1))
                    if (not args.eval_grad):
                        mask = max_probs.ge(0.95).float()
                    else:
                        # eval_grad mode uses the configurable threshold args.tao.
                        mask = max_probs.ge(args.tao).float()
                    train_mask += max_probs.ge(0.95).float().sum().item()
                    Lu = (F.cross_entropy(logits_u_w, targets_u, reduction='none') * mask).mean()
                    loss = (Lx + Lu)
                elif (user_id in set(server_list)):
                    # Server ranks: plain supervised training on labeled data.
                    (inputs_x, targets_x) = data
                    inputs_x = inputs_x.to(device)
                    targets_x = targets_x.to(device)
                    output = model(inputs_x)
                    loss = criterion(output, targets_x)
                elif args.labeled:
                    # Users with ground-truth labels: supervised loss on the weak view.
                    ((inputs_u_w, inputs_u_s), target_labels) = data
                    inputs_x = inputs_u_w.to(device)
                    targets_x = target_labels.to(device)
                    output = model(inputs_x)
                    loss = criterion(output, targets_x)
                else:
                    if (args.ue_loss == 'CRL'):
                        # Consistency regularization: weak view makes pseudo-labels,
                        # strong view is trained against them.
                        ((inputs_u_w, inputs_u_s), _) = data
                        inputs = torch.cat((inputs_u_w, inputs_u_s)).to(device)
                        logits = model(inputs)
                        (logits_u_w, logits_u_s) = logits.chunk(2)
                        del logits
                        pseudo_label = torch.softmax(logits_u_w.detach_(), dim=(- 1))
                        (max_probs, targets_u) = torch.max(pseudo_label, dim=(- 1))
                        if (not args.eval_grad):
                            mask = max_probs.ge(0.95).float()
                        else:
                            mask = max_probs.ge(args.tao).float()
                        train_mask += max_probs.ge(0.95).float().sum().item()
                        loss = (F.cross_entropy(logits_u_s, targets_u, reduction='none') * mask).mean()
                        train_loss += loss.item()
                        loss_steps += 1
                    if (args.ue_loss == 'SF'):
                        # Self-training: pseudo-label in eval mode, then train on it.
                        (inputs_x, targets_x) = data
                        inputs = inputs_x.to(device)
                        model.eval()
                        with torch.no_grad():
                            logits = model(inputs)
                            pseudo_label = torch.softmax(logits.detach_(), dim=(- 1))
                            (max_probs, targets_u) = torch.max(pseudo_label, dim=(- 1))
                            if (not args.eval_grad):
                                mask = max_probs.ge(0.95).float()
                            else:
                                mask = max_probs.ge(args.tao).float()
                            train_mask += max_probs.ge(0.95).float().sum().item()
                        model.train()
                        output = model(inputs)
                        loss = (F.cross_entropy(output, targets_u, reduction='none') * mask).mean()
                loss.backward()
                if (not args.eval_grad):
                    optimizer.step()
                    optimizer.zero_grad()
                    scheduler.step()
                if (not args.eval_grad):
                    # Communication round every args.cp iterations (accum_steps == 1).
                    if ((iteration != 0) and (((iteration % args.cp) * accum_steps) == 0)):
                        if (((epoch % args.epoch_interval) == 0) or (epoch == (args.epoch - 1))):
                            # Checkpoint the pre-communication state.
                            util_1.Save_model_checkpoint(args.experiment_name, model, rank, epoch)
                            group_id = Get_group_num(args, groups, rank)
                            save_each_group_avg_model(args, average_group_model_weights, epoch, rank, rank_save=groups[group_id][0], prefix=f'before_g{(group_id + 1)}')
                            if (rank == 0):
                                util_1.Save_Avg_model_checkpoint(args.experiment_name, average_model_weights, rank, epoch, prefix='before')
                        if args.user_semi:
                            if args.H:
                                # Hierarchical averaging: two halves of the active users
                                # sync within their own process groups ...
                                ue_list = ue_list[0:args.num_comm_ue]
                                group1_size = (len(ue_list) // 2)
                                group1 = np.array(ue_list)[np.arange(0, group1_size).tolist()].tolist()
                                group2 = np.array(ue_list)[np.arange(group1_size, len(ue_list)).tolist()].tolist()
                                if (rank < (len(ue_list) // 2)):
                                    SyncAllreduce_1(model, rank, size=len(group1), group=G[0])
                                else:
                                    SyncAllreduce_1(model, rank, size=len(group2), group=G[1])
                                # ... then the edge ranks bridge the two halves.
                                if ((rank == 0) or (rank == (args.num_rank - 1))):
                                    SyncAllreduce_1(model, rank, size=len(groups[(- 1)]), group=G[(- 1)])
                            else:
                                SyncAllreduce(model, rank, args.num_rank)
                        elif args.H:
                            average_group_model_weights = Grouping_Avg(args, model, rank, G, groups, epoch)
                        else:
                            # Flat FedAvg: global all-reduce of model weights.
                            SyncAllreduce(model, rank, args.num_rank)
                        average_model_weights = copy.deepcopy(model.state_dict())
                        if (((epoch % args.epoch_interval) == 0) or (epoch == (args.epoch - 1))):
                            if (rank == 0):
                                # Checkpoint the post-communication average.
                                util_1.Save_Avg_model_checkpoint(args.experiment_name, average_model_weights, rank, epoch, prefix='after')
                        iteration += 1
                        break_flag = True
                        break
                iteration += 1
                if args.eval_grad:
                    # Accumulate gradients over the whole loader once, save and stop.
                    print(f'save grad. of the whole DataLoader of UE {user_id}')
                    Save_model_grad_checkpoint(args.experiment_folder, args.experiment_name, model, rank, epoch, args.tao)
                    values = {'train_loss': train_loss, 'train_mask': train_mask, 'len_loader': len(loader)}
                    print(epoch, rank, values)
                    Save_train_state(args.experiment_folder, args.experiment_name, rank, epoch, values, args.tao)
                    break
            if break_flag:
                break
    return (user_id, WD_list, user_weight_diff_array)
def train_seco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, scheduler, args):
    """Train one epoch of SeCo-style contrastive learning.

    For each batch, the EMA encoder embeds three shuffled key views (x1..x3),
    the online encoder embeds the query (conditioned on the order features of
    x2/x3), and inter-/intra-frame contrastive losses plus a temporal-order
    classification loss are optimized jointly. Returns the epoch's mean loss.
    """
    model.train()
    set_bn_train(model_ema)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    timer = mmcv.Timer()
    for idx, (xq, x1, x2, x3, binary_order) in enumerate(train_loader):
        xq, x1, x2, x3, binary_order = (
            t.cuda(non_blocking=True) for t in (xq, x1, x2, x3, binary_order)
        )
        inter_local, inter_all, intra_local, order_local = [], [], [], []
        with torch.no_grad():
            # Shuffle each key view across GPUs (BN-cheating prevention),
            # embed with the EMA encoder, then un-shuffle the features.
            # x1 contributes no order feature; x2 and x3 do.
            for has_order, view in ((False, x1), (True, x2), (True, x3)):
                shuffled, back_inds = DistributedShuffle.forward_shuffle(view)
                f_inter, f_intra, f_order = model_ema(shuffled)
                all_inter, loc_inter = DistributedShuffle.backward_shuffle(f_inter, back_inds)
                _, loc_intra = DistributedShuffle.backward_shuffle(f_intra, back_inds)
                inter_all.append(all_inter)
                inter_local.append(loc_inter)
                intra_local.append(loc_intra)
                if has_order:
                    _, loc_order = DistributedShuffle.backward_shuffle(f_order, back_inds)
                    order_local.append(loc_order.detach())
        # Query pass, conditioned on the (detached) order features of x2/x3.
        xq_feat_inter, xq_feat_intra, xq_feat_order, xq_logit_order = model(
            xq, order_feat=torch.cat(order_local, dim=1))
        out_inter = contrast(xq_feat_inter, *inter_local,
                             torch.cat(inter_all, dim=0), inter=True)
        out_intra = contrast(xq_feat_intra, *intra_local, None, inter=False)
        loss_inter = criterion(out_inter)
        loss_intra = criterion(out_intra)
        loss_order = torch.nn.functional.cross_entropy(xq_logit_order, binary_order)
        loss = loss_inter + loss_intra + loss_order
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        # Momentum-update the EMA encoder toward the online encoder.
        moment_update(model, model_ema, args.alpha)
        loss_meter.update(loss.item())
        batch_time.update(timer.since_last_check())
        if idx % args.print_freq == 0:
            logger.info('Train: [{:>3d}]/[{:>4d}/{:>4d}] BT={:>0.3f}/{:>0.3f} Loss={:>0.3f} {:>0.3f} {:>0.3f} {:>0.3f}/{:>0.3f}'.format(epoch, idx, len(train_loader), batch_time.val, batch_time.avg, loss.item(), loss_inter.item(), loss_intra.item(), loss_order.item(), loss_meter.avg))
    return loss_meter.avg
def download_and_prepare(root):
    """Download the STL-10 archive into `root` and export every split
    ('train', 'test', 'unlabeled') as individual image files in a
    same-named subdirectory of `root`."""
    datasets = (
        STL10(root, split='train', download=True),  # triggers the one-off download
        STL10(root, split='test'),
        STL10(root, split='unlabeled'),
    )
    for dataset, subdir in zip(datasets, ('train', 'test', 'unlabeled')):
        extract_and_save_image(dataset, osp.join(root, subdir))
class DAGMM(nn.Module):
    """Deep Autoencoding Gaussian Mixture Model over a flattened window.

    Input: one window of `feats`-dimensional observations (`n_window` steps),
    flattened to a single row. Output: latent code, reconstruction, the
    estimation-network input, and soft mixture memberships (gamma).
    """

    def __init__(self, feats):
        super(DAGMM, self).__init__()
        self.name = 'DAGMM'
        self.lr = 0.0001
        self.beta = 0.01
        self.n_feats = feats
        self.n_hidden = 16
        self.n_latent = 8
        self.n_window = 5
        # Flattened input size = features per step * window length.
        self.n = self.n_feats * self.n_window
        self.n_gmm = self.n_feats * self.n_window
        # Compression network: input -> latent code.
        self.encoder = nn.Sequential(
            nn.Linear(self.n, self.n_hidden), nn.Tanh(),
            nn.Linear(self.n_hidden, self.n_hidden), nn.Tanh(),
            nn.Linear(self.n_hidden, self.n_latent),
        )
        # Reconstruction network: latent code -> input space (sigmoid output).
        self.decoder = nn.Sequential(
            nn.Linear(self.n_latent, self.n_hidden), nn.Tanh(),
            nn.Linear(self.n_hidden, self.n_hidden), nn.Tanh(),
            nn.Linear(self.n_hidden, self.n), nn.Sigmoid(),
        )
        # Estimation network: [code, 2 reconstruction features] -> memberships.
        self.estimate = nn.Sequential(
            nn.Linear(self.n_latent + 2, self.n_hidden), nn.Tanh(), nn.Dropout(0.5),
            nn.Linear(self.n_hidden, self.n_gmm), nn.Softmax(dim=1),
        )

    def compute_reconstruction(self, x, x_hat):
        """Return (relative Euclidean distance, cosine similarity) between
        the input and its reconstruction, both per row."""
        rel_dist = (x - x_hat).norm(2, dim=1) / x.norm(2, dim=1)
        cos_sim = F.cosine_similarity(x, x_hat, dim=1)
        return rel_dist, cos_sim

    def forward(self, x):
        """Encode, decode and estimate mixture memberships for one window."""
        flat = x.view(1, -1)
        code = self.encoder(flat)
        recon = self.decoder(code)
        rel_dist, cos_sim = self.compute_reconstruction(flat, recon)
        # Augment the code with the two reconstruction-quality features.
        enriched = torch.cat([code, rel_dist.unsqueeze(-1), cos_sim.unsqueeze(-1)], dim=1)
        gamma = self.estimate(enriched)
        return code, recon.view(-1), enriched, gamma.view(-1)
class nnUNetTrainerV2CascadeFullRes_noConnComp(nnUNetTrainerV2CascadeFullRes):
    """Cascade full-resolution trainer variant whose augmentation pipeline
    never removes connected components from the previous-stage segmentation."""

    def setup_DA_params(self):
        """Extend the parent's DA params: keep cascade augmentations on, but
        set the connected-component removal probabilities to zero."""
        super().setup_DA_params()
        self.data_aug_params.update({
            'cascade_do_cascade_augmentations': True,
            'cascade_random_binary_transform_p': 0.4,
            'cascade_random_binary_transform_p_per_label': 1,
            'cascade_random_binary_transform_size': (1, 8),
            # Connected-component removal disabled (probability 0).
            'cascade_remove_conn_comp_p': 0.0,
            'cascade_remove_conn_comp_max_size_percent_threshold': 0.15,
            'cascade_remove_conn_comp_fill_with_other_class_p': 0.0,
        })
def _manual_inverting(X, rcond=0.001, full_rank=False):
X = np.asarray(X)
(n_samples, n_features) = X.shape
if (n_samples != n_features):
raise ValueError('The matrix is not a square matrix')
(U, s, V) = np.linalg.svd(X, full_matrices=False)
rank = np.sum((s > (rcond * s.max())))
s_inv = np.zeros(np.size(s))
s_inv[:rank] = (1 / s[:rank])
if full_rank:
s_inv[rank:] = (1 / (rcond * s.max()))
X_inv = np.linalg.multi_dot([U, np.diag(s_inv), V])
return X_inv |
class BaseDataLoader():
    """Minimal batch iterator driven by a config dict.

    Subclasses must implement the ``_n_samples`` / ``_pack_data`` /
    ``_unpack_data`` / ``_update_data`` hooks; this base provides batching,
    shuffling and a train/validation split.
    """

    def __init__(self, config):
        self.config = config
        self.batch_size = config['data_loader']['batch_size']
        self.shuffle = config['data_loader']['shuffle']
        self.num_workers = config['data_loader']['workers']
        self.batch_idx = 0

    def __iter__(self):
        """Reset iteration (optionally reshuffling the data) and return self."""
        assert (self.__len__() > 0)
        self.batch_idx = 0
        if self.shuffle:
            self._shuffle_data()
        return self

    def __next__(self):
        """Return the next unpacked batch, or raise StopIteration."""
        packed = self._pack_data()
        if (self.batch_idx < self.__len__()):
            batch = packed[(self.batch_idx * self.batch_size):((self.batch_idx + 1) * self.batch_size)]
            self.batch_idx = (self.batch_idx + 1)
            return self._unpack_data(batch)
        else:
            raise StopIteration

    def __len__(self):
        # Number of full batches; a trailing partial batch is dropped.
        return (self._n_samples() // self.batch_size)

    def _n_samples(self):
        """Total number of samples. Must be overridden.

        Fix: these abstract hooks previously *returned* the
        NotImplementedError class instead of raising it, so calling an
        un-overridden hook silently handed back an exception class object.
        """
        raise NotImplementedError

    def _pack_data(self):
        """Return the data as one indexable sequence. Must be overridden."""
        raise NotImplementedError

    def _unpack_data(self, packed):
        """Inverse of ``_pack_data`` for a slice. Must be overridden."""
        raise NotImplementedError

    def _update_data(self, unpacked):
        """Replace the loader's data. Must be overridden."""
        raise NotImplementedError

    def _shuffle_data(self):
        """Randomly permute the packed samples in place (via the hooks)."""
        packed = self._pack_data()
        rand_idx = np.random.permutation(len(packed))
        packed = [packed[i] for i in rand_idx]
        self._update_data(self._unpack_data(packed))

    def split_validation(self):
        """Split off the first ``validation_split`` fraction of the data into
        a shallow-copied loader; this loader keeps the remainder.

        Returns None when ``validation_split`` is 0.
        """
        validation_split = self.config['validation']['validation_split']
        shuffle = self.config['validation']['shuffle']
        if (validation_split == 0.0):
            return None
        if shuffle:
            self._shuffle_data()
        # Shallow copy shares config; each loader then gets its own data slice.
        valid_data_loader = copy(self)
        split = int((self._n_samples() * validation_split))
        packed = self._pack_data()
        train_data = self._unpack_data(packed[split:])
        val_data = self._unpack_data(packed[:split])
        valid_data_loader._update_data(val_data)
        self._update_data(train_data)
        return valid_data_loader
def show_feature_map(feature_map, feature_data, i):
    """Overlay a channel-summed feature map on the (summed) input data,
    display the blend, and save both images under ./feature_map_save/.

    NOTE(review): assumes `feature_data` is at least 5-D (summed over dim 2
    to become 4-D for the upsampler) and `feature_map` is 4-D with a leading
    batch axis — confirm against the caller.
    """
    upsample = torch.nn.UpsamplingBilinear2d(size=(64, 64))

    def _as_2d(tensor):
        # Upsample a 4-D (N, C, H, W) tensor to 64x64 and strip the two
        # leading singleton axes, yielding a 2-D numpy image.
        arr = np.array(upsample(tensor).cpu())
        return arr.squeeze(0).squeeze(0)

    data_img = _as_2d(torch.sum(feature_data, 2))
    channel_sum = torch.sum(feature_map.squeeze(0), 0)
    map_img = _as_2d(channel_sum.view(1, 1, channel_sum.shape[0], channel_sum.shape[1]))
    # Weighted blend: 70% input data, 30% feature map.
    blended = cv2.addWeighted(data_img, 0.7, map_img, 0.3, 0)
    plt.figure()
    plt.imshow(blended, cmap='jet')
    plt.axis('off')
    plt.show()
    plt.imsave('./feature_map_save/' + str(i) + '.svg', blended)
    plt.imsave('./feature_map_save/' + str(i) + '_origin.svg', data_img)
def pause(interval: float):
    """Pause for `interval` seconds without stealing window focus.

    Unlike ``plt.pause``, this never calls ``plt.show`` and so does not raise
    the figure window. For interactive backends with an active figure it runs
    the GUI event loop (redrawing first if the figure is stale); otherwise it
    simply sleeps.

    Fix: previously the function returned immediately on non-interactive
    backends (or when no figure was active), so it did not pause at all in
    those cases; ``plt.pause`` semantics are to sleep instead.
    """
    import time  # local import: only needed for the non-interactive fallback
    backend = plt.rcParams['backend']
    if (backend in matplotlib.rcsetup.interactive_bk):
        # NOTE(review): matplotlib._pylab_helpers is private API — may break
        # across matplotlib versions.
        figManager = matplotlib._pylab_helpers.Gcf.get_active()
        if (figManager is not None):
            canvas = figManager.canvas
            if canvas.figure.stale:
                canvas.draw()
            canvas.start_event_loop(interval)
            return
    time.sleep(interval)
class LegacyFairseqTask(FairseqTask):
    """Compatibility task base for code that still configures itself from an
    argparse ``Namespace`` instead of a structured config object."""

    def __init__(self, args: Namespace):
        self.args = args
        self.datasets = {}  # split name -> dataset
        self.dataset_to_epoch_iter = {}  # cached epoch iterators per dataset

    @classmethod
    def setup_task(cls, args: Namespace, **kwargs):
        """Instantiate the task from command-line arguments.

        Fix: this is an alternate constructor taking ``cls`` — it must be
        decorated with ``@classmethod`` so ``SomeTask.setup_task(args)``
        works without an instance.
        """
        return cls(args, **kwargs)

    def has_sharded_data(self, split):
        # Multiple paths joined by os.pathsep indicate sharded data.
        return (os.pathsep in getattr(self.args, 'data', ''))

    def build_model(self, args: Namespace):
        """Build and (scalar-)quantize the model for this task."""
        from fairseq import models, quantization_utils
        model = models.build_model(args, self)
        model = quantization_utils.quantize_model_scalar(model, args)
        return model

    def build_criterion(self, args: Namespace):
        """Build the training criterion for this task."""
        from fairseq import criterions
        return criterions.build_criterion(args, self)
def resnet_v2_block(scope, base_depth, num_units, stride, centered_stride=False):
    """Build a resnet_v2 bottleneck Block description.

    The first ``num_units - 1`` units use stride 1; only the final unit
    applies ``stride`` (and the ``centered_stride`` flag). Each unit's output
    depth is ``4 * base_depth`` with a ``base_depth`` bottleneck.
    """
    inner_unit = {
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': 1,
    }
    last_unit = {
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': stride,
        'centered_stride': centered_stride,
    }
    units = [inner_unit] * (num_units - 1) + [last_unit]
    return resnet_utils.Block(scope, bottleneck, units)
def test_can_add(state: trajectory_queue.TrajectoryQueueState, fake_transition: chex.ArrayTree, max_length: int, add_batch_size: int, add_sequence_length: int, sample_sequence_length: int) -> None:
    """Exercise the queue's can_add/can_sample bookkeeping through a full
    fill -> partial drain -> refill -> full drain -> refill cycle."""
    batch = get_fake_batch_sequence(fake_transition, add_batch_size, add_sequence_length)
    # Preconditions: lengths divide evenly and the queue starts empty.
    assert max_length % add_sequence_length == 0
    assert max_length % sample_sequence_length == 0
    assert state.read_index == state.write_index
    assert add_sequence_length % sample_sequence_length == 0
    # Fill the queue exactly to capacity (division is exact per the asserts).
    writes_to_fill = max_length // add_sequence_length
    for _ in range(writes_to_fill):
        assert not state.is_full
        assert trajectory_queue.can_add(state, add_sequence_length, max_length)
        state = trajectory_queue.add(state, batch)
    assert not trajectory_queue.can_add(state, add_sequence_length, max_length)
    assert state.is_full
    # Drain one write's worth of items; sampling reclaims space.
    reads_per_write = add_sequence_length // sample_sequence_length
    for _ in range(reads_per_write):
        assert trajectory_queue.can_sample(state, sample_sequence_length, max_length)
        state, _sample = trajectory_queue.sample(state, sample_sequence_length)
        assert state.read_index > state.write_index
    assert trajectory_queue.can_add(state, add_sequence_length, max_length)
    # Top the queue back up to capacity.
    while trajectory_queue.can_add(state, add_sequence_length, max_length):
        state = trajectory_queue.add(state, batch)
    assert state.write_index <= state.read_index
    # Drain completely, then confirm writing works again.
    while trajectory_queue.can_sample(state, sample_sequence_length, max_length):
        state, _sample = trajectory_queue.sample(state, sample_sequence_length)
    assert state.read_index <= state.write_index
    assert trajectory_queue.can_add(state, add_sequence_length, max_length)
    while trajectory_queue.can_add(state, add_sequence_length, max_length):
        state = trajectory_queue.add(state, batch)
    assert state.write_index <= state.read_index
@_registry(operator_type='PaddingSequence')
class PaddingSequence(Operator):
    """Graph operator for attention-mask padding sequences.

    Fix: the registry call was a bare statement above the class; it must be
    applied as a decorator (``@_registry(...)``) so the class is actually
    registered under 'PaddingSequence'.
    """

    def __init__(self):
        super().__init__()

    def set_attr(self, framework, node):
        """Populate framework-specific attributes from the graph node."""
        if (framework == 'torch'):
            # Broadcast the mask to shape (batch, 1, 1, seq).
            self._attr['dst_shape'] = '-1,1,1,-1'
            self._attr['dims'] = 1
            # Scalar padding value comes from the node's second input.
            self._attr['padding_value'] = node.inputsAt(1).toIValue().item()
            # The padding value is folded into the attrs; drop its input tensor.
            del self.input_tensors[1]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.