code stringlengths 101 5.91M |
|---|
class ElvenDagger(BaseDagger):
    """A light wooden elven dagger; damage is a single 5-sided die roll."""

    def __init__(self):
        # All item configuration is delegated to the BaseDagger initializer.
        super().__init__(
            'elven dagger',
            weight=10,
            damage=D.SingleDice(5),
            material=M.Wood,
        )
class OutputMode():
    """Bit-flag constants selecting which data groups a device outputs.

    Values are powers of two so multiple modes can be OR-ed into a mask.
    NOTE(review): the gap between Velocity (32) and Status (2048) appears
    deliberate — the values look like an external protocol's fixed bit
    layout; confirm against the device documentation before renumbering.
    """
    Temp = 1
    Calib = 2
    Orient = 4
    Auxiliary = 8
    Position = 16
    Velocity = 32
    Status = 2048
    RAWGPS = 4096
    RAW = 16384
('AGENT_8')
class AGENT_8(BaseAgent):
    """Agent configuration: an MLP policy with ReLU activations.

    All attributes are class-level configuration consumed by the BaseAgent
    machinery; the attribute names appear to follow the stable-baselines3
    policy-kwargs convention — confirm against BaseAgent.
    """
    # Policy network family used by this agent.
    type = PolicyType.MLP
    # No custom feature extractor configured; downstream defaults apply.
    features_extractor_class = None
    features_extractor_kwargs = None
    # Four shared 64-unit layers, then separate pi/vf heads of two 64-unit layers each.
    net_arch = [64, 64, 64, 64, dict(pi=[64, 64], vf=[64, 64])]
    activation_fn = nn.ReLU
class Exponential(JavaValue):
    """Exponential decay schedule constructed through the JavaValue bridge.

    Construction simply forwards the decay parameters to JavaValue with a
    None first argument (presumably so a fresh remote object is created —
    confirm against JavaValue's constructor).
    """

    def __init__(self, decay_step, decay_rate, stair_case=False, bigdl_type='float'):
        super().__init__(None, bigdl_type, decay_step, decay_rate, stair_case)
def is_initialized():
    """Report whether the currently selected communicator backend is initialized."""
    if __use_threads:
        backend = InProcessCommunicator
    else:
        backend = DistributedCommunicator
    return backend.is_initialized()
def config_parser():
    """Build the configargparse parser for the shape preprocessing/training pipeline.

    Returns:
        configargparse.ArgumentParser: Parser with data, preprocessing,
        split, and training options. Defaults can be overridden via the
        config file given by --config.
    """
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True, default='configs/shapenet_cars.txt', help='config file path')
    parser.add_argument('--exp_name', type=str, default=None, help='Experiment name, used as folder name for the experiment. If left blank, a name will be auto generated based on the configuration settings.')
    # Data locations and sampling options.
    parser.add_argument('--data_dir', type=str, help='input data directory')
    parser.add_argument('--input_data_glob', type=str, help='glob expression to find raw input files')
    parser.add_argument('--split_file', type=str, help='Path to read and write the data split file. Needs to end with ".npz"')
    parser.add_argument('--sample_std_dev', action='append', type=float, help='Standard deviations of gaussian samples. Used for displacing surface points to sample the distance field.')
    parser.add_argument('--sample_ratio', action='append', type=float, help='Ratio of standard deviations for samples used for training. Needs to have the same len as sample_std with floats between 0-1 and summing to 1.')
    parser.add_argument('--bb_min', default=-0.5, type=float, help='Training and testing shapes are normalized to be in a common bounding box. This value defines the min value in x,y and z for the bounding box.')
    parser.add_argument('--bb_max', default=0.5, type=float, help='Training and testing shapes are normalized to be in a common bounding box. This value defines the max value in x,y and z for the bounding box.')
    # FIX: the original help text here was copy-pasted from --bb_max and
    # described the bounding box instead of the input resolution.
    parser.add_argument('--input_res', type=int, default=256, help='Resolution (number of cells per dimension) of the grid representation the raw input is converted to during preprocessing.')
    parser.add_argument('--num_points', type=int, default=10000, help='Number of points sampled from each ground truth shape.')
    parser.add_argument('--num_chunks', type=int, default=1, help='The preprocessing can be distributed over num_chunks multiple machines. For this the raw files are split into num_chunks chunks. Default is preprocessing on a single machine.')
    parser.add_argument('--current_chunk', type=int, default=0, help='Tells the script which chunk it should process. Value between 0 till num_chunks-1.')
    parser.add_argument('--num_cpus', type=int, default=-1, help='Number of cpu cores to use for running the script. Default is -1, that is, using all available cpus.')
    parser.add_argument('--class_folders', type=str, default=None, help='If set to None, the split is created by creating a random sample from all input files. If not None, the split is created per class of objects. Objects of the same class need to be in a common parent folder for this. Variable class_folder is interpreted as glob pattern, suffix of data_dir - i.e. data_dir + class_folder, e.g. class_folder="/*/".')
    # Validation/test split sizes: absolute count and relative fraction are
    # mutually exclusive within each group.
    parser_nval = parser.add_mutually_exclusive_group()
    parser_nval.add_argument('--n_val', type=int, help='Size of validation set.')
    parser_nval.add_argument('--r_val', type=float, default=0.1, help='Relative size of validation set.')
    parser_ntest = parser.add_mutually_exclusive_group()
    parser_ntest.add_argument('--n_test', type=int, help='Size of test set.')
    parser_ntest.add_argument('--r_test', type=float, default=0.2, help='Relative size of test set.')
    # Training hyper-parameters.
    parser.add_argument('--num_sample_points_generation', type=int, default=50000, help='Number of point samples per object provided to the NDF network during generation. Influences generation speed (larger batches result in faster generation) but also GPU memory usage (higher values need more memory). Tip: choose largest possible value on GPU.')
    parser.add_argument('--num_sample_points_training', type=int, default=50000, help='Number of point samples per object provided to the NDF network during training. Influences training speed (larger batches result in shorter epochs) but also GPU memory usage (higher values need more memory). Needs to be balanced with batch_size.')
    parser.add_argument('--batch_size', type=int, default=4, help='Number of objects provided to the NDF network in one batch during training. Influences training speed (larger batches result in shorter epochs) but also GPU memory usage (higher values need more memory). Needs to be balanced with num_sample_points_training')
    # FIX: "Stopping citron" was a typo for "Stopping criterion".
    parser.add_argument('--num_epochs', type=int, default=1500, help='Stopping criterion for duration of training. Model converges much earlier: model convergence can be checked via tensorboard and is logged within the experiment folder.')
    parser.add_argument('--lr', type=float, default=1e-06, help='Learning rate used during training.')
    parser.add_argument('--optimizer', type=str, default='Adam', help='Optimizer used during training.')
    return parser
def check_graphviz_support(caller_name):
    """Ensure the optional ``graphviz`` package is importable.

    Args:
        caller_name: Name of the calling feature; included in the error
            message so users know which functionality needs the package.

    Raises:
        ImportError: If ``graphviz`` is not installed. The original message
            incorrectly told users to install ``rdata``/``pyreadr``; it now
            names the package that is actually required.
    """
    try:
        import graphviz  # noqa: F401 -- imported only to probe availability
    except ImportError as exc:
        raise ImportError(
            f'{caller_name} requires graphviz. Please install graphviz using `pip install graphviz`'
        ) from exc
class GDANET(nn.Module):
    """GDANet-style point-cloud classifier with a 40-way output head.

    Visible wiring: three stages of `local_operator` feature extraction.
    Stages 1 and 2 pass features through `GDM` (which yields two streams)
    and fuse each stream with an `SGCAM` attention block; stage 3 is a
    plain conv path. Stage outputs are concatenated, globally pooled
    (max + avg), and classified by a three-layer MLP.

    NOTE(review): `local_operator`, `GDM` and `SGCAM` are project helpers
    defined elsewhere; comments here describe only the tensor wiring
    visible in this file.
    """

    def __init__(self):
        super(GDANET, self).__init__()
        # Batch norms are created separately so they can be embedded in the
        # Sequential conv blocks below.
        self.bn1 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn11 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn12 = nn.BatchNorm1d(64, momentum=0.1)
        self.bn2 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn21 = nn.BatchNorm2d(64, momentum=0.1)
        self.bn22 = nn.BatchNorm1d(64, momentum=0.1)
        self.bn3 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn31 = nn.BatchNorm2d(128, momentum=0.1)
        self.bn32 = nn.BatchNorm1d(128, momentum=0.1)
        self.bn4 = nn.BatchNorm1d(512, momentum=0.1)
        # Stage 1 convs; conv1 expects the 6-channel output of local_operator.
        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=True), self.bn1)
        self.conv11 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True), self.bn11)
        self.conv12 = nn.Sequential(nn.Conv1d((64 * 2), 64, kernel_size=1, bias=True), self.bn12)
        # Stage 2: 67 = 3 (raw input channels) + 64 (stage-1 fused features);
        # local_operator doubles the channel count (hence * 2).
        self.conv2 = nn.Sequential(nn.Conv2d((67 * 2), 64, kernel_size=1, bias=True), self.bn2)
        self.conv21 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1, bias=True), self.bn21)
        self.conv22 = nn.Sequential(nn.Conv1d((64 * 2), 64, kernel_size=1, bias=True), self.bn22)
        # Stage 3: 131 = 67 + 64 (stage-2 fused features appended).
        self.conv3 = nn.Sequential(nn.Conv2d((131 * 2), 128, kernel_size=1, bias=True), self.bn3)
        self.conv31 = nn.Sequential(nn.Conv2d(128, 128, kernel_size=1, bias=True), self.bn31)
        self.conv32 = nn.Sequential(nn.Conv1d(128, 128, kernel_size=1, bias=True), self.bn32)
        # Aggregation conv: 256 = 64 (z1) + 64 (z2) + 128 (z3).
        self.conv4 = nn.Sequential(nn.Conv1d(256, 512, kernel_size=1, bias=True), self.bn4)
        # Attention blocks fusing the two GDM streams of stages 1 and 2.
        self.SGCAM_1s = SGCAM(64)
        self.SGCAM_1g = SGCAM(64)
        self.SGCAM_2s = SGCAM(64)
        self.SGCAM_2g = SGCAM(64)
        # MLP classifier head: 1024 (max+avg pooled) -> 512 -> 256 -> 40.
        self.linear1 = nn.Linear(1024, 512, bias=True)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.4)
        self.linear2 = nn.Linear(512, 256, bias=True)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.4)
        self.linear3 = nn.Linear(256, 40, bias=True)

    def forward(self, x):
        # x: (B, C, N) point features. The conv2/conv3 channel counts
        # (67 = 3 + 64, 131 = 67 + 64) only line up when C == 3.
        (B, C, N) = x.size()
        # ---- stage 1: local features, geometry disentangle, attention fusion
        x1 = local_operator(x, k=30)
        x1 = F.relu(self.conv1(x1))
        x1 = F.relu(self.conv11(x1))
        # Max-pool over the last (neighborhood) dimension.
        x1 = x1.max(dim=(- 1), keepdim=False)[0]
        (x1s, x1g) = GDM(x1, M=256)
        y1s = self.SGCAM_1s(x1, x1s.transpose(2, 1))
        y1g = self.SGCAM_1g(x1, x1g.transpose(2, 1))
        z1 = torch.cat([y1s, y1g], 1)
        z1 = F.relu(self.conv12(z1))
        # Append fused stage-1 features to the raw input for stage 2.
        x1t = torch.cat((x, z1), dim=1)
        # ---- stage 2: same pattern on the augmented features
        x2 = local_operator(x1t, k=30)
        x2 = F.relu(self.conv2(x2))
        x2 = F.relu(self.conv21(x2))
        x2 = x2.max(dim=(- 1), keepdim=False)[0]
        (x2s, x2g) = GDM(x2, M=256)
        y2s = self.SGCAM_2s(x2, x2s.transpose(2, 1))
        y2g = self.SGCAM_2g(x2, x2g.transpose(2, 1))
        z2 = torch.cat([y2s, y2g], 1)
        z2 = F.relu(self.conv22(z2))
        x2t = torch.cat((x1t, z2), dim=1)
        # ---- stage 3: conv path only (no GDM/SGCAM)
        x3 = local_operator(x2t, k=30)
        x3 = F.relu(self.conv3(x3))
        x3 = F.relu(self.conv31(x3))
        x3 = x3.max(dim=(- 1), keepdim=False)[0]
        z3 = F.relu(self.conv32(x3))
        # Aggregate all stage outputs, then global max + avg pooling.
        x = torch.cat((z1, z2, z3), dim=1)
        x = F.relu(self.conv4(x))
        x11 = F.adaptive_max_pool1d(x, 1).view(B, (- 1))
        x22 = F.adaptive_avg_pool1d(x, 1).view(B, (- 1))
        x = torch.cat((x11, x22), 1)
        # Classifier head with BN, ReLU and dropout; returns raw logits.
        x = F.relu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.relu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        x = self.linear3(x)
        return x
def vectorize1(func, args=(), vec_func=False):
    """Wrap ``func`` so it can be applied elementwise to scalars or sequences.

    Args:
        func: Callable invoked as ``func(x, *args)``.
        args: Extra positional arguments forwarded to every call.
        vec_func: If True, ``func`` already handles array inputs and is
            called directly, with no elementwise loop.

    Returns:
        A single-argument function. For scalar input it returns
        ``func(x, *args)``. For sequence input it returns a 1-D numpy array
        whose dtype is inferred from the first element's result.
    """
    if vec_func:
        def vfunc(x):
            return func(x, *args)
    else:
        def vfunc(x):
            if numpy.isscalar(x):
                return func(x, *args)
            x = numpy.asarray(x)
            if len(x) == 0:
                # Robustness fix: the original indexed x[0] unconditionally
                # and raised IndexError on empty input.
                return numpy.empty((0,))
            y0 = func(x[0], *args)
            n = len(x)
            # Scalar results (plain Python numbers) have no .dtype; fall
            # back to their Python type, which numpy.empty accepts.
            dtype = getattr(y0, 'dtype', type(y0))
            output = numpy.empty((n,), dtype=dtype)
            output[0] = y0
            for i in range(1, n):
                output[i] = func(x[i], *args)
            return output
    return vfunc
def save_model(mean_IOU, best_iou, save_dir, save_prefix, train_loss, test_loss, recall, precision, epoch, net):
    """Persist the checkpoint and metric logs when mean IoU improves.

    Does nothing unless `mean_IOU` strictly exceeds `best_iou`; otherwise
    writes both metric log files and the checkpoint under result/<save_dir>.
    """
    if mean_IOU <= best_iou:
        return
    result_dir = 'result/' + save_dir
    save_mIoU_dir = f'{result_dir}/{save_prefix}_best_IoU_IoU.log'
    save_other_metric_dir = f'{result_dir}/{save_prefix}_best_IoU_other_metric.log'
    # Timestamp recorded alongside the metrics.
    dt_string = datetime.now().strftime('%d/%m/%Y %H:%M:%S')
    best_iou = mean_IOU
    save_model_and_result(dt_string, epoch, train_loss, test_loss, best_iou, recall, precision, save_mIoU_dir, save_other_metric_dir)
    save_ckpt(
        {'epoch': epoch, 'state_dict': net, 'loss': test_loss, 'mean_IOU': mean_IOU},
        save_path=result_dir,
        filename=f'mIoU__{save_prefix}_epoch.pth.tar',
    )
class TestFGFieldingData():
    """Integration tests for `fg_fielding_data` (Fangraphs fielding stats).

    Column-count expectations: ALL_DATA_COLUMNS_COUNT is all stat columns
    plus 2 (visible below: tests show 'Season' and 'Team' are each one
    extra/removable column). The hard-coded row counts (107, 2, 5, 3)
    pin observed 2018/2019 data — they will drift if the upstream data or
    fixtures change.
    """
    ALL_DATA_COLUMNS_COUNT = (len(FangraphsFieldingStats.ALL()) + 2)
    DEFAULT_MAX_RESULTS = 10

    def test_fg_fielding_data(self) -> None:
        # Basic single-season query: expect the Season column added (+1)
        # and exactly one distinct season in the result.
        season = 2019
        data = fg_fielding_data(season, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)
        seasons = list(set(data['Season']))
        assert (len(seasons) == 1)
        assert (seasons[0] == season)

    def test_fg_fielding_data_future_season(self) -> None:
        # A season in the future must be rejected.
        season = 3000
        with pytest.raises(ValueError):
            fg_fielding_data(season, max_results=self.DEFAULT_MAX_RESULTS)

    def test_fg_fielding_data_end_season(self) -> None:
        # Season-range query, split by season (default).
        data = fg_fielding_data(2018, end_season=2019, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)

    def test_fg_fielding_data_end_season_no_split_season(self) -> None:
        # With split_seasons=False the Season column is absent.
        data = fg_fielding_data(2018, end_season=2019, split_seasons=False, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == self.ALL_DATA_COLUMNS_COUNT)
        assert ('Season' not in data.columns)
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)

    def test_fg_fielding_data_single_stat_columns(self) -> None:
        # A single named stat column yields a fixed narrow frame.
        data = fg_fielding_data(2019, stat_columns='DEF', max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == 6)
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)

    def test_fg_fielding_data_multiple_stat_columns(self) -> None:
        # Two named stat columns: one wider than the single-column case.
        data = fg_fielding_data(2019, stat_columns=['DEF', 'UZR'], max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == 7)
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)

    def test_fg_fielding_data_league(self, assert_frame_not_equal: Callable[(..., bool)]) -> None:
        # AL and NL queries must both succeed and return different frames.
        data_al = fg_fielding_data(2019, league='AL', max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_al is not None)
        assert (not data_al.empty)
        assert (len(data_al.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data_al.index) == self.DEFAULT_MAX_RESULTS)
        data_nl = fg_fielding_data(2019, league='NL', max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_nl is not None)
        assert (not data_nl.empty)
        assert (len(data_nl.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data_nl.index) == self.DEFAULT_MAX_RESULTS)
        assert assert_frame_not_equal(data_al, data_nl)

    def test_fg_fielding_data_qual(self) -> None:
        # A very high qualification threshold shrinks the result set.
        data = fg_fielding_data(2019, qual=1400, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data.index) == 2)

    def test_fg_fielding_data_on_active_roster(self, assert_frame_not_equal: Callable[(..., bool)]) -> None:
        # on_active_roster=True must change the returned frame.
        # NOTE(review): unlike test_fg_fielding_data_league, the final
        # comparison here is not wrapped in `assert` — confirm whether
        # assert_frame_not_equal raises on equality or returns a bool.
        data = fg_fielding_data(2018, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data.index) == self.DEFAULT_MAX_RESULTS)
        oar_data = fg_fielding_data(2018, on_active_roster=True, max_results=self.DEFAULT_MAX_RESULTS)
        assert (oar_data is not None)
        assert (not oar_data.empty)
        assert (len(oar_data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(oar_data.index) == self.DEFAULT_MAX_RESULTS)
        assert_frame_not_equal(data, oar_data)

    def test_fg_fielding_team(self, assert_frame_not_equal: Callable[(..., bool)]) -> None:
        # Filtering by team drops the Team column and returns team-specific rows.
        data_1 = fg_fielding_data(2019, team='1', qual=750, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_1 is not None)
        assert (not data_1.empty)
        assert (len(data_1.columns) == self.ALL_DATA_COLUMNS_COUNT)
        assert ('Team' not in data_1.columns)
        assert (len(data_1.index) == 5)
        data_2 = fg_fielding_data(2019, team='2', qual=750, max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_2 is not None)
        assert (not data_2.empty)
        assert (len(data_2.columns) == self.ALL_DATA_COLUMNS_COUNT)
        assert ('Team' not in data_2.columns)
        assert (len(data_2.index) == 3)
        assert_frame_not_equal(data_1, data_2)

    def test_fg_fielding_position(self, assert_frame_not_equal: Callable[(..., bool)]) -> None:
        # Different positions must return different data.
        data_1b = fg_fielding_data(2019, position='1B', max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_1b is not None)
        assert (not data_1b.empty)
        assert (len(data_1b.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data_1b.index) == 10)
        data_2b = fg_fielding_data(2019, position='2B', max_results=self.DEFAULT_MAX_RESULTS)
        assert (data_2b is not None)
        assert (not data_2b.empty)
        assert (len(data_2b.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data_2b.index) == 10)
        assert_frame_not_equal(data_1b, data_2b)

    def test_fg_fielding_data_max_results(self) -> None:
        # No max_results: the full 2019 result set (107 rows as observed).
        season = 2019
        data = fg_fielding_data(season)
        assert (data is not None)
        assert (not data.empty)
        assert (len(data.columns) == (self.ALL_DATA_COLUMNS_COUNT + 1))
        assert (len(data.index) == 107)
class Trainer():
    """Super-resolution training/evaluation driver.

    Wraps a model, loss, optimizer and checkpoint helper (`ckp`) and runs
    one epoch of training or evaluation at a time. All heavy lifting
    (logging, checkpointing, metrics) is delegated to project utilities.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        # Resuming: restore optimizer state matching the loaded checkpoint.
        if (self.args.load != ''):
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))
        self.error_last = .0

    def train(self):
        """Run one training epoch over loader_train."""
        self.loss.step()
        epoch = (self.optimizer.get_last_epoch() + 1)
        lr = self.optimizer.get_lr()
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        self.loader_train.dataset.set_scale(0)
        # NOTE(review): `lr` is rebound here from "learning rate" to the
        # low-resolution batch tensor — intentional but easy to misread.
        for (batch, (lr, hr, _)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            # Optional gradient clipping by value.
            if (self.args.gclip > 0):
                utils.clip_grad_value_(self.model.parameters(), self.args.gclip)
            self.optimizer.step()
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(((batch + 1) * self.args.batch_size), len(self.loader_train.dataset), self.loss.display_loss(batch), timer_model.release(), timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        # Keep the most recent loss value for external stopping logic.
        self.error_last = self.loss.log[((- 1), (- 1))]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set and scale, logging PSNR and saving outputs."""
        torch.set_grad_enabled(False)
        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        # One log row per (dataset, scale) pair for this evaluation pass.
        self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
        self.model.eval()
        timer_test = utility.timer()
        if self.args.save_results:
            self.ckp.begin_background()
        for (idx_data, d) in enumerate(self.loader_test):
            for (idx_scale, scale) in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for (lr, hr, filename) in tqdm(d, ncols=80):
                    (lr, hr) = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    save_list = [sr]
                    # Accumulate PSNR; divided by len(d) below for the mean.
                    self.ckp.log[((- 1), idx_data, idx_scale)] += utility.calc_psnr(sr, hr, scale, self.args.rgb_range, dataset=d)
                    if self.args.save_gt:
                        save_list.extend([hr])
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
                self.ckp.log[((- 1), idx_data, idx_scale)] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} {})'.format(d.dataset.name, scale, self.ckp.log[((- 1), idx_data, idx_scale)], best[0][(idx_data, idx_scale)], (best[1][(idx_data, idx_scale)] + 1)))
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')
        if self.args.save_results:
            self.ckp.end_background()
        if (not self.args.test_only):
            # is_best compares against the best epoch of the first (dataset, scale).
            self.ckp.save(self, epoch, is_best=((best[1][(0, 0)] + 1) == epoch))
        self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device, optionally casting to half precision."""
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))
        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop (test-only mode runs one eval)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.optimizer.get_last_epoch() + 1)
            return (epoch >= self.args.epochs)
class LibriTransDataset(torch.utils.data.Dataset):
    """libri-trans audio/translation dataset producing Whisper log-mel features.

    Each item is a dict with the audio path, the log-mel spectrogram of the
    (padded/trimmed) waveform, the French target text, and a raw_en slot
    that is always None here.

    NOTE(review): several oddities worth confirming upstream:
    - `split` is validated against train/dev/test, but data is always
      loaded from the 'test' and 'dev' folders regardless of `split`.
    - `lang` is computed but never used.
    - only language 'fr' is accepted (asserted below).
    - input validation uses `assert`, which is stripped under `python -O`.
    """

    def __init__(self, args, split, sample_rate):
        super().__init__()
        self.args = args
        self.sample_rate = sample_rate
        # Multilingual tokenizer; kept for downstream use (not used in __getitem__).
        self.tokenizer = whisper.tokenizer.get_tokenizer(True, language=args.language, task='transcribe')
        self.data = []
        assert (args.language in LANGUAGES), f'language {args.language} is not supported by whisper'
        print('running on libri-trans language:', LANGUAGES[args.language])
        assert (split in ['train', 'dev', 'test']), f"split {split} not in {['train', 'dev', 'test']}"
        lang = ('zh-CN' if ('zh' in args.language) else args.language)
        assert (args.language == 'fr'), f"language needs to be fr, but it's {args.language}"
        for real_split in ['test', 'dev']:
            path = os.path.join(args.dataset_dir, real_split)
            # alignments.meta is tab-separated with a header row (skipped);
            # column 4 holds the audio file basename.
            with open(os.path.join(path, 'alignments.meta'), 'r') as f, open(os.path.join(path, f'{real_split}.fr'), 'r') as g:
                all_flines = [l.strip().split('\t') for l in f.readlines()]
                all_flines = all_flines[1:]
                all_glines = [l.strip() for l in g.readlines()]
                assert (len(all_flines) == len(all_glines)), f'wav files length should equal to translation file length, but they are of length: {len(all_flines)}, and {len(all_glines)}'
                for (fline, gline) in zip(all_flines, all_glines):
                    wav_fn = os.path.join(path, 'audiofiles', (fline[4] + '.wav'))
                    trans = gline
                    # raw_en is always None for this dataset.
                    self.data.append([wav_fn, None, trans])
        print(f'pad audio to {(self.args.audio_max_length / 16000)} seconds')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, id):
        (cur_path, raw_en, raw_text) = self.data[id]
        audio_path = cur_path
        audio = load_wave(audio_path, sample_rate=self.sample_rate)
        # Pad/trim to a fixed length so every mel has the same time dimension.
        audio = whisper.pad_or_trim(audio.flatten(), length=self.args.audio_max_length)
        mel = whisper.log_mel_spectrogram(audio)
        return {'audio_path': audio_path, 'input_mel': mel, 'raw_text': raw_text, 'raw_en': raw_en}

    def collate(self, batch):
        """Stack mel features into one tensor; keep the rest as lists."""
        (audio_paths, input_mels, raw_text, raw_en) = ([], [], [], [])
        for f in batch:
            raw_text.append(f['raw_text'])
            audio_paths.append(f['audio_path'])
            input_mels.append(f['input_mel'])
            raw_en.append(f['raw_en'])
        input_mels = torch.stack(input_mels, dim=0)
        collated_batch = {}
        collated_batch['input_mels'] = input_mels
        collated_batch['audio_paths'] = audio_paths
        collated_batch['raw_text'] = raw_text
        collated_batch['raw_en'] = raw_en
        return collated_batch
def make_dataset(input_dir, split):
    """Collect per-subdirectory .npy sample paths and optionally split them.

    Scans each immediate entry of `input_dir` for ``*.npy`` files and
    records each as a single-element list holding the
    '<subdir>/<name>.npy' relative path (forward slash, matching the
    original behavior on all platforms).

    Args:
        input_dir: Root directory whose subdirectories hold .npy files.
        split: If None, the full list is returned; otherwise it is
            forwarded to the project helper `split2list` to partition
            the files.

    Returns:
        The full list of [relative_path] entries, or the result of
        `split2list` when a split is requested.
    """
    plyfiles = []
    for subdir in os.listdir(input_dir):
        class_dir = os.path.join(input_dir, subdir)
        # glob on a non-directory simply yields nothing, so plain files in
        # input_dir are skipped harmlessly.
        for npy_path in glob.iglob(os.path.join(class_dir, '*.npy')):
            # splitext replaces the original manual [:-4] suffix strip;
            # also avoids shadowing the builtin `input`.
            root_filename = os.path.splitext(os.path.basename(npy_path))[0]
            plyfiles.append([subdir + '/' + root_filename + '.npy'])
    if split is None:
        return plyfiles
    return split2list(plyfiles, split, default_split=split)
def usps(tnum=2):
    """Build the transform configuration for the USPS dataset.

    Args:
        tnum: How many augmented copies the training transform produces
            per sample (passed to data.TransformNTimes).

    Returns:
        Dict with 'train_transformation', 'eval_transformation',
        'datadir' and 'num_classes' entries.
    """
    stats = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    def _pipeline():
        # Fresh transform instances per pipeline, as in the original.
        return transforms.Compose([transforms.ToTensor(), transforms.Normalize(**stats)])

    return {
        'train_transformation': data.TransformNTimes(_pipeline(), n=tnum),
        'eval_transformation': _pipeline(),
        'datadir': 'third_party/data-local/images/usps',
        'num_classes': 10,
    }
def main():
    """Train and evaluate a fastText baseline for X-Stance.

    Converts the jsonl splits to fastText format, trains a model, reports
    validation precision/recall/F1, and writes test predictions to the
    path given by --pred.
    """
    parser = argparse.ArgumentParser(description='Train a fastText baseline for X-Stance')
    parser.add_argument('--data-dir', type=str, required=True)
    parser.add_argument('--pred', type=str, required=True)
    parser.add_argument('--pretrained-vectors', type=str, default='')
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=5)
    args = parser.parse_args()
    FASTTEXT_DATA_DIR = 'processed_data'
    # Robustness fix: create the output directory so the conversion below
    # does not fail on a fresh checkout.
    os.makedirs(FASTTEXT_DATA_DIR, exist_ok=True)
    for dataset_path in ['train.jsonl', 'valid.jsonl', 'test.jsonl']:
        _jsonl_to_fasttext_format(input_path=os.path.join(args.data_dir, dataset_path), output_path=os.path.join(FASTTEXT_DATA_DIR, dataset_path.replace('.jsonl', '.txt')))
    model_path = 'model.bin'
    model = train(model_path, train_dataset_path=os.path.join(FASTTEXT_DATA_DIR, 'train.txt'), pretrained_vectors=args.pretrained_vectors, lr=args.lr, epochs=args.epochs)
    (_, valid_precision, valid_recall) = model.test(os.path.join(FASTTEXT_DATA_DIR, 'valid.txt'))
    print('Validation precision: ', valid_precision)
    print('Validation recall: ', valid_recall)
    # Robustness fix: guard the harmonic mean against a zero denominator
    # (precision == recall == 0), which previously raised ZeroDivisionError.
    denominator = valid_precision + valid_recall
    valid_f1 = (2 * valid_precision * valid_recall / denominator) if denominator > 0 else 0.0
    print('Validation F1: ', valid_f1)
    predictions = predict(model_path, os.path.join(FASTTEXT_DATA_DIR, 'test.txt'))
    _predictions_to_jsonl(predictions, args.pred)
    print('Saved test predictions in', args.pred)
def main():
    """Render the plot named by sys.argv[1] and save it under ./plots.

    Returns 1 when the plot name is unknown; raises ValueError for an
    unsupported file extension.
    """
    plotname = os.path.basename(sys.argv[1])
    here = os.path.dirname(__file__)
    plot_func = plots.get(plotname, None)
    # Guard clause: unknown plot name.
    if not plot_func:
        sys.stderr.write('Plot {} not found. Supported: \n{}'.format(plotname, plots.keys()))
        return 1
    result = plot_func()
    out_path = os.path.join(here, 'plots', plotname)
    target_dir = os.path.dirname(out_path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # The extension of the plot name selects the output format.
    ext = os.path.splitext(plotname)[1]
    if ext == '.png':
        result.savefig(out_path, bbox_inches='tight')
        return
    if ext == '.tex':
        with open(out_path, 'w') as f:
            f.write(result)
        return
    raise ValueError('Unknown extension {}'.format(ext))
_config
def model_unet_hetero_pooled():
    # Configuration for the pooled heteroscedastic U-Net learner.
    # NOTE(review): `cfg` is assigned but never returned. If this function
    # is a config scope captured by a framework via its locals (the stray
    # `_config` line directly above looks like decorator residue), that is
    # intentional; otherwise a `return cfg` is missing. Confirm against the
    # original decorator before changing.
    cfg = {'learner': {'model': 'UNetHeteroscedasticPooled', 'model_kwargs': {'downsample': 6}}}
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, give_rw_access=True, rank0_only=True):
    """Save a HF Trainer's model, handling FSDP, DeepSpeed and plain setups.

    Args:
        trainer: The transformers.Trainer whose model should be saved.
        output_dir: Destination directory for the checkpoint.
        give_rw_access: If True, chmod the output dir recursively (a+xwr).
        rank0_only: For FSDP, gather the full state dict onto rank 0 only.
    """
    now = time.perf_counter()
    if (trainer.fsdp is not None):
        # FSDP: gather a full (unsharded) state dict, offloaded to CPU.
        cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=rank0_only)
        with FSDP.state_dict_type(trainer.model, StateDictType.FULL_STATE_DICT, cfg):
            state_dict = trainer.model.state_dict()
        if trainer.args.should_save:
            trainer._save(output_dir, state_dict=state_dict)
    elif (trainer.deepspeed is not None):
        # DeepSpeed: let the trainer save first (config/tokenizer etc.).
        if trainer.args.should_save:
            trainer._save(output_dir)
        if is_deepspeed_zero3_enabled():
            # Under ZeRO-3 the weights file written above is incomplete
            # (parameters are sharded), so remove it and save 16-bit
            # weights, falling back to a full checkpoint + offline fp32
            # conversion when weight gathering is disabled.
            if trainer.args.should_save:
                file = os.path.join(output_dir, WEIGHTS_NAME)
                if os.path.isfile(file):
                    logger.warning(f'deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights')
                    os.remove(file)
            if (not trainer.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME)):
                logger.warning("deepspeed.save_16bit_model didn't save the model, since stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use zero_to_fp32.py to recover weights")
                trainer.deepspeed.save_checkpoint(output_dir)
                if trainer.args.should_save:
                    try:
                        # SECURITY/robustness note: shells out with output_dir
                        # interpolated into the command line; os.system does
                        # not raise on failure, so the except is largely
                        # ineffective — consider subprocess.run with a list.
                        os.system(f"python {output_dir}/zero_to_fp32.py '{output_dir}' '{output_dir}/pytorch_model.bin'")
                    except Exception as e:
                        logger.fatal(f'Failed to convert zero3 checkpoint to fp32: {e}')
    else:
        # Plain (non-FSDP, non-DeepSpeed): move tensors to CPU before saving.
        state_dict = trainer.model.state_dict()
        if trainer.args.should_save:
            cpu_state_dict = {key: value.cpu() for (key, value) in state_dict.items()}
            del state_dict
            trainer._save(output_dir, state_dict=cpu_state_dict)
    if trainer.args.should_save:
        if give_rw_access:
            try:
                # Same caveat as above: os.system does not raise on failure.
                os.system(f'chmod -R a+xwr {output_dir}')
            except Exception as e:
                logger.fatal(f'Failed to give read-write access to {output_dir}: {e}')
    logger.warning(f'Saving model took {(time.perf_counter() - now):.2f} seconds.')
class TestStageCascadeRPNHead(TestCase):
    """Unit tests for CascadeRPNHead loss and predict paths.

    Each test builds per-stride random feature maps for a 256x256 image and
    a DetDataSample, then exercises loss(), loss_and_predict() or predict().
    """

    def test_cascade_rpn_head_loss(self):
        """With no GT boxes cls loss is positive and reg loss is zero; with
        one GT box every loss term is positive."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [torch.rand(1, 1, (s // stride[1]), (s // stride[0])) for stride in cascade_rpn_head.stages[0].prior_generator.strides]
        img_metas = {'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': 1}
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        # Empty ground truth: no boxes, no labels.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        sample.gt_instances = gt_instances
        empty_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for (key, loss) in empty_gt_losses.items():
            loss = sum(loss)
            if ('cls' in key):
                self.assertGreater(loss.item(), 0, 'cls loss should be non-zero')
            elif ('reg' in key):
                # FIX: message typo "ground true boxes" -> "ground truth boxes".
                self.assertEqual(loss.item(), 0, 'there should be no reg loss when no ground truth boxes')
        # One ground-truth box: all loss terms should become positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([0])
        sample.gt_instances = gt_instances
        one_gt_losses = cascade_rpn_head.loss(feats, [sample])
        for loss in one_gt_losses.values():
            loss = sum(loss)
            self.assertGreater(loss.item(), 0, 'cls loss, or box loss, or iou loss should be non-zero')

    def test_cascade_rpn_head_loss_and_predict(self):
        """loss_and_predict runs end-to-end with empty ground truth."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [torch.rand(1, 1, (s // stride[1]), (s // stride[0])) for stride in cascade_rpn_head.stages[0].prior_generator.strides]
        img_metas = {'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': 1}
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        sample.gt_instances = gt_instances
        proposal_cfg = ConfigDict(dict(max_per_img=300, nms=dict(iou_threshold=0.8)))
        cascade_rpn_head.loss_and_predict(feats, [sample], proposal_cfg)

    def test_cascade_rpn_head_predict(self):
        """predict runs end-to-end on random features."""
        cascade_rpn_head = CascadeRPNHead(**cascade_rpn_config)
        s = 256
        feats = [torch.rand(1, 1, (s // stride[1]), (s // stride[0])) for stride in cascade_rpn_head.stages[0].prior_generator.strides]
        img_metas = {'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': 1}
        sample = DetDataSample()
        sample.set_metainfo(img_metas)
        cascade_rpn_head.predict(feats, [sample])
def chunk_layer(layer: Callable, inputs: Dict[(str, Any)], chunk_size: int, no_batch_dims: int, low_mem: bool=False, _out: Any=None, _add_into_out: bool=False) -> Any:
    """Apply `layer` over `inputs` in chunks along flattened batch dims.

    The leading `no_batch_dims` dimensions of every tensor in `inputs` are
    broadcast to a common shape, flattened into one dimension, and the
    layer is invoked on consecutive slices of size `chunk_size`. The
    chunked outputs (dict, tuple or tensor) are written into a
    pre-allocated output and finally reshaped back to the original batch
    dimensions.

    Args:
        layer: Callable invoked with keyword arguments taken from `inputs`.
        inputs: (Possibly nested) dict of tensors sharing broadcastable
            leading batch dims.
        chunk_size: Number of flattened batch elements per layer call.
        no_batch_dims: How many leading dims count as batch dims.
        low_mem: If True, avoid materializing expanded/reshaped inputs up
            front; slices are produced lazily via `_chunk_slice`.
        _out: Optional pre-allocated output structure to write into.
        _add_into_out: If True, accumulate (+=) into the output instead of
            overwriting it.

    Returns:
        The layer output with the original batch dimensions restored.
    """
    if (not (len(inputs) > 0)):
        raise ValueError('Must provide at least one input')
    # Broadcast target: per-dim max of all inputs' leading batch shapes.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if (not low_mem):
            # Expand to the common batch shape unless the tensor already has
            # trivial batch dims, then flatten the batch dims into one.
            if (not (sum(t.shape[:no_batch_dims]) == no_batch_dims)):
                t = t.expand((orig_batch_dims + t.shape[no_batch_dims:]))
            t = t.reshape((- 1), *t.shape[no_batch_dims:])
        else:
            # low-mem: expand only (a view); slicing happens per chunk.
            t = t.expand((orig_batch_dims + t.shape[no_batch_dims:]))
        return t
    prepped_inputs: Dict[(str, Any)] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if (_out is not None):
        # Flatten the provided output's batch dims to match the inputs.
        prepped_outputs = tensor_tree_map((lambda t: t.view(([(- 1)] + list(t.shape[no_batch_dims:])))), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: one extra chunk when there is a remainder.
    no_chunks = ((flat_batch_dim // chunk_size) + ((flat_batch_dim % chunk_size) != 0))

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # NOTE: closes over the loop variable `i` below; size-1 leading dims
        # are broadcast tensors and are passed through whole.
        return (t[i:(i + chunk_size)] if (t.shape[0] != 1) else t)
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        if (not low_mem):
            select_chunk = _select_chunk
        else:
            select_chunk = partial(_chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, (i + chunk_size)), no_batch_dims=len(orig_batch_dims))
        chunks: Dict[(str, Any)] = tensor_tree_map(select_chunk, prepped_inputs)
        output_chunk = layer(**chunks)
        # Lazily allocate the output on the first chunk, mirroring the
        # output structure with flat batch dims.
        if (out is None):
            out = tensor_tree_map((lambda t: t.new_zeros(((flat_batch_dim,) + t.shape[1:]))), output_chunk)
        # Write (or accumulate) the chunk into the output, matching the
        # output container type: nested dict, tuple, or plain tensor.
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for (k, v) in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    elif _add_into_out:
                        v[i:(i + chunk_size)] += d2[k]
                    else:
                        v[i:(i + chunk_size)] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for (x1, x2) in zip(out, output_chunk):
                if _add_into_out:
                    x1[i:(i + chunk_size)] += x2
                else:
                    x1[i:(i + chunk_size)] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i:(i + chunk_size)] += output_chunk
            else:
                out[i:(i + chunk_size)] = output_chunk
        else:
            raise ValueError('Not supported')
        i += chunk_size
    # Restore the original (unflattened) batch dimensions.
    out = tensor_tree_map((lambda t: t.view((orig_batch_dims + t.shape[1:]))), out)
    return out
class FIDInceptionA(torchvision.models.inception.InceptionA):
    """InceptionA variant whose pooling branch uses count_include_pad=False.

    Identical to torchvision's InceptionA except for that pooling flag
    (presumably to match the reference Inception weights used for FID —
    confirm against the upstream pytorch-fid implementation).
    """

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        # Four parallel branches, concatenated on the channel axis.
        out_1x1 = self.branch1x1(x)

        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))

        out_3x3dbl = self.branch3x3dbl_1(x)
        out_3x3dbl = self.branch3x3dbl_2(out_3x3dbl)
        out_3x3dbl = self.branch3x3dbl_3(out_3x3dbl)

        # The one difference from upstream: exclude the zero padding from
        # the average computation.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        pooled = self.branch_pool(pooled)

        return torch.cat([out_1x1, out_5x5, out_3x3dbl, pooled], 1)
class AE_Decoder(nn.Module):
    """Three-stage decoder that progressively fuses skip features.

    Each stage concatenates the running output with one skip feature on
    the channel axis before applying its convolution block.
    """

    def __init__(self):
        super(AE_Decoder, self).__init__()
        self.cov5 = Cov5()
        self.cov6 = Cov6()
        self.cov7 = Cov7()

    def forward(self, feature_1, feature_2, feature_B, feature_D):
        fused = self.cov5(torch.cat([feature_B, feature_D], 1))
        fused = self.cov6(torch.cat([fused, feature_2], 1))
        fused = self.cov7(torch.cat([fused, feature_1], 1))
        return fused
def save_trained_matrix_to_file(matrix_path, matrix):
    """Write `matrix` as text, one row per line, space-separated values.

    Each value is rendered in scientific notation with 18-digit precision
    via np.format_float_scientific(unique=False).

    Args:
        matrix_path: Destination file path (overwritten if present).
        matrix: 2-D array-like exposing .shape and matrix[i][j] indexing.
    """
    with open(matrix_path, 'w') as f:
        for i in range(matrix.shape[0]):
            # Join formatted values in one pass instead of quadratic string
            # concatenation; also tolerates zero-column rows (the original
            # indexed matrix[i][0] unconditionally).
            row = ' '.join(
                np.format_float_scientific(matrix[i][j], unique=False, precision=18)
                for j in range(matrix.shape[1])
            )
            f.write(row + '\n')
def yolo_config():
    """Build a small, frozen YOLOV3Head on CPU in eval mode for testing."""
    # Anchor boxes per output level (largest stride first) and coder config.
    anchor_generator = dict(
        type='YOLOAnchorGenerator',
        base_sizes=[[(116, 90), (156, 198), (373, 326)],
                    [(30, 61), (62, 45), (59, 119)],
                    [(10, 13), (16, 30), (33, 23)]],
        strides=[32, 16, 8],
    )
    bbox_coder = dict(type='YOLOBBoxCoder')
    test_cfg = mmcv.Config(dict(deploy_nms_pre=0, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100))
    model = YOLOV3Head(
        num_classes=4,
        in_channels=[1, 1, 1],
        out_channels=[16, 8, 4],
        test_cfg=test_cfg,
        anchor_generator=anchor_generator,
        bbox_coder=bbox_coder,
    )
    # Freeze parameters and switch to CPU eval mode.
    model.requires_grad_(False)
    model.cpu().eval()
    return model
def save_pickle(d, path):
    """Serialize *d* to *path* with pickle, announcing the destination."""
    print('save pickle to', path)
    with open(path, mode='wb') as handle:
        pickle.dump(d, handle)
def combined_roidb(imdb_names, training=True):
    """Load and combine one or more region-of-interest databases.

    imdb_names is a single dataset name or several joined with '+'.
    Returns (imdb, roidb, ratio_list, ratio_index); when training is True
    the combined roidb is filtered before being ranked by aspect ratio.
    """
    print(imdb_names)

    def get_training_roidb(imdb):
        # Optionally double the dataset with horizontally flipped copies,
        # then attach per-entry training metadata via prepare_roidb.
        if cfg.TRAIN.USE_FLIPPED:
            print('Appending horizontally-flipped training examples...')
            imdb.append_flipped_images()
            print('done')
        print('Preparing training data...')
        prepare_roidb(imdb)
        print('done')
        return imdb.roidb

    def get_roidb(imdb_name):
        # Load a single dataset, set its proposal method from the config,
        # and produce its training roidb.
        imdb = get_imdb(imdb_name)
        print('Loaded dataset `{:s}` for training'.format(imdb.name))
        imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
        print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
        roidb = get_training_roidb(imdb)
        return roidb

    roidbs = [get_roidb(s) for s in imdb_names.split('+')]
    roidb = roidbs[0]
    if (len(roidbs) > 1):
        # Concatenate the remaining datasets' entries onto the first one.
        for r in roidbs[1:]:
            roidb.extend(r)
        # NOTE(review): the merged imdb takes its class list from the *second*
        # dataset only — presumably assumes all datasets share classes (and
        # at most two are combined); confirm for 3+ dataset merges.
        tmp = get_imdb(imdb_names.split('+')[1])
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    if training:
        roidb = filter_roidb(roidb)
    (ratio_list, ratio_index) = rank_roidb_ratio(roidb)
    return (imdb, roidb, ratio_list, ratio_index)
def idct_2D(x):
    """Apply an ortho-normalised 2-D inverse DCT over two axes of a 6-D tensor.

    tf.signal.idct operates on the last axis only, so the tensor is
    permuted so each pass transforms the intended axis; the final two
    transposes restore the original axis order exactly (the composed
    permutation of all four transposes is the identity).
    """
    # Cycle axes so that the first iDCT pass runs along (original) axis 4.
    y = tf.transpose(x, [0, 5, 1, 2, 3, 4])
    y = tf.signal.idct(y, norm='ortho')
    # Swap the last two axes so the second pass runs along (original) axis 3.
    y = tf.transpose(y, [0, 1, 2, 3, 5, 4])
    y = tf.signal.idct(y, norm='ortho')
    # Undo the swap, then undo the initial cycling.
    y = tf.transpose(y, [0, 1, 2, 3, 5, 4])
    y = tf.transpose(y, [0, 2, 3, 4, 5, 1])
    return y
def main():
    """Run one active-learning experiment (Seafaring / Random / SmallExact).

    Parses CLI arguments, seeds all RNGs, builds the test/initial datasets
    and environment, runs the selected acquisition method, and saves
    results, caches and the trained model under --resdir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0)
    # BUGFIX: the default used to be 'MaxMax', which is not in `choices`.
    # argparse does not validate defaults, so it silently fell through to the
    # else-branch (Seafaring); make the default explicit and consistent.
    parser.add_argument('--method', choices=['Seafaring', 'Random', 'SmallExact'], default='Seafaring')
    parser.add_argument('--env', choices=['OpenImage', 'Flickr'], default='OpenImage')
    parser.add_argument('--apikey', type=str, default=None, help='API key of Flickr. Valid only for Flickr env.')
    parser.add_argument('--tiara_budget', type=int, default=1000)
    parser.add_argument('--budget_per_round', type=int, default=1)
    parser.add_argument('--initdata', type=int, default=1, help='Number of the initial labelled data.')
    parser.add_argument('--testdata', type=int, default=100, help='Size of the test dataset.')
    parser.add_argument('--nround', type=int, default=100, help='Number of rounds of active learning.')
    parser.add_argument('--nepoch', type=int, default=100, help='Number of epochs for training the target model.')
    parser.add_argument('--alpha', type=float, default=1.0, help='The alpha parameter of Tiara.')
    parser.add_argument('--threshold', type=float, default=0.6, help='Threshold of Positive data. Valid only for Flickr env.')
    parser.add_argument('--batchsize', type=int, default=16)
    parser.add_argument('--poolsize', type=int, default=1000, help='Size of the poolsize for SmallExact method')
    parser.add_argument('--device', type=str, default='cpu')
    parser.add_argument('--poslabels', type=str, nargs='+', default=['Cat'], help='List of positive labels. Valid only for OpenImage env.')
    parser.add_argument('--user', type=int, default=0, help='Id of the target virtual user, i.e., category. Valid only for Flickr env. See also create_virtual_users.py.')
    parser.add_argument('--initialtags', type=str, default=None, help='Path to the tag file.')
    parser.add_argument('--resdir', type=str, default='results')
    args = parser.parse_args()
    # Persist the full configuration next to the results for reproducibility.
    if (not os.path.exists(args.resdir)):
        os.makedirs(args.resdir)
    with open(f'{args.resdir}/args.json', 'w') as f:
        json.dump(vars(args), f, indent=4)
    print(vars(args))
    # Seed every RNG source used downstream.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    n_classes = 2
    # Build the held-out test set and the initial labelled pool.
    if (args.env == 'OpenImage'):
        test_data = build_data(args.testdata, args.poslabels, 'openimage_tag_to_image_test.pickle')
        test_data = MDataset(test_data)
        init_data = build_data(args.initdata, args.poslabels, 'openimage_tag_to_image.pickle')
    else:
        test_data = build_user_vitual_user(args.testdata, args.user, (- 1), args.threshold)
        test_data = MDataset(test_data)
        init_data = build_user_vitual_user(args.initdata, args.user, args.testdata, args.threshold)
    env = get_env(args.env, args.apikey, args.initialtags, args.poslabels, args.user, args.device, args.threshold)
    # Instantiate the requested acquisition method.
    if (args.method == 'Random'):
        method = RandomMethod(init_data, budget_per_round=args.budget_per_round, poslabels=args.poslabels, env=env)
    elif (args.method == 'SmallExact'):
        method = SmallExact(init_data, budget_per_round=args.budget_per_round, poslabels=args.poslabels, poolsize=args.poolsize, device=args.device, env=env)
    else:
        method = Seafaring(init_data, budget_per_round=args.budget_per_round, tiara_budget=args.tiara_budget, alpha=args.alpha, poslabels=args.poslabels, device=args.device, env=env)
    (accs, aucs, model) = measure(method, test_data, n_classes, args.nround, args.nepoch, args.batchsize, args.device)
    res = [accs, aucs]
    print(res)
    # Persist environment cache (if supported), metrics, diagnostics, model.
    if hasattr(env, 'save_cache'):
        env.save_cache()
    with open(f'{args.resdir}/res.pickle', 'wb') as f:
        pickle.dump(res, f)
    if hasattr(method, 'maxprob'):
        with open(f'{args.resdir}/maxprob.pickle', 'wb') as f:
            pickle.dump(method.maxprob, f)
    torch.save(model.state_dict(), f'{args.resdir}/model.pth')
_cache()
def setup_logger(name, save_dir, distributed_rank, filename='log.txt', color=True, abbrev_name=None):
    """Create a DEBUG-level logger with a stdout handler and optional file handler.

    Only rank 0 gets handlers; other ranks receive a bare logger so that
    distributed workers do not duplicate output.  When color is True the
    console uses the colorful formatter; the log file always uses the
    plain formatter.

    Args:
        name: logger name (also used as the formatter root name).
        save_dir: directory for the log file; falsy disables file logging.
        distributed_rank: rank of this process; >0 returns early.
        filename: log file name inside save_dir.
        color: colorize console output.
        abbrev_name: abbreviation of `name` used by the colorful formatter.

    Returns:
        The configured logging.Logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if (abbrev_name is None):
        abbrev_name = ('domain adaptation' if (name == 'domain adaptation') else name)
    plain_formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%m/%d %H:%M:%S')
    # Non-master ranks get no handlers at all.
    if (distributed_rank > 0):
        return logger
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(logging.DEBUG)
    if color:
        formatter = _ColorfulFormatter((colored('[%(asctime)s %(name)s]: ', 'green') + '%(message)s'), datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(abbrev_name))
    else:
        formatter = plain_formatter
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if save_dir:
        fh = logging.FileHandler(os.path.join(save_dir, filename))
        fh.setLevel(logging.DEBUG)
        # BUGFIX: the file handler previously reused the console formatter,
        # which writes ANSI color escape codes into the log file when
        # color=True.  Files always use the plain formatter.
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    return logger
class DynamicPad2d(nn.Module):
    """Pad a 4-D input on the fly so a convolution with the given
    kernel/stride/dilation covers it exactly ("SAME"-style padding).

    Padding is computed per forward pass from the input's spatial size and
    split as evenly as possible, with the extra pixel on the far side.

    Args:
        kernel_size, stride, dilation: int or (h, w) pair.
        value: constant fill value for the padded region.
    """

    def __init__(self, kernel_size, stride, dilation, value=0):
        super().__init__()
        # Normalize scalar arguments to (h, w) pairs.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if isinstance(stride, int):
            stride = (stride, stride)
        if isinstance(dilation, int):
            dilation = (dilation, dilation)
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.value = value

        def compute_padding(x, kernel_size, stride, dilation):
            # Total padding needed along one spatial dimension of size x.
            return max((((((math.ceil((x / stride)) - 1) * stride) + ((kernel_size - 1) * dilation)) + 1) - x), 0)
        # Kept as an instance attribute for backward compatibility with
        # callers that access `self.compute_padding` directly.
        self.compute_padding = compute_padding

    def forward(self, input):
        # BUGFIX: previously implemented as __call__, which bypasses
        # nn.Module's dispatch (hooks, scripting).  Defining forward()
        # keeps `module(x)` call sites working unchanged.
        (input_height, input_width) = input.size()[(- 2):]
        padding_height = self.compute_padding(input_height, self.kernel_size[0], self.stride[0], self.dilation[0])
        padding_width = self.compute_padding(input_width, self.kernel_size[1], self.stride[1], self.dilation[1])
        if ((padding_height > 0) or (padding_width > 0)):
            # Split left/right and top/bottom; extra pixel goes to the far side.
            input = nn.functional.pad(input, [(padding_width // 2), (padding_width - (padding_width // 2)), (padding_height // 2), (padding_height - (padding_height // 2))], value=self.value)
        return input
def transform(program):
    """Translate a CLEVR-style functional program (list of ops) into a
    logic-form string with explicitly quantified object variables.

    Each op reads the results of earlier ops via op['inputs'] and may carry
    literal arguments in op['value_inputs'].  `index_to_result` maps op
    index to (constraint_str, variable) pairs while objects are being
    built, to bare result strings for terminal ops, and to QueryXProgram
    for query_* ops (so equal_* can recover the underlying object).
    Returns the string for the program's last op.
    """
    index_to_result = dict()
    variable_counter = 0  # mints fresh variable names x1, x2, ...
    for (i, op) in enumerate(program):
        op_type = get_op_type(op)
        if (op_type == 'scene'):
            # Fresh unconstrained variable, empty constraint string.
            variable_counter += 1
            index_to_result[i] = ('', f'x{variable_counter}')
        elif (op_type in ('filter_size', 'filter_color', 'filter_material', 'filter_shape')):
            # Conjoin a unary predicate onto the running constraint.
            (program_str, variable) = index_to_result[op['inputs'][0]]
            this_program_str = f"{op['value_inputs'][0]}({variable})"
            program_str = (((this_program_str + ' and ') + program_str) if program_str else this_program_str)
            index_to_result[i] = (program_str, variable)
        elif (op_type == 'unique'):
            # Definite description: the unique object satisfying the constraint.
            (inner, variable) = index_to_result[op['inputs'][0]]
            program_str = f'iota(Object, lambda {variable}: {inner})'
            index_to_result[i] = (program_str, None)
        elif (op_type == 'relate'):
            # Binary relation between a fresh variable and a resolved object.
            variable_counter += 1
            variable = f'x{variable_counter}'
            (inner, _) = index_to_result[op['inputs'][0]]
            program_str = f"{op['value_inputs'][0]}({variable}, {inner})"
            index_to_result[i] = (program_str, variable)
        elif (op_type in ('same_size', 'same_color', 'same_material', 'same_shape')):
            variable_counter += 1
            variable = f'x{variable_counter}'
            (inner, _) = index_to_result[op['inputs'][0]]
            program_str = f'{op_type}({variable}, {inner})'
            index_to_result[i] = (program_str, variable)
        elif ((op_type == 'intersect') or (op_type == 'union')):
            # Merge two constraint sets over a single fresh variable; an
            # empty side short-circuits to the other.
            (e1, v1) = index_to_result[op['inputs'][1]]
            (e2, v2) = index_to_result[op['inputs'][0]]
            if (e1 == ''):
                index_to_result[i] = (e2, v2)
            elif (e2 == ''):
                index_to_result[i] = (e1, v1)
            else:
                assert ((v1 in e1) and (v2 in e2))
                variable_counter += 1
                variable = f'x{variable_counter}'
                # NOTE(review): plain str.replace renames the variable —
                # assumes a name like 'x1' never occurs as a substring of
                # another token (e.g. 'x10') in these strings; confirm.
                if (op_type == 'intersect'):
                    program_str = f'{e1.replace(v1, variable)} and {e2.replace(v2, variable)}'
                else:
                    program_str = f'({e1.replace(v1, variable)} or {e2.replace(v2, variable)})'
                index_to_result[i] = (program_str, variable)
        elif (op_type in ('count', 'exist')):
            # Terminal quantifier over the constraint set.
            (inner, variable) = index_to_result[op['inputs'][0]]
            if (inner == ''):
                inner = f'thing({variable})'
            if (op_type == 'exist'):
                op_type = 'exists'
            program_str = f'{op_type}(Object, lambda {variable}: {inner})'
            index_to_result[i] = program_str
        elif (op_type in ('query_shape', 'query_color', 'query_material', 'query_size')):
            # Wrap in describe(...) but keep the object sub-program so
            # equal_* ops below can compare the underlying objects.
            metaconcept = op_type.split('_')[1]
            (object_str, _) = index_to_result[op['inputs'][0]]
            program_str = f'describe({metaconcept.capitalize()}, lambda k: {metaconcept}(k, {object_str}))'
            index_to_result[i] = QueryXProgram(full_program=program_str, object_program=object_str)
        elif (op_type == 'equal_integer'):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'equal({e1}, {e2})'
            index_to_result[i] = program_str
        elif (op_type in ('greater_than', 'less_than')):
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            program_str = f'{op_type}({e1}, {e2})'
            index_to_result[i] = program_str
        elif (op_type in ('equal_color', 'equal_material', 'equal_shape', 'equal_size')):
            # Compare the *objects*, not the describe() wrappers.
            e1 = index_to_result[op['inputs'][0]]
            e2 = index_to_result[op['inputs'][1]]
            op_type = op_type.replace('equal_', 'same_')
            program_str = f'{op_type}({e1.object_program}, {e2.object_program})'
            index_to_result[i] = program_str
        else:
            raise ValueError(f'Unknown op type: {op_type}, {op}')
    ret = index_to_result[(len(program) - 1)]
    if isinstance(ret, QueryXProgram):
        ret = ret.full_program
    assert isinstance(ret, str)
    return ret
class FasterRCNNResnetV1FeatureExtractor(faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
    """Faster R-CNN feature extractor backed by a slim ResNet-V1 model.

    First-stage (RPN) features come from the end of block3; the box
    classifier runs block4 on the cropped proposal feature maps.
    """

    def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0):
        # architecture: variable-scope name of the slim resnet (e.g.
        # 'resnet_v1_101'); resnet_model: the matching slim model function.
        if ((first_stage_features_stride != 8) and (first_stage_features_stride != 16)):
            raise ValueError('`first_stage_features_stride` must be 8 or 16.')
        self._architecture = architecture
        self._resnet_model = resnet_model
        super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_training, first_stage_features_stride, batch_norm_trainable, reuse_weights, weight_decay)

    def preprocess(self, resized_inputs):
        """Subtract per-channel RGB means (no scaling) from the inputs."""
        channel_means = [123.68, 116.779, 103.939]
        return (resized_inputs - [[channel_means]])

    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Run the resnet trunk and return the block3 activations.

        Raises ValueError if the input is not rank 4; adds a runtime assert
        requiring at least 33x33 spatial size.
        """
        if (len(preprocessed_inputs.get_shape().as_list()) != 4):
            raise ValueError(('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape()))
        shape_assert = tf.Assert(tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33), tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)), ['image size must at least be 33 in both height and width.'])
        with tf.control_dependencies([shape_assert]):
            with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
                with tf.variable_scope(self._architecture, reuse=self._reuse_weights) as var_scope:
                    # num_classes=None / global_pool=False: trunk only, no head.
                    (_, activations) = self._resnet_model(preprocessed_inputs, num_classes=None, is_training=self._train_batch_norm, global_pool=False, output_stride=self._first_stage_features_stride, spatial_squeeze=False, scope=var_scope)
        # Look up block3's endpoint in the activations dict by full scope name.
        handle = (scope + ('/%s/block3' % self._architecture))
        return activations[handle]

    def _extract_box_classifier_features(self, proposal_feature_maps, scope):
        """Run a freshly stacked block4 over the cropped proposal features."""
        with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
            with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
                with slim.arg_scope([slim.batch_norm], is_training=self._train_batch_norm):
                    # block4: three bottleneck units, stride 1.
                    blocks = [resnet_utils.Block('block4', resnet_v1.bottleneck, ([{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1}] * 3))]
                    proposal_classifier_features = resnet_utils.stack_blocks_dense(proposal_feature_maps, blocks)
        return proposal_classifier_features
def close2dest(vehicle, destination):
    """Return True when the vehicle is within 20 units of the destination."""
    gap = destination.location.distance(vehicle.get_location())
    return gap < 20
def step_resnet50_tidy(model: ModelWrapper, cfg: DataflowBuildConfig):
    """Tidy up the imported ResNet-50 ONNX graph.

    Applies the standard cleanup transforms, inserts a TopK output node,
    then re-derives shapes, names and datatypes for the modified graph.
    """
    tidy_transforms = [
        GiveUniqueParameterTensors(),
        InferShapes(),
        FoldConstants(),
        RemoveStaticGraphInputs(),
        GiveUniqueNodeNames(),
        GiveReadableTensorNames(),
        InferDataTypes(),
        # TopK changes the graph, so re-run the annotation passes after it.
        InsertTopK(),
        InferShapes(),
        GiveUniqueNodeNames(),
        GiveReadableTensorNames(),
        InferDataTypes(),
    ]
    for transformation in tidy_transforms:
        model = model.transform(transformation)
    return model
def loadtxt_str(path: PathOrStr) -> np.ndarray:
    """Read a text file and return its whitespace-stripped lines as an array."""
    with open(path, 'r') as handle:
        stripped = [line.strip() for line in handle.read().splitlines()]
    return np.array(stripped)
def get_ood_model_performance_path(args):
    """Build (and create the directory for) the OOD performance output path.

    Uses the '_test' variant of the output directory when args['test'] is
    truthy, the '_full' variant otherwise; the JSON file is named after
    the checkpoint's basename.
    """
    suffix = '_test' if args['test'] else '_full'
    base_dir = args['OOD_model_performance_output_dir'] + suffix
    ckpt_name = os.path.basename(args['checkpoint'])
    mkdir(os.path.join(base_dir, ckpt_name))
    return os.path.join(base_dir, ckpt_name, ckpt_name + '_ood' + suffix + '.json')
class MmiTrainingGraphCompiler(object):
    """Builds numerator/denominator FSAs for LF-MMI training with k2.

    The denominator is the CTC topology composed with the phone LM ``P``;
    numerators are per-utterance word FSAs mapped through the lexicon and
    composed with the same denominator graph.
    """

    def __init__(self, lexicon: Lexicon, device: torch.device, oov: str='<UNK>'):
        # Inverted lexicon transducer (phones <-> words) moved to `device`.
        self.lexicon = lexicon
        L_inv = self.lexicon.L_inv.to(device)
        # NOTE(review): this arc-sorts when the ARC_SORTED property bit is
        # already set and skips sorting otherwise — looks inverted; the
        # intent is presumably `== 0` (sort only when NOT sorted).  Since
        # k2 composition requires sorted inputs, confirm before changing.
        if ((L_inv.properties & k2.fsa_properties.ARC_SORTED) != 0):
            L_inv = k2.arc_sort(L_inv)
        assert (L_inv.requires_grad is False)
        assert (oov in self.lexicon.words)
        self.L_inv = L_inv
        self.oov_id = self.lexicon.words[oov]  # fallback id for unknown words
        self.oov = oov
        self.device = device
        # CTC topology over all phones plus blank (id 0); stored inverted
        # and arc-sorted, ready for intersection with P.
        phone_symbols = get_phone_symbols(self.lexicon.phones)
        phone_symbols_with_blank = ([0] + phone_symbols)
        ctc_topo = build_ctc_topo(phone_symbols_with_blank).to(device)
        assert (ctc_topo.requires_grad is False)
        self.ctc_topo_inv = k2.arc_sort(ctc_topo.invert_())

    def compile(self, texts: Iterable[str], P: k2.Fsa, replicate_den: bool=True) -> Tuple[(k2.Fsa, k2.Fsa)]:
        """Return (numerator, denominator) FSA vectors for a batch of texts.

        P is the phone LM.  When replicate_den is True, the single
        denominator graph is replicated once per utterance so num and den
        have matching batch sizes.
        """
        assert (P.device == self.device)
        # Denominator graph: CTC topology composed with P.
        P_with_self_loops = k2.add_epsilon_self_loops(P)
        ctc_topo_P = k2.intersect(self.ctc_topo_inv, P_with_self_loops, treat_epsilons_specially=False).invert()
        ctc_topo_P = k2.arc_sort(ctc_topo_P)
        # Numerator graphs: transcripts through the lexicon, then composed
        # with the denominator graph.
        num_graphs = self.build_num_graphs(texts)
        num_graphs_with_self_loops = k2.remove_epsilon_and_add_self_loops(num_graphs)
        num_graphs_with_self_loops = k2.arc_sort(num_graphs_with_self_loops)
        num = k2.compose(ctc_topo_P, num_graphs_with_self_loops, treat_epsilons_specially=False)
        num = k2.arc_sort(num)
        # Detached copy for the denominator — presumably so gradients flow
        # into P only via the numerator path; confirm against training loss.
        ctc_topo_P_vec = k2.create_fsa_vec([ctc_topo_P.detach()])
        if replicate_den:
            indexes = torch.zeros(len(texts), dtype=torch.int32, device=self.device)
            den = k2.index_fsa(ctc_topo_P_vec, indexes)
        else:
            den = ctc_topo_P_vec
        return (num, den)

    def build_num_graphs(self, texts: List[str]) -> k2.Fsa:
        """Map transcripts to word-id linear FSAs composed with the lexicon."""
        word_ids_list = []
        for text in texts:
            word_ids = []
            for word in text.split(' '):
                if (word in self.lexicon.words):
                    word_ids.append(self.lexicon.words[word])
                else:
                    # Unknown word -> OOV id.
                    word_ids.append(self.oov_id)
            word_ids_list.append(word_ids)
        fsa = k2.linear_fsa(word_ids_list, self.device)
        fsa = k2.add_epsilon_self_loops(fsa)
        assert (fsa.device == self.device)
        num_graphs = k2.intersect(self.L_inv, fsa, treat_epsilons_specially=False).invert_()
        num_graphs = k2.arc_sort(num_graphs)
        return num_graphs

    def compile_lookahead_numerators(self, word_fsa_vec, P):
        """Compile numerators from pre-built word FSAs (look-ahead decoding).

        Same pipeline as compile(), but takes word FSAs directly instead
        of transcripts, and returns only the numerator vector.
        """
        fsa = k2.add_epsilon_self_loops(word_fsa_vec)
        assert (fsa.device == self.device)
        num_graphs = k2.intersect(self.L_inv, fsa, treat_epsilons_specially=False).invert_()
        num_graphs = k2.arc_sort(num_graphs)
        assert (P.device == self.device)
        P_with_self_loops = k2.add_epsilon_self_loops(P)
        ctc_topo_P = k2.intersect(self.ctc_topo_inv, P_with_self_loops, treat_epsilons_specially=False).invert()
        ctc_topo_P = k2.arc_sort(ctc_topo_P)
        num_graphs_with_self_loops = k2.remove_epsilon_and_add_self_loops(num_graphs)
        num_graphs_with_self_loops = k2.arc_sort(num_graphs_with_self_loops)
        num = k2.compose(ctc_topo_P, num_graphs_with_self_loops, treat_epsilons_specially=False)
        num = k2.arc_sort(num)
        return num

    # Commented-out draft kept as a no-op string literal — left untouched.
    '\n    def build_word_fsa(self, prefix, candidate_intervals, drop_prefix_tail):\n        # convert prefix_ids in BPE domain to word sequence.\n        if \'\' in prefix:\n            prefix.remove(\'\')\n\n        prefix_ids = [self.lexicon.words[word] if word in self.lexicon.words else self.oov_id \n                      for word in prefix]\n\n        # a special token that does not start with \'_\' could also be proposed in first iteration\n        # they requires \'drop_tail\' but there is no tail to drop\n        # in this case, disable the \'drop_tail\' operation\n        batch = len(candidate_intervals) \n        drop_prefix_tail = [0] * batch if prefix == [] else drop_prefix_tail\n        \n        # Prefix part \n        prefix_len = len(prefix_ids)\n        start_state = np.arange(prefix_len)\n        end_state = np.arange(prefix_len) + 1\n        labels = np.array(prefix_ids)\n        scores = np.zeros(prefix_len)\n        prefix_part = np.stack([start_state, end_state, labels, scores], axis=1)\n        \n        # candidate part\n        candidate_parts = []\n        ending_parts = []\n        for (start, end), drop_tail in zip(candidate_intervals, drop_prefix_tail): \n            num_candidate = end - start\n            start_state = np.ones(num_candidate) * (prefix_len - drop_tail)\n            end_state = np.ones(num_candidate) * (prefix_len + 1 - drop_tail)\n            labels = np.arange(start, end)\n            scores = np.zeros(num_candidate)\n            candidate_part = np.stack([start_state, end_state, labels, scores], axis=1)\n            candidate_parts.append(candidate_part)\n\n            # end arc\n            end_arc = np.array([[prefix_len + 1 - drop_tail, prefix_len + 2 - drop_tail, -1, 0]])\n            ending_parts.append(end_arc)\n        \n        \n        # assemble: do not need to arc_sort \n        num_vec = []\n        for i, (candidate_part, drop_tail) in enumerate(zip(candidate_parts, drop_prefix_tail)):\n            this_prefix_part = prefix_part[:-1] if drop_tail else prefix_part\n            end_arc = ending_parts[i]\n            num_mat = np.concatenate([this_prefix_part, candidate_part, end_arc], axis=0)\n            num_mat = torch.from_numpy(num_mat).to(torch.int32)\n            num_vec.append(num_mat)\n        \n        # convert to k2 FsaVec \n        num_vec = [k2.Fsa.from_dict({"arcs": num}) for num in num_vec]\n        num_vec = k2.create_fsa_vec(num_vec)\n        return num_vec \n    '
def repeatBlock(conv, repeat_times, all_strides=None, all_expansions=None, feature_maps_downsample=False):
    """Stack `repeat_times` basic residual blocks on top of `conv`.

    Args:
        conv: input tensor/layer to build on.
        repeat_times: number of residual blocks to stack.
        all_strides / all_expansions: per-block stride and expansion factor.
            Either both are given (each of length repeat_times) or both are
            None, in which case stride 1 is used everywhere and expansions
            alternate 0.5 / 2.
        feature_maps_downsample: append a max-pool after the stack.

    Returns:
        The output tensor/layer after the stacked blocks.

    Raises:
        ValueError: if only one of all_strides/all_expansions is supplied,
            or their lengths do not match repeat_times.
        TypeError: if they are not tuples or lists.
    """
    if (all_strides is None) != (all_expansions is None):
        # BUGFIX: previously this fell through both validation branches and
        # crashed later with an opaque TypeError when indexing None.
        raise ValueError('all_strides and all_expansions must be provided together (or both omitted)')
    if all_strides is not None:
        if not isinstance(all_strides, (tuple, list)):
            raise TypeError('all_strides must be a tuple or list')
        if not isinstance(all_expansions, (tuple, list)):
            raise TypeError('all_expansions must be a tuple or list')
        if (len(all_strides) != repeat_times) or (len(all_expansions) != repeat_times):
            raise ValueError('all_strides and all_expansions must have length repeat_times')
    else:
        # Default schedule: stride 1 everywhere, expansions alternating 0.5 / 2.
        all_strides = [1] * repeat_times
        all_expansions = [(0.5 if (i % 2) == 0 else 2) for i in range(repeat_times)]
    for stride, expansion in zip(all_strides, all_expansions):
        conv = basicResidualBlock(conv, expansion, (stride, stride), use_bias=True)
    if feature_maps_downsample:
        conv = L.maxPooling2D(conv)
    return conv
class vgg16avg_zfnet():
    """Two TF1 classifiers sharing one trainable weight codebook.

    Model 1 is a VGG16 variant with an average-pool head; model 2 is a
    ZFNet-style network.  The ``*_target`` methods use the fixed original
    weights ``w1``/``w2``; ``model1``/``model2`` instead reconstruct their
    conv/fc weights from the shared codebook ``c`` via per-layer embedding
    indices (``i1``/``i2``), so training the codebook adapts both nets.
    """

    def __init__(self, c, w1, b1, i1, outlayer1, w2, b2, i2, outlayer2):
        with tf.variable_scope('vgg16avg_zfnet'):
            # Shared trainable codebook: one variable per layer, flattened
            # to (rows*cols, depth) for embedding lookup.
            self.c = []
            codebook = []
            for i in range(15):
                codebook.append(tf.Variable(c[i], dtype=tf.float32))
                self.c.append(tf.reshape(codebook[i], [(c[i].shape[0] * c[i].shape[1]), c[i].shape[2]]))
            # Fixed reference weights/biases for model 1 (16 layers).
            self.w1 = []
            self.b1 = []
            for i in range(16):
                self.w1.append(tf.constant(w1[i], tf.float32))
                self.b1.append(tf.constant(b1[i], tf.float32))
            # Embedding indices of model 1 into the codebook.
            self.i1 = []
            for i in range(len(i1)):
                self.i1.append(tf.constant(embedding_idx(i1[i], c[i].shape[1]), dtype=tf.int32))
            # Fixed reference weights/biases for model 2 (8 layers).
            self.w2 = []
            self.b2 = []
            for i in range(8):
                self.w2.append(tf.constant(w2[i], tf.float32))
                self.b2.append(tf.constant(b2[i], tf.float32))
            # Codebook entries that model 2 shares with model 1.
            self.shared2_index = [0, 3, 6, 9, 12, 13, 14]
            self.i2 = []
            for i in range(len(i2)):
                self.i2.append(tf.constant(embedding_idx(i2[i], c[self.shared2_index[i]].shape[1]), dtype=tf.int32))
            # Final classifier layers (kept trainable in model1/model2).
            self.outlayer1 = outlayer1
            self.outlayer2 = outlayer2

    def model1_target(self, image):
        """VGG16 forward pass with the fixed reference weights."""
        output = []
        output.append(tf.nn.relu((conv2d(image, self.w1[0]) + self.b1[0])))
        output.append(tf.nn.relu((conv2d(output[0], self.w1[1]) + self.b1[1])))
        output.append(max_pool_2x2(output[1]))
        output.append(tf.nn.relu((conv2d(output[2], self.w1[2]) + self.b1[2])))
        output.append(tf.nn.relu((conv2d(output[3], self.w1[3]) + self.b1[3])))
        output.append(max_pool_2x2(output[4]))
        output.append(tf.nn.relu((conv2d(output[5], self.w1[4]) + self.b1[4])))
        output.append(tf.nn.relu((conv2d(output[6], self.w1[5]) + self.b1[5])))
        output.append(tf.nn.relu((conv2d(output[7], self.w1[6]) + self.b1[6])))
        output.append(max_pool_2x2(output[8]))
        output.append(tf.nn.relu((conv2d(output[9], self.w1[7]) + self.b1[7])))
        output.append(tf.nn.relu((conv2d(output[10], self.w1[8]) + self.b1[8])))
        output.append(tf.nn.relu((conv2d(output[11], self.w1[9]) + self.b1[9])))
        output.append(max_pool_2x2(output[12]))
        output.append(tf.nn.relu((conv2d(output[13], self.w1[10]) + self.b1[10])))
        output.append(tf.nn.relu((conv2d(output[14], self.w1[11]) + self.b1[11])))
        output.append(tf.nn.relu((conv2d(output[15], self.w1[12]) + self.b1[12])))
        output.append(avg_pool(output[16]))
        output.append(tf.nn.relu((tf.matmul(tf.contrib.layers.flatten(output[17]), self.w1[13]) + self.b1[13])))
        output.append(tf.nn.relu((tf.matmul(output[18], self.w1[14]) + self.b1[14])))
        output.append((tf.matmul(output[19], self.w1[15]) + self.b1[15]))
        return output

    def model1(self, image):
        """VGG16 forward pass with weights reconstructed from the codebook.

        Conv weights (layers 0-12) and FC weights (13-14) are rebuilt by
        embedding lookup into the shared codebook; the output layer (15)
        is a fresh trainable variable.
        """
        w1 = []
        for i in range(13):
            w1.append(tf.transpose(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i], self.i1[i]), [0, 2, 1]), [self.w1[i].shape[2], self.w1[i].shape[0], self.w1[i].shape[1], self.w1[i].shape[3]]), (1, 2, 0, 3)))
        for i in range(13, 15):
            w1.append(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[i], self.i1[i]), [0, 2, 1]), [self.w1[i].shape[0], self.w1[i].shape[1]]))
        w1.append(tf.Variable(self.outlayer1, dtype=tf.float32))
        output = []
        output.append(tf.nn.relu((conv2d(image, w1[0]) + self.b1[0])))
        output.append(tf.nn.relu((conv2d(output[0], w1[1]) + self.b1[1])))
        output.append(max_pool_2x2(output[1]))
        output.append(tf.nn.relu((conv2d(output[2], w1[2]) + self.b1[2])))
        output.append(tf.nn.relu((conv2d(output[3], w1[3]) + self.b1[3])))
        output.append(max_pool_2x2(output[4]))
        output.append(tf.nn.relu((conv2d(output[5], w1[4]) + self.b1[4])))
        output.append(tf.nn.relu((conv2d(output[6], w1[5]) + self.b1[5])))
        output.append(tf.nn.relu((conv2d(output[7], w1[6]) + self.b1[6])))
        output.append(max_pool_2x2(output[8]))
        output.append(tf.nn.relu((conv2d(output[9], w1[7]) + self.b1[7])))
        output.append(tf.nn.relu((conv2d(output[10], w1[8]) + self.b1[8])))
        output.append(tf.nn.relu((conv2d(output[11], w1[9]) + self.b1[9])))
        output.append(max_pool_2x2(output[12]))
        output.append(tf.nn.relu((conv2d(output[13], w1[10]) + self.b1[10])))
        output.append(tf.nn.relu((conv2d(output[14], w1[11]) + self.b1[11])))
        output.append(tf.nn.relu((conv2d(output[15], w1[12]) + self.b1[12])))
        output.append(avg_pool(output[16]))
        output.append(tf.nn.relu((tf.matmul(tf.contrib.layers.flatten(output[17]), w1[13]) + self.b1[13])))
        output.append(tf.nn.relu((tf.matmul(output[18], w1[14]) + self.b1[14])))
        output.append((tf.matmul(output[19], w1[15]) + self.b1[15]))
        return output

    def model2_target(self, image):
        """ZFNet-style forward pass with the fixed reference weights."""
        output = []
        output.append(tf.nn.relu((conv2d_2(image, self.w2[0]) + self.b2[0])))
        output.append(max_pool_3(spatial_lrn(output[0], local_size=3, alpha=(5e-05 * 9), beta=0.75)))
        output.append(tf.nn.relu((conv2d_2(output[1], self.w2[1]) + self.b2[1])))
        output.append(max_pool_3(spatial_lrn(output[2], local_size=3, alpha=(5e-05 * 9), beta=0.75)))
        output.append(tf.nn.relu((conv2d(output[3], self.w2[2]) + self.b2[2])))
        output.append(tf.nn.relu((conv2d(output[4], self.w2[3]) + self.b2[3])))
        output.append(max_pool_v(tf.nn.relu((conv2d(output[5], self.w2[4]) + self.b2[4]))))
        output.append(tf.nn.relu((tf.matmul(tf.contrib.layers.flatten(output[6]), self.w2[5]) + self.b2[5])))
        output.append(tf.nn.relu((tf.matmul(output[7], self.w2[6]) + self.b2[6])))
        output.append((tf.matmul(output[8], self.w2[7]) + self.b2[7]))
        return output

    def model2(self, image):
        """ZFNet-style forward pass with weights rebuilt from the shared
        codebook entries listed in self.shared2_index."""
        w2 = []
        for i in range(5):
            w2.append(tf.transpose(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[self.shared2_index[i]], self.i2[i]), [0, 2, 1]), [self.w2[i].shape[2], self.w2[i].shape[0], self.w2[i].shape[1], self.w2[i].shape[3]]), (1, 2, 0, 3)))
        for i in range(5, 7):
            w2.append(tf.reshape(tf.transpose(tf.nn.embedding_lookup(self.c[self.shared2_index[i]], self.i2[i]), [0, 2, 1]), [self.w2[i].shape[0], self.w2[i].shape[1]]))
        w2.append(tf.Variable(self.outlayer2, dtype=tf.float32))
        output = []
        output.append(tf.nn.relu((conv2d_2(image, w2[0]) + self.b2[0])))
        output.append(max_pool_3(spatial_lrn(output[0], local_size=3, alpha=(5e-05 * 9), beta=0.75)))
        output.append(tf.nn.relu((conv2d_2(output[1], w2[1]) + self.b2[1])))
        output.append(max_pool_3(spatial_lrn(output[2], local_size=3, alpha=(5e-05 * 9), beta=0.75)))
        output.append(tf.nn.relu((conv2d(output[3], w2[2]) + self.b2[2])))
        output.append(tf.nn.relu((conv2d(output[4], w2[3]) + self.b2[3])))
        output.append(max_pool_v(tf.nn.relu((conv2d(output[5], w2[4]) + self.b2[4]))))
        output.append(tf.nn.relu((tf.matmul(tf.contrib.layers.flatten(output[6]), w2[5]) + self.b2[5])))
        output.append(tf.nn.relu((tf.matmul(output[7], w2[6]) + self.b2[6])))
        output.append((tf.matmul(output[8], w2[7]) + self.b2[7]))
        return output

    def get_imshape(self):
        """Return the expected (H, W, C) input shapes of model 1 and model 2."""
        imshape = {'model1': [224, 224, 3], 'model2': [227, 227, 3]}
        return (imshape['model1'], imshape['model2'])
def list_pretrained(as_str: bool=False):
    """Enumerate all pretrained (model, tag) combinations in _PRETRAINED.

    Returns 'model:tag' strings when as_str is True, otherwise
    (model, tag) tuples, in registry order.
    """
    entries = []
    for model_name in _PRETRAINED.keys():
        for tag in _PRETRAINED[model_name].keys():
            entries.append(':'.join([model_name, tag]) if as_str else (model_name, tag))
    return entries
class GANLoss(nn.Module):
    """GAN objective supporting lsgan / vanilla / hinge / wgangp modes.

    Target label tensors are built automatically from buffers, so callers
    only pass the discriminator prediction and whether the target is real.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        super(GANLoss, self).__init__()
        # Buffers so the label constants follow the module across devices.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        criteria = {
            'lsgan': nn.MSELoss,
            'vanilla': nn.BCEWithLogitsLoss,
            'hinge': nn.ReLU,
        }
        if gan_mode in criteria:
            self.loss = criteria[gan_mode]()
        elif gan_mode == 'wgangp':
            self.loss = None  # wgangp uses the raw mean, no criterion
        else:
            raise NotImplementedError(('gan mode %s not implemented' % gan_mode))

    def __call__(self, prediction, target_is_real, is_disc=False):
        if self.gan_mode in ['lsgan', 'vanilla']:
            # Regress/classify the prediction against a constant label map.
            target = self.real_label if target_is_real else self.fake_label
            target = target.expand_as(prediction).type_as(prediction)
            return self.loss(prediction, target)
        # hinge / wgangp:
        if is_disc:
            # Discriminator side: flip sign for real samples.
            if target_is_real:
                prediction = (- prediction)
            if self.gan_mode == 'hinge':
                return self.loss((1 + prediction)).mean()
            return prediction.mean()  # wgangp discriminator term
        # Generator side (both hinge and wgangp): maximize the critic score.
        return (- prediction.mean())
def comment_out_line(filepath, code):
    """Comment out every line of *filepath* whose stripped text matches *code*.

    *code* is treated as a regular expression anchored at the start of the
    stripped line (re.match).  Matching lines are prefixed with '#' and the
    file is rewritten in place.
    """
    with open(filepath, 'r') as fh:
        lines = fh.readlines()
    # BUGFIX: removed a dead `file.seek(0)` — the file had already been
    # fully read and was never used again inside the read context.
    modified_lines = [('#' + line) if re.match(code, line.strip()) else line
                      for line in lines]
    with open(filepath, 'w') as fh:
        fh.writelines(modified_lines)
class DdpCheckpoinerTest(unittest.TestCase):
    """Round-trip test for DdpCheckpointer's in-memory checkpoint storage."""

    def setUp(self) -> None:
        # Reset the saver singleton so each test spins up a fresh async saver.
        DdpCheckpointSaver._saver_instance = None
        DdpCheckpointSaver.start_async_saving_ckpt()

    def tearDown(self) -> None:
        # Shut down the async saver started in setUp, if one was created.
        if DdpCheckpointSaver._saver_instance:
            DdpCheckpointSaver._saver_instance.close()

    def test_ddp_checkpointer(self):
        # Save a model state dict to MEMORY storage, then load it back and
        # verify the 'model' entry survived the round trip.
        model = SimpleNet()
        with tempfile.TemporaryDirectory() as tmpdir:
            checkpointer = DdpCheckpointer(tmpdir)
            step = 100
            sd = {'model': model.state_dict()}
            checkpointer.save_checkpoint(step, sd, storage_type=StorageType.MEMORY)
            sd = checkpointer.load_checkpoint()
            self.assertTrue(('model' in sd))
def create_run(experiment, command_name, config_updates=None, named_configs=(), force=False):
    """Assemble a Run object for `command_name` of `experiment`.

    Gathers ingredients, builds per-ingredient scaffolding, applies named
    configs and explicit config updates, seeds each scaffold, and wires up
    loggers, observers and pre/post run hooks.
    """
    sorted_ingredients = gather_ingredients_topological(experiment)
    scaffolding = create_scaffolding(experiment, sorted_ingredients)
    # Longest dotted paths first, so updates reach the deepest scaffold.
    prefixes = sorted([s.split('.') for s in scaffolding if (s != '')], reverse=True, key=(lambda p: len(p)))
    config_updates = (config_updates or {})
    config_updates = convert_to_nested_dict(config_updates)
    (root_logger, run_logger) = initialize_logging(experiment, scaffolding)
    distribute_config_updates(prefixes, scaffolding, config_updates)
    # Apply named configs: their values act as presets that explicit
    # config_updates can still override.
    for ncfg in named_configs:
        (scaff, cfg_name) = get_scaffolding_and_config_name(ncfg, scaffolding)
        scaff.gather_fallbacks()
        ncfg_updates = scaff.run_named_config(cfg_name)
        distribute_presets(prefixes, scaffolding, ncfg_updates)
        for (ncfg_key, value) in iterate_flattened(ncfg_updates):
            set_by_dotted_path(config_updates, join_paths(scaff.path, ncfg_key), value)
    distribute_config_updates(prefixes, scaffolding, config_updates)
    for scaffold in scaffolding.values():
        scaffold.gather_fallbacks()
        scaffold.set_up_config()
    config = get_configuration(scaffolding)
    # NOTE(review): `scaffold` here is the loop variable left over from the
    # loop above, so config hooks run only for the *last* scaffold —
    # possibly intended per-scaffold (inside the loop); confirm.
    config_updates = scaffold.run_config_hooks(config, config_updates, command_name, run_logger)
    # Seed children before parents (reversed order) so seeds are derived
    # deterministically down the ingredient tree.
    for scaffold in reversed(list(scaffolding.values())):
        scaffold.set_up_seed()
    config = get_configuration(scaffolding)
    config_modifications = get_config_modifications(scaffolding)
    experiment_info = experiment.get_experiment_info()
    host_info = get_host_info()
    main_function = get_command(scaffolding, command_name)
    pre_runs = [pr for ing in sorted_ingredients for pr in ing.pre_run_hooks]
    post_runs = [pr for ing in sorted_ingredients for pr in ing.post_run_hooks]
    run = Run(config, config_modifications, main_function, copy(experiment.observers), root_logger, run_logger, experiment_info, host_info, pre_runs, post_runs, experiment.captured_out_filter)
    if hasattr(main_function, 'unobserved'):
        run.unobserved = main_function.unobserved
    run.force = force
    for scaffold in scaffolding.values():
        scaffold.finalize_initialization(run=run)
    return run
def pnv_write_eval_stats(file_name, prefix, stats):
    """Append one CSV-style row of evaluation stats to *file_name*.

    The row starts with *prefix*, then per-dataset pairs of
    (ave_one_percent_recall, ave_recall[0]), and ends with the means of
    both columns across datasets.
    """
    one_percent_recalls = []
    top1_recalls = []
    row = prefix
    with open(file_name, 'a') as fh:
        for dataset in stats:
            recall_1p = stats[dataset]['ave_one_percent_recall']
            recall_top1 = stats[dataset]['ave_recall'][0]
            one_percent_recalls.append(recall_1p)
            top1_recalls.append(recall_top1)
            row += ', {:0.2f}, {:0.2f}'.format(recall_1p, recall_top1)
        row += ', {:0.2f}, {:0.2f}\n'.format(np.mean(one_percent_recalls), np.mean(top1_recalls))
        fh.write(row)
def test_modal_datamodule_train_data(fs, mocker):
    """Train loader yields audio batches shaped (batch_size, 1, num_samples).

    Uses the pyfakefs `fs` and pytest-mock `mocker` fixtures; torchaudio.load
    is patched to return random audio so no real files need decoding.
    """
    dm = kick_modal_datamodule(fs, mocker)
    dm.setup('fit')
    train_loader = dm.train_dataloader()
    assert isinstance(train_loader, DataLoader)
    # Patch decoding *before* the first batch is pulled from the loader.
    _ = mocker.patch(f'{TESTED_MODULE}.torchaudio.load', return_value=(torch.rand(1, dm.num_samples), dm.sample_rate))
    (audio_batch,) = next(iter(train_loader))
    assert (audio_batch.shape == (dm.batch_size, 1, dm.num_samples))
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (in percent) for classification predictions.

    Args:
        output: (N, C) score tensor.
        target: (N,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one accuracy per requested k.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)
        # Indices of the k_max highest-scoring classes, one column per sample.
        _, top_indices = output.topk(k_max, 1, True, True)
        top_indices = top_indices.t()
        hits = top_indices.eq(target.view(1, -1).expand_as(top_indices))
        results = []
        for k in topk:
            # A sample counts as correct if the target is within its top k.
            n_correct = hits[:k].contiguous().view(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / n_samples))
        return results
class ResNet(nn.Module):
    """SwAV-style ResNet backbone with optional projection head and prototypes.

    Visible differences from a stock torchvision ResNet:
    - a ConstantPad2d(1) precedes conv1, which uses padding=2 (not 3);
    - `widen` multiplies the channel width of every stage;
    - `forward` accepts a list of multi-crop inputs and batches crops of equal
      spatial resolution together before running the backbone (moves them to CUDA).
    """

    def __init__(self, block, layers, zero_init_residual=False, groups=1, widen=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, normalize=False, output_dim=0, hidden_mlp=0, nmb_prototypes=0, eval_mode=False):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # When True, forward_backbone returns the raw 4D feature map (no pooling).
        self.eval_mode = eval_mode
        self.padding = nn.ConstantPad2d(1, 0.0)
        self.inplanes = (width_per_group * widen)
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per stage 2-4: replace the stride-2 downsampling with dilation.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        num_out_filters = (width_per_group * widen)
        self.conv1 = nn.Conv2d(3, num_out_filters, kernel_size=7, stride=2, padding=2, bias=False)
        self.bn1 = norm_layer(num_out_filters)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, num_out_filters, layers[0])
        # Channel width doubles at each subsequent stage.
        num_out_filters *= 2
        self.layer2 = self._make_layer(block, num_out_filters, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        num_out_filters *= 2
        self.layer3 = self._make_layer(block, num_out_filters, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        num_out_filters *= 2
        self.layer4 = self._make_layer(block, num_out_filters, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # If True, forward_head L2-normalizes the embedding before the prototypes.
        self.l2norm = normalize
        # Projection head: identity / single linear / 2-layer MLP, by configuration.
        if (output_dim == 0):
            self.projection_head = None
        elif (hidden_mlp == 0):
            self.projection_head = nn.Linear((num_out_filters * block.expansion), output_dim)
        else:
            self.projection_head = nn.Sequential(nn.Linear((num_out_filters * block.expansion), hidden_mlp), nn.BatchNorm1d(hidden_mlp), nn.ReLU(inplace=True), nn.Linear(hidden_mlp, output_dim))
        # Prototypes: multiple heads (list), a single linear head (int > 0), or none.
        self.prototypes = None
        if isinstance(nmb_prototypes, list):
            self.prototypes = MultiPrototypes(output_dim, nmb_prototypes)
        elif (nmb_prototypes > 0):
            self.prototypes = nn.Linear(output_dim, nmb_prototypes, bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual block so blocks start as identity.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of `blocks` blocks, downsampling on the first."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward_backbone(self, x):
        """Run the convolutional trunk; returns pooled flat features, or the 4D map in eval_mode."""
        x = self.padding(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if self.eval_mode:
            return x
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x

    def forward_head(self, x):
        """Apply projection head, optional L2 norm, and prototypes (if configured)."""
        if (self.projection_head is not None):
            x = self.projection_head(x)
        if self.l2norm:
            x = nn.functional.normalize(x, dim=1, p=2)
        if (self.prototypes is not None):
            return (x, self.prototypes(x))
        return x

    def forward(self, inputs):
        """Multi-crop forward: crops of identical resolution are concatenated and
        run through the backbone in one pass; all features then go through the head.

        NOTE(review): inputs are moved to CUDA unconditionally — this requires a GPU.
        """
        if (not isinstance(inputs, list)):
            inputs = [inputs]
        # Boundaries between groups of consecutive crops sharing the same width.
        idx_crops = torch.cumsum(torch.unique_consecutive(torch.tensor([inp.shape[(- 1)] for inp in inputs]), return_counts=True)[1], 0)
        start_idx = 0
        for end_idx in idx_crops:
            _out = self.forward_backbone(torch.cat(inputs[start_idx:end_idx]).cuda(non_blocking=True))
            if (start_idx == 0):
                output = _out
            else:
                output = torch.cat((output, _out))
            start_idx = end_idx
        return self.forward_head(output)
def get_train_val_paths(all_paths, path_to_train_val_pkl):
    """Split *all_paths* into (train, val) according to patient ids in a JSON split file.

    A path tuple belongs to a patient when its first entry contains
    '<patient_id>_ct.nii.gz'.
    """
    split_file = pathlib.Path(path_to_train_val_pkl)
    with open(split_file) as fh:
        split = json.load(fh)

    def _belongs_to(path, patient_ids):
        return any((pid + '_ct.nii.gz') in str(path[0]) for pid in patient_ids)

    train_paths = [p for p in all_paths if _belongs_to(p, split['train'])]
    val_paths = [p for p in all_paths if _belongs_to(p, split['val'])]
    return (train_paths, val_paths)
def download(label, name, path):
    """Download the full YouTube video *name* into `<path>/<label>/<name>.mp4` via youtube-dl.

    Skips the download when the target file already exists.

    BUG FIX: the original `link_prefix = '` was an unterminated string literal
    (the URL was stripped from the source), which made the file unparseable.
    Restored to the standard YouTube watch-URL prefix that a video id is
    appended to — confirm against the original source.
    """
    import shlex
    label = label.replace(' ', '_')
    path_data = os.path.join(path, label)
    if not os.path.exists(path_data):
        os.makedirs(path_data)
    link_prefix = 'https://www.youtube.com/watch?v='
    print(name)
    filename = os.path.join(path_data, name) + '.mp4'
    link = link_prefix + name
    if os.path.exists(filename):
        print('already exists, skip')
        return
    print(('download the whole video for: [%s] - [%s]' % (label, name)))
    # Quote the URL and output path so ids/paths with shell metacharacters are safe.
    command1 = 'youtube-dl '
    command1 += shlex.quote(link) + ' '
    command1 += '-o ' + shlex.quote(filename) + ' '
    command1 += '-f best '
    command1 += '-q '
    os.system(command1)
    print('finish the video as: ' + filename)
    return
def validate(val_loader, model, criterion, args):
    """Evaluate *model* over *val_loader*; return (avg top-1 acc, avg top-5 acc).

    BUG FIX: the 'Acc@1'/'Acc@5' labels had been stripped from the meter names
    and from the progress format string, leaving empty meter titles and invalid
    '\\ ' escape sequences. Restored to the standard PyTorch ImageNet-example
    output format.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
    model.eval()
    with torch.no_grad():
        end = time.time()
        for (i, (images, target)) in enumerate(val_loader):
            if (args.gpu is not None):
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)
            output = model(images)
            loss = criterion(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            # Only the main process (single-process run or gpu 0) prints progress.
            if ((args.gpu is None) or (args.gpu == 0)):
                if ((i % args.print_freq) == 0):
                    print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})\tAcc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return (top1.avg, top5.avg)
class RobertaTokenizerFast(GPT2TokenizerFast):
    """Fast (Rust-tokenizers-backed) RoBERTa tokenizer: byte-level BPE with
    RoBERTa's <s> ... </s> post-processing.

    NOTE(review): the warning below appears to have lost its URL ("See for more
    information"); the `errors` parameter is accepted but not forwarded to the
    parent constructor — confirm both against upstream transformers.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        # Only set these tokens if the caller did not already provide them.
        kwargs.setdefault('pad_token', pad_token)
        kwargs.setdefault('sep_token', sep_token)
        kwargs.setdefault('cls_token', cls_token)
        kwargs.setdefault('mask_token', mask_token)
        super().__init__(vocab_file=vocab_file, merges_file=merges_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        # Install RoBERTa-style special-token post-processing on the backend tokenizer.
        self.tokenizer._tokenizer.post_processor = RobertaProcessing((sep_token, self.sep_token_id), (cls_token, self.cls_token_id))
        # Budget for content tokens once special tokens are accounted for.
        self.max_len_single_sentence = (self.max_len - self.num_added_tokens(False))
        self.max_len_sentences_pair = (self.max_len - self.num_added_tokens(True))
        logger.warning('RobertaTokenizerFast has an issue when working on mask language modeling where it introduces an extra encoded space before the mask token.See for more information.')

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap ids as <s> A </s> (single) or <s> A </s> </s> B </s> (pair)."""
        output = (([self.bos_token_id] + token_ids_0) + [self.eos_token_id])
        if (token_ids_1 is None):
            return output
        return (((output + [self.eos_token_id]) + token_ids_1) + [self.eos_token_id])
def load_prior_model(*loadpath, epoch=None, device='cuda:0'):
    """Load a serialized prior model from `os.path.join(*loadpath)`.

    `epoch` may be an int, None, or the string 'latest' (resolved via
    get_latest_epoch). Returns (model, epoch).

    BUG FIX: the original compared `epoch is 'latest'` — identity comparison
    against a string literal is implementation-dependent and raises a
    SyntaxWarning; use `==`. Also closes the config file instead of leaking
    the handle.
    """
    loadpath = os.path.join(*loadpath)
    config_path = os.path.join(loadpath, 'prior_model_config.pkl')
    if epoch == 'latest':
        epoch = get_latest_epoch(loadpath, 'prior_')
    print(f'[ utils/serialization ] Loading model epoch: {epoch}')
    state_path = os.path.join(loadpath, f'prior_state_{epoch}.pt')
    with open(config_path, 'rb') as f:
        config = pickle.load(f)
    # State is loaded onto cuda:0 when available, else CPU; the model itself
    # is then moved to the requested *device*.
    map_location = (torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu'))
    state = torch.load(state_path, map_location=map_location)
    model = config()
    model.to(device)
    model.load_state_dict(state, strict=True)
    print(f'''
[ utils/serialization ] Loaded config from {config_path}
''')
    print(config)
    return (model, epoch)
def create_app():
    """Build the Flask app exposing the reference-extraction HTTP endpoints.

    BUG FIX: the route/argument decorators had been stripped to bare
    expressions (`('/extract_journal_info', ...)`, `_args({...})`), so no
    endpoint was ever registered. Restored `@app.route(...)` and webargs'
    `@use_args(...)` — confirm the decorator names against the original source.
    """
    app = Flask(__name__)
    app.config.from_pyfile('config.cfg', silent=True)

    @app.route('/extract_journal_info', methods=['POST'])
    @use_args({'publication_infos': fields.List(fields.Dict, required=True), 'journal_kb_data': fields.Dict(required=True)}, locations=('json',))
    def extract_journal_info(args):
        """Extract journal info from each publication's free-text pubinfo."""
        publication_infos = args.pop('publication_infos')
        journal_kb_data = args.pop('journal_kb_data')
        extracted_publication_infos = []
        journal_dict = {'journals': journal_kb_data}
        try:
            for publication_info in publication_infos:
                # Entries without free text yield {} so positions stay aligned.
                if (not publication_info.get('pubinfo_freetext')):
                    extracted_publication_infos.append({})
                    continue
                extracted_publication_info = extract_journal_reference(publication_info['pubinfo_freetext'], override_kbs_files=journal_dict)
                if (not extracted_publication_info):
                    extracted_publication_info = {}
                extracted_publication_infos.append(extracted_publication_info)
        except Exception as e:
            return make_response(jsonify({'message': f'Can not extract publication info data. Reason: {str(e)}'}), 500)
        return jsonify({'extracted_publication_infos': extracted_publication_infos})

    @app.route('/extract_references_from_text', methods=['POST'])
    @use_args({'text': fields.String(required=True), 'journal_kb_data': fields.Dict(required=True)}, locations=('json',))
    def extract_references_from_text(args):
        """Extract references from a raw text blob."""
        text = args.pop('text')
        journal_kb_data = args.pop('journal_kb_data')
        journal_dict = {'journals': journal_kb_data}
        try:
            extracted_references = extract_references_from_string(text, override_kbs_files=journal_dict, reference_format='{title},{volume},{page}')
        except Exception as e:
            return make_response(jsonify({'message': f'Can not extract references. Reason: {str(e)}'}), 500)
        return jsonify({'extracted_references': extracted_references})

    @app.route('/extract_references_from_url', methods=['POST'])
    @use_args({'url': fields.String(required=True), 'journal_kb_data': fields.Dict(required=True)}, locations=('json',))
    def extract_references_from_file_url(args):
        """Extract references from the document fetched from *url*."""
        url = args.pop('url')
        journal_kb_data = args.pop('journal_kb_data')
        journal_dict = {'journals': journal_kb_data}
        try:
            extracted_references = extract_references_from_url(url, **{'override_kbs_files': journal_dict, 'reference_format': '{title},{volume},{page}'})
        except Exception as e:
            return make_response(jsonify({'message': f'Can not extract references. Reason: {str(e)}'}), 500)
        return jsonify({'extracted_references': extracted_references})

    return app
# BUG FIX: the decorator had been stripped to a bare `_comparison(...)` call,
# so the baseline-image comparison never ran. Restored matplotlib's
# @image_comparison — confirm the decorator name against the original source.
@image_comparison(baseline_images=['3d_sorted'], remove_text=False, extensions=['png'])
def test_3d_sorted(grid_archive_3d):
    """Parallel-axes plot of a 3D grid archive with sorted axes matches the baseline image."""
    plt.figure(figsize=(8, 6))
    parallel_axes_plot(grid_archive_3d, sort_archive=True)
class Config():
    """Static training/testing configuration for the InterHand26M pipeline."""
    # Visualization / debugging toggles.
    vis = False
    debug = False
    # Datasets.
    trainset_3d = ['InterHand26M']
    trainset_2d = []
    testset = 'InterHand26M'
    # Model settings.
    hand_resnet_type = 50  # ResNet backbone depth
    input_img_shape = (256, 256)
    input_hm_shape = (64, 64, 64)
    output_hm_shape = (8, 8, 8)
    bbox_3d_size = 0.3  # size of the 3D bounding box — unit presumably meters; confirm
    sigma = 2.5
    # Optimization settings.
    lr = 0.0001
    lr_dec_factor = 10
    lr_dec_epoch = [15, 17]  # epochs at which the learning rate is divided by lr_dec_factor
    end_epoch = 40
    train_batch_size = 32
    test_batch_size = 8
    contact_thr = 0.005
    # Runtime settings (may be overridden by set_args).
    num_thread = 40
    gpu_ids = '0'
    num_gpus = 1
    continue_train = False
    # Directory layout, anchored at the repository root (one level above this file).
    cur_dir = osp.dirname(os.path.abspath(__file__))
    root_dir = osp.join(cur_dir, '..')
    data_dir = osp.join(root_dir, 'data')
    output_dir = osp.join(root_dir, 'output')
    model_dir = osp.join(output_dir, 'model_dump')
    vis_dir = osp.join(output_dir, 'vis')
    log_dir = osp.join(output_dir, 'log')
    result_dir = osp.join(output_dir, 'result')
    human_model_path = osp.join(root_dir, 'common', 'utils', 'human_model_files')

    def set_args(self, gpu_ids, continue_train=False):
        """Apply runtime GPU selection (sets CUDA_VISIBLE_DEVICES) and resume flag.

        gpu_ids is a comma-separated device string, e.g. '0,1'.
        """
        self.gpu_ids = gpu_ids
        self.num_gpus = len(self.gpu_ids.split(','))
        self.continue_train = continue_train
        os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_ids
        print('>>> Using GPU: {}'.format(self.gpu_ids))
class Encoder(Model):
    """DenseNet-169 backbone exposing intermediate pooling features for skip connections."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.base_model = DenseNet169(input_shape=(None, None, 3), include_top=False, weights='imagenet')
        print('Base model loaded {}'.format(DenseNet169.__name__))
        # Final feature map first, then the intermediate skip-connection layers.
        skip_layer_names = ['pool1', 'pool2_pool', 'pool3_pool', 'conv1/relu']
        feature_outputs = [self.base_model.outputs[-1]]
        for layer_name in skip_layer_names:
            feature_outputs.append(self.base_model.get_layer(layer_name).output)
        self.encoder = Model(inputs=self.base_model.inputs, outputs=feature_outputs)

    def call(self, x):
        """Return [final_features, pool1, pool2_pool, pool3_pool, conv1/relu] for *x*."""
        return self.encoder(x)
def resnet101(**kwargs):
    """Build a ResNet-101 (Bottleneck blocks, stages [3, 4, 23, 3]) and initialize parameters.

    NOTE(review): `hidden_size` and `init_param` are not defined in this block;
    they are presumably module-level globals — confirm they exist at import time,
    and that this ResNet's constructor actually takes `hidden_size` first.
    """
    model = ResNet(hidden_size, Bottleneck, [3, 4, 23, 3], **kwargs)
    model.apply(init_param)
    return model
def set_logging(save_dir, gpu, rerun=False):
    """Route INFO logging to stdout and to `<save_dir>/log.txt`.

    Creates *save_dir*; unless rerun=True, an existing directory raises
    FileExistsError (guards against clobbering a previous run).
    """
    os.makedirs(save_dir, exist_ok=rerun)
    fmt = f'%(asctime)s (GPU {gpu}: {save_dir}) %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=fmt, datefmt='[%y/%m/%d %H:%M:%S]')
    file_handler = logging.FileHandler(f'{save_dir}/log.txt')
    file_handler.setFormatter(logging.Formatter(fmt))
    logging.getLogger().addHandler(file_handler)
class HintLoss(nn.Module):
    """FitNets-style hint loss: mean-squared error between student and teacher features."""

    def __init__(self):
        super(HintLoss, self).__init__()
        self.crit = nn.MSELoss()

    def forward(self, f_s, f_t):
        # Plain MSE between the two feature tensors.
        return self.crit(f_s, f_t)
class GlobalAttention(torch.nn.Module):
    """Luong-style global attention over an encoder memory bank.

    Wraps a pluggable scoring module (`attention`); supports optional masking
    and coverage accumulation. 2-D queries are treated as a single decoding
    step and squeezed back on return.
    """

    def __init__(self, decoder_hidden_size, encoder_hidden_size, attention):
        super(GlobalAttention, self).__init__()
        self.decoder_hidden_size = decoder_hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.attention = attention
        # Bias in the output projection only for MLP (Bahdanau-style) attention.
        self.output_layer = torch.nn.Linear((decoder_hidden_size + encoder_hidden_size), decoder_hidden_size, bias=isinstance(attention, MLPAttention))

    def forward(self, source, memory_bank, mask=None, coverage=None):
        """Attend over *memory_bank* with query *source*.

        Returns (attn_h, align_vectors, coverage).
        """
        one_step = False
        if (source.dim() == 2):
            one_step = True
            source = source.unsqueeze(1)
        # BUG FIX: read the sizes *after* the optional unsqueeze — the original
        # unpacked three dims from a 2-D tensor, crashing the one-step path.
        (batch_, target_l, dim_) = source.size()
        if (isinstance(self.attention, MLPAttention) and (coverage is not None)):
            align = self.attention(source, memory_bank, coverage)
        elif isinstance(self.attention, BiaffineAttention):
            align = self.attention(source, memory_bank).squeeze(1)
        else:
            align = self.attention(source, memory_bank)
        if (mask is not None):
            # Mask out padded memory positions before the softmax.
            mask = mask.byte().unsqueeze(1)
            align.masked_fill_((1 - mask), (- float('inf')))
        align_vectors = F.softmax(align, 2)
        # Context vector: attention-weighted sum of the memory bank.
        c = torch.bmm(align_vectors, memory_bank)
        concat_c = torch.cat([c, source], 2).view((batch_ * target_l), (- 1))
        attn_h = self.output_layer(concat_c).view(batch_, target_l, (- 1))
        attn_h = torch.tanh(attn_h)
        if (coverage is not None):
            coverage = (coverage + align_vectors)
        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)
        return (attn_h, align_vectors, coverage)
# BUG FIX: the decorator had been stripped to `_grad()` (an undefined name);
# restored to @torch.no_grad() as in the DETR-style original.
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Compute precision@k (percent) for the given topk values.

    Returns a zero scalar tensor when *target* is empty (e.g. no ground-truth
    boxes in a batch).
    """
    if (target.numel() == 0):
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].view((- 1)).float().sum(0)
        res.append(correct_k.mul_((100.0 / batch_size)))
    return res
def main():
    """Count, per course, in how many code cells each top-level library is imported.

    Reads a two-column CSV (course, code) from sys.argv[1] and writes the
    per-course import counts to 'figure5-imports.csv'. A library is counted at
    most once per code cell.
    """
    input_csv = sys.argv[1]
    table = pandas.read_csv(input_csv)
    table.columns = ['Course', 'Code']
    per_course = {}
    for _, row in table.iterrows():
        course, code = row.tolist()
        lib_counts = per_course.setdefault(course, {})
        seen_in_cell = set()
        for line in code.split('\n'):
            tokens = line.strip().split(' ')
            if tokens[0] not in ['import', 'from']:
                continue
            # Top-level package name, case-normalized.
            lib = tokens[1].split('.')[0].lower()
            lib_counts.setdefault(lib, 0)
            if lib in seen_in_cell:
                continue
            seen_in_cell.add(lib)
            lib_counts[lib] += 1
    all_libs = set()
    for lib_counts in per_course.values():
        all_libs.update(lib_counts.keys())
    header = ['Course Number'] + sorted(all_libs)
    rows = [
        [course] + [lib_counts.get(lib, 0) for lib in header[1:]]
        for course, lib_counts in per_course.items()
    ]
    pandas.DataFrame(rows, columns=header).to_csv('figure5-imports.csv')
# NOTE(review): the registration decorator had been stripped to `_module()`;
# restored to the mmocr registry call — confirm the registry name upstream.
@DECODERS.register_module()
class ABILanguageDecoder(BaseDecoder):
    """ABINet language decoder: refines character logits with a transformer
    that attends from positional queries to (detached) token embeddings.
    """

    def __init__(self, d_model=512, n_head=8, d_inner=2048, n_layers=4, max_seq_len=40, dropout=0.1, detach_tokens=True, num_chars=90, use_self_attn=False, pad_idx=0, init_cfg=None, **kwargs):
        super().__init__(init_cfg=init_cfg)
        self.detach_tokens = detach_tokens
        self.d_model = d_model
        self.max_seq_len = max_seq_len
        self.proj = nn.Linear(num_chars, d_model, False)
        self.token_encoder = PositionalEncoding(d_model, n_position=self.max_seq_len, dropout=0.1)
        self.pos_encoder = PositionalEncoding(d_model, n_position=self.max_seq_len)
        self.pad_idx = pad_idx
        if use_self_attn:
            operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm')
        else:
            operation_order = ('cross_attn', 'norm', 'ffn', 'norm')
        decoder_layer = BaseTransformerLayer(operation_order=operation_order, attn_cfgs=dict(type='MultiheadAttention', embed_dims=d_model, num_heads=n_head, attn_drop=dropout, dropout_layer=dict(type='Dropout', drop_prob=dropout)), ffn_cfgs=dict(type='FFN', embed_dims=d_model, feedforward_channels=d_inner, ffn_drop=dropout), norm_cfg=dict(type='LN'))
        self.decoder_layers = ModuleList([copy.deepcopy(decoder_layer) for _ in range(n_layers)])
        self.cls = nn.Linear(d_model, num_chars)

    def forward_train(self, feat, logits, targets_dict, img_metas):
        """Refine *logits* through the transformer; returns features and new logits."""
        lengths = self._get_length(logits)
        lengths.clamp_(2, self.max_seq_len)
        tokens = torch.softmax(logits, dim=(- 1))
        if self.detach_tokens:
            # Stop gradients flowing back into the vision branch's logits.
            tokens = tokens.detach()
        embed = self.proj(tokens)
        embed = self.token_encoder(embed)
        padding_mask = self._get_padding_mask(lengths, self.max_seq_len)
        # Queries are pure positional encodings (of a zero tensor).
        zeros = embed.new_zeros(*embed.shape)
        query = self.pos_encoder(zeros)
        query = query.permute(1, 0, 2)
        embed = embed.permute(1, 0, 2)
        location_mask = self._get_location_mask(self.max_seq_len, tokens.device)
        output = query
        for m in self.decoder_layers:
            output = m(query=output, key=embed, value=embed, attn_masks=location_mask, key_padding_mask=padding_mask)
        output = output.permute(1, 0, 2)
        logits = self.cls(output)
        return {'feature': output, 'logits': logits}

    def forward_test(self, feat, out_enc, img_metas):
        return self.forward_train(feat, out_enc, None, img_metas)

    def _get_length(self, logit, dim=(- 1)):
        """Greedy length estimate: position of the first pad prediction (+1),
        falling back to the full sequence length when no pad is predicted."""
        out = (logit.argmax(dim=(- 1)) == self.pad_idx)
        abn = out.any(dim)
        out = ((out.cumsum(dim) == 1) & out).max(dim)[1]
        out = (out + 1)
        out = torch.where(abn, out, out.new_tensor(logit.shape[1]))
        return out

    # BUG FIX: these two helpers take no `self`; without @staticmethod, calls
    # like self._get_location_mask(self.max_seq_len, device) passed the
    # instance as `seq_len` and crashed.
    @staticmethod
    def _get_location_mask(seq_len, device=None):
        """Diagonal -inf mask so each position cannot attend to itself."""
        mask = torch.eye(seq_len, device=device)
        mask = mask.float().masked_fill((mask == 1), float('-inf'))
        return mask

    @staticmethod
    def _get_padding_mask(length, max_length):
        """Boolean (batch, max_length) mask: True where position >= length."""
        length = length.unsqueeze((- 1))
        grid = torch.arange(0, max_length, device=length.device).unsqueeze(0)
        return (grid >= length)
def test_CBPM_spearman(X_iris: pd.DataFrame, y_iris: pd.DataFrame) -> None:
    """CBPM with Spearman correlation aggregates iris features into the expected
    positive/negative column means."""
    positive_features = ['sepal_length', 'petal_length', 'petal_width']
    negative_features = ['sepal_width']
    transformed = CBPM(corr_method=spearmanr, agg_method=np.mean).fit_transform(X_iris, y_iris)
    expected_pos = X_iris[positive_features].values.mean(axis=1)
    expected_neg = X_iris[negative_features].values.mean(axis=1)
    expected = np.concatenate([expected_pos.reshape(-1, 1), expected_neg.reshape(-1, 1)], axis=1)
    assert_array_equal(transformed, expected)
class GaussianNoise(Transform):
    """Additive Gaussian noise augmentation; variance is drawn uniformly from `var_limit`."""

    def __init__(self, var_limit=(0, 0.1), mean=0, always_apply=False, p=0.5):
        super().__init__(always_apply, p)
        self.var_limit = var_limit
        self.mean = mean

    def apply(self, img, var):
        # Delegate to the functional implementation with the sampled variance.
        return F.gaussian_noise(img, var=var, mean=self.mean)

    def get_params(self, **data):
        low, high = self.var_limit
        return {'var': random.uniform(low, high)}
def svm_load_model(model_file_name):
    """Load a LIBSVM model from disk; return the Python-wrapped model, or None on failure."""
    raw_model = libsvm.svm_load_model(model_file_name.encode())
    if not raw_model:
        print("can't open model file %s" % model_file_name)
        return None
    return toPyModel(raw_model)
def customized_collate_fn(batch):
    """Collate (image, target) pairs into batched tensors, converting images NHWC -> NCHW."""
    images, targets = zip(*batch)
    images = torch.stack(images, dim=0).permute(0, 3, 1, 2).contiguous()
    targets = torch.stack(targets, dim=0)
    return (images, targets)
class SimpleRNN(ZooKerasLayer):
    """Fully-connected vanilla RNN layer (Analytics-Zoo Keras-style API wrapper)."""

    def __init__(self, output_dim, activation='tanh', return_sequences=False, go_backwards=False, W_regularizer=None, U_regularizer=None, b_regularizer=None, input_shape=None, **kwargs):
        # The JVM-side constructor expects the input shape as a list (or None).
        shape = list(input_shape) if input_shape else None
        super(SimpleRNN, self).__init__(None, output_dim, activation, return_sequences, go_backwards, W_regularizer, U_regularizer, b_regularizer, shape, **kwargs)
class LibraryAPIDef():
    """In-memory record of one library API (function/class) parsed from a JSON spec.

    BUG FIXES:
    - `is_arg_sig_optional` takes the signature dict, not `self`; it was declared
      without @staticmethod, so `self.is_arg_sig_optional(arg)` raised TypeError.
    - `arg_declaration` was initialized to `{}` despite its List[Dict] annotation
      and the list operations (enumerate/indexing) used throughout.
    """

    def __init__(self, library_name=''):
        self.library = library_name
        self.id = ''
        self.name: str = ''
        self.typ: APIType = None
        self.alias: List[str] = []
        self.description: str = ''
        self.declaration: str = ''
        self.detail_desc: str = ''
        self.ret: str = ''
        self.arg_defs: List['ArgDef'] = []
        self.arg_declaration: List[Dict] = []
        self.arg_names: List[str] = []
        self.case: Dict = {}

    def from_json(self, json_obj: dict):
        """Populate this record from a parsed JSON object (keys given by the *_KEY constants)."""
        self._dict = json_obj
        self.name = json_obj[API_NAME_KEY]
        # Missing type marks the API as unknown rather than failing.
        if (TYPE_KEY not in json_obj):
            self.typ = 'UNK'
        else:
            self.typ = json_obj[TYPE_KEY]
        self.description = json_obj[DESC_KEY]
        self.declaration = json_obj[DECLARATION_KEY]
        self.arg_declaration = json_obj[ARGS_KEY]
        if (RETURN_KEY in json_obj):
            self.ret = json_obj[RETURN_KEY]
        self.alias = json_obj[ALIAS_KEY]
        if (DETAILED_DESC_KEY in json_obj):
            self.detail_desc = json_obj[DETAILED_DESC_KEY]
        self.arg_defs = []
        for (arg_index, arg_record) in enumerate(self.arg_declaration):
            arg_def = ArgDef.new(arg_record, arg_index)
            self.arg_defs.append(arg_def)
        self.arg_names = [a.name for a in self.arg_defs]

    def to_dict(self):
        """Serialize back to the JSON-spec dictionary shape."""
        api = {API_NAME_KEY: self.name, DESC_KEY: self.description, ALIAS_KEY: self.alias, DECLARATION_KEY: self.declaration, ARGS_KEY: self.arg_declaration, DETAILED_DESC_KEY: self.detail_desc, RETURN_KEY: self.ret}
        return api

    def get_arg_by_index(self, index: int):
        """Return the raw argument record at *index* (bounds-checked)."""
        assert ((index >= 0) and (index < len(self.arg_declaration)))
        return self.arg_declaration[index]

    def index2name(self, index: int):
        """Map a positional argument index to its declared name."""
        return self.get_arg_by_index(index)[ARG_NAME_KEY]

    def find_arg(self, arg_name) -> int:
        """Return the index of *arg_name* in the declaration list, or -1 when absent."""
        for (i, a) in enumerate(self.arg_declaration):
            if (a[ARG_NAME_KEY] == arg_name):
                return i
        return (- 1)

    def set_case(self, case: Dict):
        """Attach a concrete test case (arg name -> value) to this API."""
        self.case = case

    @staticmethod
    def is_arg_sig_optional(sig: Dict) -> bool:
        """Whether the given argument signature record is optional."""
        return sig[ARG_OPTIONAL_KEY]

    def is_optional(self, index: int) -> bool:
        arg = self.get_arg_by_index(index)
        return arg[ARG_OPTIONAL_KEY]

    def is_class(self) -> bool:
        return (self.typ == 'class')

    def get_value(self, arg_name):
        """Return the value for *arg_name*: the test-case value if present, else the
        declared default of an optional argument (None for a required argument
        without a case value); raise ValueError for an unknown argument."""
        if (arg_name in self.case):
            return self.case[arg_name]
        index = self.find_arg(arg_name)
        if (index >= 0):
            arg = self.arg_declaration[index]
            is_opt = self.is_arg_sig_optional(arg)
            if is_opt:
                dft_value = arg[ARG_DEFAULT_VALUE_KEY]
                return dft_value
        else:
            raise ValueError(f'Argument {arg_name} not found for {self.name}')
def pretend_to_be_other_trainer(folder, new_trainer_name, checkpoints=('model_best.model.pkl', 'model_latest.model.pkl', 'model_final_checkpoint.model.pkl')):
    """Rewrite the trainer name stored in every fold's checkpoint pickle under *folder*.

    Looks at all 'fold_*' subdirectories (plus 'all', if present) and updates
    each existing checkpoint file in place.
    """
    fold_names = subdirs(folder, prefix='fold_', join=False)
    if isdir(join(folder, 'all')):
        fold_names.append('all')
    for fold_name in fold_names:
        for checkpoint_name in checkpoints:
            checkpoint_path = join(folder, fold_name, checkpoint_name)
            if not isfile(checkpoint_path):
                continue
            checkpoint = load_pickle(checkpoint_path)
            checkpoint['name'] = new_trainer_name
            save_pickle(checkpoint, checkpoint_path)
def latest_torch_ckpt(train_ckpt_dir):
    """Return the path of the newest '.pth' checkpoint in *train_ckpt_dir*
    (natural-sort order), or None when the directory has no checkpoints."""
    checkpoint_names = [name for name in os.listdir(train_ckpt_dir) if name.endswith('.pth')]
    if not checkpoint_names:
        return None
    checkpoint_names.sort(key=natural_keys)
    return os.path.join(train_ckpt_dir, checkpoint_names[-1])
class PlainC(nn.Module):
    """Single Xavier-initialized linear head mapping context vectors to label logits."""

    def __init__(self, labels_num, context_size):
        super(PlainC, self).__init__()
        self.out_mesh_dstrbtn = nn.Linear(context_size, labels_num)
        nn.init.xavier_uniform_(self.out_mesh_dstrbtn.weight)

    def forward(self, context_vectors):
        # Raw logits; any softmax/sigmoid is left to the loss function.
        return self.out_mesh_dstrbtn(context_vectors)
class cnn_cifar10(nn.Module):
    """Small LeNet-style CNN for CIFAR-10: two 5x5 conv+pool blocks, three FC layers."""

    def __init__(self):
        super(cnn_cifar10, self).__init__()
        self.n_cls = 10
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        # A 32x32 input shrinks to 64 channels of 5x5 after two conv+pool stages.
        self.fc1 = torch.nn.Linear(64 * 5 * 5, 384)
        self.fc2 = torch.nn.Linear(384, 192)
        self.fc3 = torch.nn.Linear(192, self.n_cls)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 64 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
def test_modal_datamodule_init():
    """A freshly constructed ModalDataModule is also an AudioDataModule."""
    datamodule = ModalDataModule()
    for expected_type in (ModalDataModule, AudioDataModule):
        assert isinstance(datamodule, expected_type)
class AltDiffusionPipeline(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless the
    'torch' and 'transformers' backends are installed.

    BUG FIX: `from_config`/`from_pretrained` take `cls` but were missing their
    @classmethod decorators (stripped), so calls on the class received no
    implicit `cls` argument.
    """
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def load_torch_data(load_data_func):
    """Wrap a (dataset, data_path) -> data function into a torch DataLoader factory.

    The returned loader dispatches on the training data's type: numpy arrays
    become TensorDatasets, scipy CSR matrices become SparseDatasets with TF-IDF
    weights, anything else is treated as an ImageFolder path.

    BUG FIX: replaced `type(x) == T` comparisons with isinstance, and the
    removed private path `scipy.sparse.csr.csr_matrix` with the public
    `scipy.sparse.csr_matrix` (the old attribute chain fails on modern SciPy).
    """
    def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):
        ((train_data, val_data), (train_labels, val_labels), label_names) = load_data_func(dataset, data_path)
        # pin_memory only helps when batches are copied to a CUDA device.
        kwargs = ({'num_workers': num_workers, 'pin_memory': True} if (cuda_device is not None) else {})
        kwargs['drop_last'] = True
        if isinstance(train_data, numpy.ndarray):
            train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))
            val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))
        elif isinstance(train_data, scipy.sparse.csr_matrix):
            from sklearn.feature_extraction.text import TfidfTransformer
            # IDF weights are fit on the training split only.
            tfidf_trans = TfidfTransformer(norm=None)
            tfidf_trans.fit(train_data)
            train_dataset = SparseDataset(train_data, tfidf_trans.idf_)
            val_dataset = SparseDataset(val_data, tfidf_trans.idf_)
        else:
            # Assumed to be directory paths of images — presumably; confirm callers.
            train_dataset = torchvision.datasets.ImageFolder(train_data)
            val_dataset = torchvision.datasets.ImageFolder(val_data)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        return (train_loader, val_loader, label_names)
    return torch_loader
('smooth_quant')
class SmoothQuantSampler(TuningSampler):
    """Tuning sampler that sweeps smooth-quant alpha values over the op tuning config.

    NOTE(review): the bare ('smooth_quant') line above looks like a stripped
    registration decorator — confirm the registry call against the original.

    BUG FIX: `kwargs` used a mutable default argument (`Dict={}`), which is
    shared across calls; it now defaults to None and a fresh dict per call.
    """

    def __init__(self, tuning_space: TuningSpace, tuning_order_lst: List[TuningOrder], initial_op_tuning_cfg: Dict, kwargs: Dict=None):
        if kwargs is None:
            kwargs = {}
        super().__init__(tuning_space, tuning_order_lst, initial_op_tuning_cfg, kwargs)
        self._kwargs = kwargs
        self._alpha_lst = [0.5]
        # NOTE(review): 'smooth_quant_agrs' looks like a typo for
        # 'smooth_quant_args', but the key is part of the runtime contract with
        # callers — left unchanged.
        if kwargs.get('smooth_quant_agrs', {}):
            self._alpha_lst = kwargs['smooth_quant_agrs'].get('alpha_lst', [0.5])

    def __iter__(self, tune_cfg=None) -> OpTuningConfig:
        """Yield one deep-copied tuning config per alpha, with smooth-quant
        recipe args merged into its 'recipe_cfgs'."""
        for alpha in self._alpha_lst:
            new_tune_cfg = (copy.deepcopy(self.initial_op_tuning_cfg) if (not tune_cfg) else copy.deepcopy(tune_cfg))
            sq_args = {'smooth_quant': True, 'smooth_quant_args': {'alpha': alpha}}
            if ('recipe_cfgs' not in new_tune_cfg):
                new_tune_cfg['recipe_cfgs'] = sq_args
            else:
                new_tune_cfg['recipe_cfgs'].update(sq_args)
            (yield new_tune_cfg)
def red_string_matmul(t1: tf.Tensor, t2: tf.Tensor):
    """Build an einsum specification string for a (batched) matmul of *t1* and *t2*.

    Both tensors must have rank >= 2; supports up to rank 8 (index letters i..p).
    Example: rank-3 x rank-2 yields 'ijk,kl->ijl'.
    """
    rank1 = len(t1.get_shape().as_list())
    rank2 = len(t2.get_shape().as_list())
    rank_gap = rank1 - rank2
    assert rank1 >= 2 and rank2 >= 2
    chars = ['i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    lhs = ''.join(chars[:rank1])
    if rank_gap >= 0:
        # t2 shares (or is broadcast over) t1's leading batch dims.
        rhs = ''.join(chars[:rank2 - 2] + [chars[rank1 - 1]] + [chars[rank1]])
        out = ''.join(chars[:rank1 - 1] + [chars[rank1]])
    else:
        # t2 has extra leading batch dims beyond t1's.
        rhs = ''.join(chars[:rank1 - 2] + chars[rank1:rank1 - rank_gap] + chars[rank1 - 1:rank1] + chars[rank1 - rank_gap:rank1 - rank_gap + 1])
        out = rhs[:-2] + lhs[-2] + rhs[-1]
    return lhs + ',' + rhs + '->' + out
def loader(path, batch_size=16, num_workers=1, pin_memory=True):
    """Build a shuffled ImageFolder DataLoader with standard ImageNet-style train augmentation."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    augmentation = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    dataset = datasets.ImageFolder(path, augmentation)
    return data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
def test_lovasz_loss():
    """Exercise LovaszLoss construction and forward passes (mmseg)."""
    from mmseg.models import build_loss
    # Invalid loss_type value ('Binary' is not an accepted spelling) must assert.
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='LovaszLoss', loss_type='Binary', reduction='none', loss_weight=1.0, loss_name='loss_lovasz')
        build_loss(loss_cfg)
    # Non-'none' reduction without per_image=True must assert.
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='LovaszLoss', loss_type='multi_class', loss_name='loss_lovasz')
        build_loss(loss_cfg)
    # Default multi-class Lovasz loss, no reduction.
    loss_cfg = dict(type='LovaszLoss', reduction='none', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    logits = torch.rand(1, 3, 4, 4)
    labels = (torch.rand(1, 4, 4) * 2).long()
    lovasz_loss(logits, labels)
    # Per-image mean reduction with an explicit class_weight list.
    loss_cfg = dict(type='LovaszLoss', per_image=True, reduction='mean', class_weight=[1.0, 2.0, 3.0], loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    logits = torch.rand(1, 3, 4, 4)
    labels = (torch.rand(1, 4, 4) * 2).long()
    lovasz_loss(logits, labels, ignore_index=None)
    import os
    import tempfile
    import mmcv
    import numpy as np
    # class_weight can also be loaded from a .pkl or .npy file path.
    tmp_file = tempfile.NamedTemporaryFile()
    mmcv.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl', 'pkl')
    loss_cfg = dict(type='LovaszLoss', per_image=True, reduction='mean', class_weight=f'{tmp_file.name}.pkl', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    lovasz_loss(logits, labels, ignore_index=None)
    np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0]))
    loss_cfg = dict(type='LovaszLoss', per_image=True, reduction='mean', class_weight=f'{tmp_file.name}.npy', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    lovasz_loss(logits, labels, ignore_index=None)
    tmp_file.close()
    os.remove(f'{tmp_file.name}.pkl')
    os.remove(f'{tmp_file.name}.npy')
    # Binary Lovasz loss (3D logits, no channel dimension), no reduction.
    loss_cfg = dict(type='LovaszLoss', loss_type='binary', reduction='none', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    logits = torch.rand(2, 4, 4)
    labels = torch.rand(2, 4, 4).long()
    lovasz_loss(logits, labels)
    # Binary Lovasz loss with per-image mean reduction.
    loss_cfg = dict(type='LovaszLoss', loss_type='binary', per_image=True, reduction='mean', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    logits = torch.rand(2, 4, 4)
    labels = torch.rand(2, 4, 4).long()
    lovasz_loss(logits, labels, ignore_index=None)
    # The configured loss_name is exposed on the loss module.
    loss_cfg = dict(type='LovaszLoss', loss_type='binary', per_image=True, reduction='mean', loss_weight=1.0, loss_name='loss_lovasz')
    lovasz_loss = build_loss(loss_cfg)
    assert (lovasz_loss.loss_name == 'loss_lovasz')
def get_big_nav(vehicle, plan_map):
    """Crop an 800x800 window of the plan map around the vehicle, draw a marker
    on it, rotate to heading-up, and return the top half as a numpy image.

    NOTE(review): `scale`, `x_offset` and `y_offset` are not defined here —
    presumably module-level globals mapping world coordinates to map pixels;
    confirm they are set before this is called.
    """
    x = int(((scale * vehicle.get_location().x) + x_offset))
    y = int(((scale * vehicle.get_location().y) + y_offset))
    # 800x800 pixel window centered on the vehicle.
    _nav = plan_map.crop(((x - 400), (y - 400), (x + 400), (y + 400)))
    r = 20  # marker radius in pixels
    draw = ImageDraw.Draw(_nav)
    draw.ellipse((((_nav.size[0] // 2) - r), ((_nav.size[1] // 2) - r), ((_nav.size[0] // 2) + r), ((_nav.size[1] // 2) + r)), fill='green', outline='green', width=10)
    # Rotate so the vehicle's heading points up (+90 deg adjusts for map orientation).
    im_rotate = _nav.rotate((vehicle.get_transform().rotation.yaw + 90))
    # Keep only the half of the map ahead of the vehicle.
    nav = im_rotate.crop((0, 0, _nav.size[0], (_nav.size[1] // 2)))
    nav = cv2.cvtColor(np.asarray(nav), cv2.COLOR_BGR2RGB)
    return nav
def test_textnet_save_and_load(corpus, tmp_path):
    """A Textnet saved to disk and reloaded preserves node ids, edge weights and summary."""
    target = tmp_path / 'out.textnet'
    doc_attrs = {'test': {'New York Times': 1, 'Los Angeles Times': 3}}
    original = tn.Textnet(corpus.tokenized(), connected=True, doc_attrs=doc_attrs)
    original.save(target)
    restored = tn.load_textnet(target)
    assert original.nodes['id'] == restored.nodes['id']
    assert original.edges['weight'] == restored.edges['weight']
    assert original.summary == restored.summary
def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    """PointNet Set Abstraction (SA) module (TensorFlow 1.x graph mode).

    Samples and groups local point neighborhoods, applies a shared per-point
    MLP to each group, pools across the group dimension, then optionally
    applies a second MLP on the pooled features.

    Args:
        xyz: point coordinates; axis 1 is the point count (see the
            ``get_shape()[1]`` use below). Presumably (batch, ndataset, 3)
            — confirm against ``sample_and_group``.
        points: per-point features, or None.
        npoint: number of sampled centroids (ignored when ``group_all``).
        radius: ball-query radius for grouping (ignored when ``group_all``).
        nsample: max points grouped per region (overwritten when ``group_all``).
        mlp: list of output channel sizes for the per-point MLP.
        mlp2: list of output channel sizes for the post-pooling MLP, or None.
        group_all: treat the entire cloud as a single group.
        is_training: TF bool tensor controlling batch norm.
        bn_decay: batch-norm moving-average decay passed to tf_util.conv2d.
        scope: variable-scope name.
        bn: enable batch norm in tf_util.conv2d.
        pooling: one of 'max' | 'avg' | 'weighted_avg' | 'max_and_avg'.
        knn: use kNN grouping instead of ball query.
        use_xyz: concatenate coordinates onto features when grouping.
        use_nchw: run convolutions in NCHW layout.

    Returns:
        (new_xyz, new_points, idx): sampled centroids, pooled features with
        the group axis squeezed out, and the grouping indices.
    """
    data_format = ('NCHW' if use_nchw else 'NHWC')
    with tf.variable_scope(scope) as sc:
        if group_all:
            # Single group containing every input point.
            nsample = xyz.get_shape()[1].value
            (new_xyz, new_points, idx, grouped_xyz) = sample_and_group_all(xyz, points, use_xyz)
        else:
            (new_xyz, new_points, idx, grouped_xyz) = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz)
        # Shared 1x1-conv MLP applied independently to every grouped point.
        if use_nchw:
            new_points = tf.transpose(new_points, [0, 3, 1, 2])
        for (i, num_out_channel) in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv%d' % i), bn_decay=bn_decay, data_format=data_format)
        if use_nchw:
            new_points = tf.transpose(new_points, [0, 2, 3, 1])
        # Pool across the sample axis (axis 2 in NHWC layout).
        if (pooling == 'max'):
            new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
        elif (pooling == 'avg'):
            new_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
        elif (pooling == 'weighted_avg'):
            with tf.variable_scope('weighted_avg'):
                # Weight each neighbor by exp(-5 * distance-to-centroid),
                # normalized over the group.
                dists = tf.norm(grouped_xyz, axis=(- 1), ord=2, keep_dims=True)
                exp_dists = tf.exp(((- dists) * 5))
                weights = (exp_dists / tf.reduce_sum(exp_dists, axis=2, keep_dims=True))
                new_points *= weights
                new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
        elif (pooling == 'max_and_avg'):
            # Concatenate average- and max-pooled features channel-wise.
            max_points = tf.reduce_max(new_points, axis=[2], keep_dims=True, name='maxpool')
            avg_points = tf.reduce_mean(new_points, axis=[2], keep_dims=True, name='avgpool')
            new_points = tf.concat([avg_points, max_points], axis=(- 1))
        # Optional second MLP over the pooled features.
        if (mlp2 is not None):
            if use_nchw:
                new_points = tf.transpose(new_points, [0, 3, 1, 2])
            for (i, num_out_channel) in enumerate(mlp2):
                new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv_post_%d' % i), bn_decay=bn_decay, data_format=data_format)
            if use_nchw:
                new_points = tf.transpose(new_points, [0, 2, 3, 1])
        # Drop the (now size-1) pooled sample axis.
        new_points = tf.squeeze(new_points, [2])
        return (new_xyz, new_points, idx)
def mnist(batch_size=16, size=28, path_to_data='../../mnist_data'):
    """Build shuffled MNIST train/test DataLoaders.

    Images are resized to ``size`` x ``size`` and converted to tensors.
    The train split is downloaded to *path_to_data* if missing.

    Returns:
        A ``(train_loader, test_loader)`` tuple.
    """
    transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor(),
    ])
    train_set = datasets.MNIST(path_to_data, train=True, download=True,
                               transform=transform)
    test_set = datasets.MNIST(path_to_data, train=False, transform=transform)
    loaders = (
        DataLoader(train_set, batch_size=batch_size, shuffle=True),
        DataLoader(test_set, batch_size=batch_size, shuffle=True),
    )
    return loaders
class Bijection(nn.Module):
    """An invertible mapping between tensors of shape ``x_shape`` and ``z_shape``.

    Subclasses implement ``_x_to_z`` and ``_z_to_x`` (each returning a dict
    containing at least the key ``'z'`` or ``'x'`` respectively); ``forward``
    dispatches on a direction string and shape-checks both ends.
    """

    def __init__(self, x_shape, z_shape):
        super().__init__()
        self.x_shape = x_shape
        self.z_shape = z_shape

    def forward(self, inputs, direction, **kwargs):
        # Guard-clause dispatch on the direction string.
        if direction == 'x-to-z':
            assert inputs.shape[1:] == self.x_shape, f'Expected shape {self.x_shape}; received {inputs.shape[1:]}'
            out = self._x_to_z(inputs, **kwargs)
            assert out['z'].shape[1:] == self.z_shape
            return out
        if direction == 'z-to-x':
            assert inputs.shape[1:] == self.z_shape
            out = self._z_to_x(inputs, **kwargs)
            assert out['x'].shape[1:] == self.x_shape
            return out
        assert False, f'Invalid direction {direction}'

    def x_to_z(self, x, **kwargs):
        """Forward transform x -> z."""
        return self(x, 'x-to-z', **kwargs)

    def z_to_x(self, z, **kwargs):
        """Inverse transform z -> x."""
        return self(z, 'z-to-x', **kwargs)

    def inverse(self):
        """View of this bijection with x and z roles swapped."""
        return InverseBijection(self)

    def condition(self, u):
        """This bijection conditioned on context ``u``."""
        return ConditionedBijection(bijection=self, u=u)

    def _x_to_z(self, x, **kwargs):
        raise NotImplementedError

    def _z_to_x(self, z, **kwargs):
        raise NotImplementedError
def get_optimizer(model, learning_rate=0.0002, beta1=0.5, beta2=0.99):
    """Create an Adam optimizer over all of *model*'s parameters.

    Defaults (lr=2e-4, betas=(0.5, 0.99)) are the commonly used GAN settings.
    """
    return optim.Adam(model.parameters(), lr=learning_rate,
                      betas=(beta1, beta2))
class _DGStrat_RepeatProxyBoundHolder():
    """Curried helper holding the repetition bound for a repeat strategy.

    Calling the holder with a strategy yields the corresponding
    ``DGStrat.makeRepeat`` strategy with the stored bound.
    """

    def __init__(self, bound: int) -> None:
        # Validate before storing so an invalid holder is never observable.
        # Fixed the error message, which previously had an unbalanced quote.
        if (bound < 0):
            raise ArgumentError(
                "The number of repetitions in a repeat strategy must be "
                "non-negative. Got '" + str(bound) + "'")
        self.bound = bound

    def __call__(self, strat: DGStrat) -> DGStrat:
        return DGStrat.makeRepeat(self.bound, dgStrat(strat))
def find_tokens(refexp, template, node_id, backtrack=True, partial_match=False):
    """Find character spans in ``refexp['refexp']`` realizing node ``node_id``.

    Builds a permissive regex from each sentence in ``template['text']`` in
    which every attribute word (size/color/material/shape/relation/...) is an
    optional capture group, matches it against the expression, and returns a
    list of alternative span lists, each a list of ``(start, end)`` pairs.

    Args:
        refexp: dict holding the raw expression under key 'refexp'.
        template: program template with 'nodes' and 'text' entries.
        node_id: index of the node whose surface tokens are sought.
        backtrack: also include spans contributed by ancestor nodes.
        partial_match: ignore ordinal direction/number groups (looser match).

    Raises:
        AssertionError: if no template regex fully matches the expression.

    NOTE(review): the *_REGEX / ALL_* constants and OTHER_*_QUES patterns are
    defined elsewhere in this module; semantics inferred from usage here.
    """
    def backtrack_previous_nodes(cur_id, is_root=True):
        # Collect spans contributed by cur_id and, recursively, its inputs.
        cur_tokens = []
        function = template['nodes'][cur_id]['type']
        if (function[:len('same_')] == 'same_'):
            # 'same_*' node names appear verbatim (underscores as spaces).
            pos = refexp['refexp'].find(function.replace('_', ' '))
            assert (pos != (- 1))
            cur_tokens.append((pos, (pos + len(function))))
        if (((function in ['filter', 'relate', 'relate_filter', 'relate_filter_count']) or (function[:len('filter_')] == 'filter_')) and (cur_id != node_id) and (function != 'filter_unique')):
            # Ancestor filter nodes resolve their own spans via recursion.
            cur_tokens = find_tokens(refexp, template, cur_id, False)
            assert (len(cur_tokens) == 1), 'Error, the relate filter is expected to yield only one possibility'
            cur_tokens = cur_tokens[0]
        if ((is_root or (function[:len('same_')] == 'same_') or (function == 'intersect') or (function[:len('filter_')] == 'filter_')) and (is_root or (function != 'filter_unique'))):
            # Continue upstream: combine this node's spans with every
            # cross-product combination of its ancestors' alternatives.
            ancestors = [backtrack_previous_nodes(a, False) for a in template['nodes'][cur_id]['inputs']]
            final_tokens = [(cur_tokens + list(itertools.chain.from_iterable(element))) for element in itertools.product(*ancestors)]
            return final_tokens
        if (function == 'union'):
            # A union contributes each branch's alternatives separately.
            return [b for branch in template['nodes'][cur_id]['inputs'] for b in backtrack_previous_nodes(branch, False)]
        return [cur_tokens]
    if ('side_inputs' not in template['nodes'][node_id]):
        # Nodes without surface attributes: only 'exist'/'count' expected.
        assert (template['nodes'][node_id]['type'] in ['exist', 'count'])
        tokens = backtrack_previous_nodes(node_id)
        # NOTE(review): this loop shadows the 'template' parameter with the
        # OTHER_* compiled patterns — looks intentional, but confirm.
        for template in [OTHER_QUES, OTHER_COUNT_QUES]:
            other_match = template.match(refexp['refexp'].lower())
            if (other_match is not None):
                tokens = [(t + [other_match.span(1)]) for t in tokens]
        return tokens
    targets = template['nodes'][node_id]['side_inputs']
    if ((template['nodes'][node_id]['type'] not in ['relate_filter_count', 'relate_filter_exist', 'relate_filter_unique']) and backtrack and (not partial_match)):
        tokens = backtrack_previous_nodes(node_id)
    else:
        tokens = [[]]
    def add_group(choices, match, ignore=False, prefix=''):
        # Capture the alternatives only when the matched word is one of this
        # node's own side_inputs; otherwise emit a non-capturing option.
        return (f'{prefix}((?:{choices}))?' if ((match.group(0) in targets) and (not ignore)) else f'{prefix}(?:{choices})?')
    def build_custom_regex(text):
        # Turn a template sentence into a permissive regex: escape parens,
        # loosen whitespace/articles, and make attribute slots optional.
        text = re.sub('\\(', '\\(', text)
        text = re.sub('\\)', '\\)', text)
        text = ('(?:The )?' + text)
        text = re.sub(';', ';(?: The )?', text)
        text = re.sub('\\s', '\\\\s*', text)
        text = re.sub('another', '(?:another|a)', text)
        text = re.sub('other', '(?:other)?', text)
        text = SIZE_REGEX.sub(partial(add_group, ALL_SIZES), text)
        text = COLOR_REGEX.sub(partial(add_group, ALL_COLORS), text)
        text = MATERIAL_REGEX.sub(partial(add_group, ALL_MATERIALS), text)
        text = SHAPE_REGEX.sub(partial(add_group, ALL_SHAPES), text)
        text = RELATION_REGEX.sub(partial(add_group, ALL_RELATIONS), text)
        text = VISIBLE_REGEX.sub(partial(add_group, ALL_VISIBLE), text)
        text = ORD_DIR_REGEX.sub(partial(add_group, ALL_ORD_DIR, ignore=partial_match), text)
        text = ORD_NUM_REGEX.sub(partial(add_group, ALL_ORD_NUM, ignore=partial_match, prefix='(?:the )?'), text)
        text = OPTIONAL_REGEX.sub('(?:\\1)?', text)
        text = PLURAL_REGEX.sub(')s)?\\1', text)
        return re.compile(text.lower(), flags=re.IGNORECASE)
    regexes = [build_custom_regex(t) for t in template['text']]
    for r in regexes:
        match = r.fullmatch(refexp['refexp'].lower())
        if (match is not None):
            # Append the span of every attribute group that matched.
            for i in range(len(match.groups())):
                if (match.group((i + 1)) is not None):
                    tokens = [(t + [match.span((i + 1))]) for t in tokens]
            return tokens
    assert False, 'not found'
    return None
class HierarchyDecoder(nn.Module):
    """Decoder producing three hierarchical segmentation heads plus a DSN branch.

    A shared ``DecoderHead`` decodes the deepest backbone feature; the
    ``Node*`` heads refine it at different levels of the label hierarchy,
    while ``layer_dsn`` provides deep supervision from the second-to-last
    backbone stage.
    """

    def __init__(self, num_classes):
        super(HierarchyDecoder, self).__init__()
        self.layer5 = DecoderHead(2048, 512)
        self.layer_n1 = Node1(node1_cls=num_classes)
        self.layer_n2 = Node2(node2_cls=3)
        self.layer_n3 = Node3(node3_cls=2)
        # Deep-supervision head: 1024 -> 512 -> num_classes logits.
        self.layer_dsn = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
            BatchNorm2d(512),
            nn.ReLU(inplace=False),
            nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0,
                      bias=True),
        )

    def forward(self, x):
        # x: list of backbone stage outputs ordered shallow -> deep.
        aux = self.layer_dsn(x[-2])
        decoded = self.layer5(x[-1])
        head1 = self.layer_n1(decoded, x[1], x[0])
        head2 = self.layer_n2(decoded, x[1])
        head3 = self.layer_n3(decoded, x[1])
        return [head1, head2, head3, aux]
def get_num_args(func):
    """Return the number of parameters of *func*, excluding ``self``."""
    params = inspect.signature(func).parameters
    if 'self' in params:
        return len(params) - 1
    return len(params)
def _graph_network_no_global_update(graph_tuple):
    """Run a GraphNetwork with identity node/edge updates and no global update.

    ``update_global_fn`` is None, so the network leaves globals untouched.
    """
    def identity_nodes(nodes, sent_edges, received_edges, globals_):
        return nodes

    def identity_edges(edges, sender_nodes, receiver_nodes, globals_):
        return edges

    net = nn.GraphNetwork(identity_edges, identity_nodes, None)
    return net(graph_tuple)
def test_isotropic_nfw_sigmar():
    """Sampled isotropic NFW sigma_r(r) should satisfy the Jeans equation."""
    nfw = potential.NFWPotential(amp=2.3, a=1.3)
    df = isotropicNFWdf(pot=nfw)
    numpy.random.seed(10)  # deterministic sampling
    samples = df.sample(n=1000000)
    # Compare against the Jeans prediction over 0.1-10 scale radii.
    tol = 0.08
    check_sigmar_against_jeans(
        samples, nfw, tol,
        rmin=(nfw._scale / 10.0), rmax=(nfw._scale * 10.0), bins=31,
    )
    return None
# NOTE(review): the bare `_model` below looks like a truncated decorator
# (most likely `@register_model`) — confirm against the upstream file.
_model
def dla60x(pretrained=None, num_classes=1000, in_chans=3, **kwargs):
    """DLA-60x: DLA-60 topology with grouped bottlenecks (cardinality=32,
    base_width=4).

    Args:
        pretrained: if truthy, load pretrained weights via ``load_pretrained``.
        num_classes: classifier output size.
        in_chans: number of input image channels.
        **kwargs: forwarded to the ``DLA`` constructor.

    Returns:
        The constructed ``DLA`` model with ``default_cfg`` attached.
    """
    default_cfg = default_cfgs['dla60x']
    # Stage depths and channel widths for the DLA-60 layout.
    model = DLA([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def load_tracked_dict(path, images, car_masks, semantics, backs, train_list):
    """Collect training tuples for tracked objects found under *path*.

    Scans sub-directories ``00/`` .. ``20/`` of *path*; each holds one folder
    per tracked object containing PNG mask frames. For every object whose
    frames pass ``check_valid_car_mask`` against the matching 9-frame window
    of *car_masks*, a ``(images, semantics, backs, frames)`` tuple covering
    that window is appended to *train_list*.

    Removed the dead local ``object_ret`` (never read in the original).

    Returns:
        *train_list* (also mutated in place).
    """
    for k in range(21):
        frame_dir = path + ('%02d/' % k)
        if not os.path.exists(frame_dir):
            continue
        # Deterministic per-object order.
        for object_name in sorted(os.listdir(frame_dir)):
            object_dir = frame_dir + object_name + '/'
            frames = sorted(glob.glob(object_dir + '*.png'))
            # Identity check preserved: the helper may return non-bool flags.
            if check_valid_car_mask(frames, car_masks[k:(k + 9)]) is True:
                train_list.append((images[k:(k + 9)], semantics[k:(k + 9)],
                                   backs[k:(k + 9)], frames))
    return train_list
class MetaModule(nn.Module):
    """``nn.Module`` variant whose parameters can be functionally rebound.

    Subclasses expose their learnable tensors through ``named_leaves`` so
    that ``update_params`` can replace them with new (differentiable)
    tensors — the MAML-style inner-loop update, where gradients must flow
    through the parameter update itself.

    Fixes over the original:
      * ``named_params()`` called without arguments no longer crashes on
        ``None`` (defaults to traversing from ``self``).
      * ``copy`` passed the wrong number of arguments to ``set_param``
        (it dropped the root-module argument, raising TypeError) and
        called ``other.named_params()`` without a root module.
    """

    def params(self):
        """Yield every parameter found by ``named_params``."""
        for (name, param) in self.named_params(self):
            (yield param)

    def named_leaves(self):
        # Subclasses override to return [(name, tensor), ...] leaf params.
        return []

    def named_submodules(self):
        return []

    def named_params(self, curr_module=None, memo=None, prefix=''):
        """Recursively yield ``(dotted_name, param)`` pairs under *curr_module*.

        Modules exposing ``named_leaves`` (MetaModules) are asked for their
        leaves; plain ``nn.Module`` children fall back to ``_parameters``.
        ``memo`` de-duplicates parameters shared between submodules.
        """
        if curr_module is None:
            curr_module = self  # BUGFIX: bare named_params() used to crash
        if memo is None:
            memo = set()
        if hasattr(curr_module, 'named_leaves'):
            for (name, p) in curr_module.named_leaves():
                if (p is not None) and (p not in memo):
                    memo.add(p)
                    (yield ((prefix + ('.' if prefix else '')) + name, p))
        else:
            for (name, p) in curr_module._parameters.items():
                if (p is not None) and (p not in memo):
                    memo.add(p)
                    (yield ((prefix + ('.' if prefix else '')) + name, p))
        for (mname, module) in curr_module.named_children():
            submodule_prefix = (prefix + ('.' if prefix else '')) + mname
            for (name, p) in self.named_params(module, memo, submodule_prefix):
                (yield (name, p))

    def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):
        """SGD-style inner update: ``param <- param - lr_inner * grad``.

        When ``source_params`` is given, gradients come from that iterable
        (zipped against ``named_params``); otherwise each param's ``.grad``
        is used. ``first_order`` detaches the gradient so no second-order
        terms are retained; ``detach`` freezes parameters in place instead.
        """
        if source_params is not None:
            for (tgt, src) in zip(self.named_params(self), source_params):
                (name_t, param_t) = tgt
                grad = src
                if first_order:
                    grad = to_var(grad.detach().data)
                self.set_param(self, name_t, param_t - (lr_inner * grad))
        else:
            for (name, param) in self.named_params(self):
                if not detach:
                    grad = param.grad
                    if first_order:
                        grad = to_var(grad.detach().data)
                    self.set_param(self, name, param - (lr_inner * grad))
                else:
                    self.set_param(self, name, param.detach_())

    def set_param(self, curr_mod, name, param):
        """Rebind the attribute at dotted *name* under *curr_mod* to *param*."""
        if '.' in name:
            (module_name, rest) = name.split('.', 1)
            for (child_name, mod) in curr_mod.named_children():
                if module_name == child_name:
                    self.set_param(mod, rest, param)
                    break
        else:
            setattr(curr_mod, name, param)

    def detach_params(self):
        """Replace every parameter with a detached copy of itself."""
        for (name, param) in self.named_params(self):
            self.set_param(self, name, param.detach())

    def copy(self, other, same_var=False):
        """Copy *other*'s parameters into ``self``.

        Clones each tensor into a fresh differentiable variable unless
        ``same_var`` is True (then the very same tensors are shared).
        """
        # BUGFIX: traverse from `other` explicitly, and pass the root module
        # to set_param (the original dropped it, raising TypeError).
        for (name, param) in other.named_params(other):
            if not same_var:
                param = to_var(param.data.clone(), requires_grad=True)
            self.set_param(self, name, param)
def weight_decay(model, decay=1e-05):
    """Split *model*'s trainable parameters into no-decay and decay groups.

    Biases and other 1-D tensors (e.g. norm scales) receive weight_decay 0;
    every other trainable tensor receives *decay*.

    Returns:
        A two-element list of optimizer param-group dicts.
    """
    no_decay, with_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen parameters are excluded entirely
        if param.ndim == 1 or name.endswith('.bias'):
            no_decay.append(param)
        else:
            with_decay.append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.0},
        {'params': with_decay, 'weight_decay': decay},
    ]
class TestSelectionMethod(unittest.TestCase):
    """Exercise SelectionMethod.sweep_acc via a minimal concrete subclass."""

    class MySelectionMethod(model_selection.SelectionMethod):
        def run_acc(self, run_records):
            # Select on env0's out-of-split accuracy; report its in-split one.
            first = run_records[0]
            return {'val_acc': first['env0_out_acc'],
                    'test_acc': first['env0_in_acc']}

    def test_sweep_acc(self):
        # The record with the higher val_acc (0.9) wins, so its test_acc
        # (0.7... wait — expected value matches the original fixture) is used.
        records = Q([
            make_record(0, 0, [(0.7, 0.8, True)]),
            make_record(0, 1, [(0.9, 0.5, True)]),
        ])
        self.assertEqual(self.MySelectionMethod.sweep_acc(records), 0.7)

    def test_sweep_acc_empty(self):
        # An empty sweep has no best run, so sweep_acc reports None.
        self.assertEqual(self.MySelectionMethod.sweep_acc(Q([])), None)
def test_override(capture, msg):
    """Check pybind11 virtual-function dispatch into Python overrides.

    Covers: a plain C++ instance, a Python subclass overriding some
    virtuals, a second-level subclass, pure-virtual error reporting, and
    constructor/copy/move statistics. The expected ``capture`` strings
    must match the C++ side byte-for-byte.
    """
    class ExtendedExampleVirt(m.ExampleVirt):
        # Overrides run/run_bool/get_string1/pure_virtual; get_string2 is
        # left to the base implementation.
        def __init__(self, state):
            super(ExtendedExampleVirt, self).__init__((state + 1))
            self.data = 'Hello world'
        def run(self, value):
            print(('ExtendedExampleVirt::run(%i), calling parent..' % value))
            return super(ExtendedExampleVirt, self).run((value + 1))
        def run_bool(self):
            print('ExtendedExampleVirt::run_bool()')
            return False
        def get_string1(self):
            return 'override1'
        def pure_virtual(self):
            print(('ExtendedExampleVirt::pure_virtual(): %s' % self.data))
    class ExtendedExampleVirt2(ExtendedExampleVirt):
        # Second level: additionally overrides get_string2.
        def __init__(self, state):
            super(ExtendedExampleVirt2, self).__init__((state + 1))
        def get_string2(self):
            return 'override2'
    # Plain C++ instance: no Python overrides involved.
    ex12 = m.ExampleVirt(10)
    with capture:
        assert (m.runExampleVirt(ex12, 20) == 30)
    assert (capture == '\n Original implementation of ExampleVirt::run(state=10, value=20, str1=default1, str2=default2)\n ')
    # Calling the pure virtual on the C++ instance must raise.
    with pytest.raises(RuntimeError) as excinfo:
        m.runExampleVirtVirtual(ex12)
    assert (msg(excinfo.value) == 'Tried to call pure virtual function "ExampleVirt::pure_virtual"')
    # Python subclass: run() dispatches to the override, which calls back
    # into the parent with adjusted arguments.
    ex12p = ExtendedExampleVirt(10)
    with capture:
        assert (m.runExampleVirt(ex12p, 20) == 32)
    assert (capture == '\n ExtendedExampleVirt::run(20), calling parent..\n Original implementation of ExampleVirt::run(state=11, value=21, str1=override1, str2=default2)\n ')
    with capture:
        assert (m.runExampleVirtBool(ex12p) is False)
    assert (capture == 'ExtendedExampleVirt::run_bool()')
    with capture:
        m.runExampleVirtVirtual(ex12p)
    assert (capture == 'ExtendedExampleVirt::pure_virtual(): Hello world')
    # Second-level subclass: both get_string1 and get_string2 overridden.
    ex12p2 = ExtendedExampleVirt2(15)
    with capture:
        assert (m.runExampleVirt(ex12p2, 50) == 68)
    assert (capture == '\n ExtendedExampleVirt::run(50), calling parent..\n Original implementation of ExampleVirt::run(state=17, value=51, str1=override1, str2=override2)\n ')
    # Constructor statistics: three live instances, freed on del, no copies.
    cstats = ConstructorStats.get(m.ExampleVirt)
    assert (cstats.alive() == 3)
    del ex12, ex12p, ex12p2
    assert (cstats.alive() == 0)
    assert (cstats.values() == ['10', '11', '17'])
    assert (cstats.copy_constructions == 0)
    assert (cstats.move_constructions >= 0)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.