code stringlengths 101 5.91M |
|---|
def get_subdomain_xs(ds, scales):
    """Map cumulative subdomain widths onto [-scale, scale] per dimension.

    For each (d, scale) pair: prepend a zero, take the cumulative sum of
    the widths, min-max normalise the resulting edge positions to [-1, 1],
    then multiply by `scale`.  Returns one 1-D array per dimension.
    """
    normalised = []
    for widths, scale in zip(ds, scales):
        edges = np.cumsum(np.pad(widths, (1, 0)))
        lo = edges.min()
        span = edges.max() - lo
        unit = 2 * (edges - lo) / span - 1
        normalised.append(scale * unit)
    return normalised
class CUB_Sentence_ft(VAE):
    """Unimodal VAE over CUB caption sentences (categorical likelihood).

    Depends on module-level globals not visible in this chunk:
    ``vocab_path``, ``maxSentLen``, ``vocabSize``.
    """

    def __init__(self, params):
        super(CUB_Sentence_ft, self).__init__(prior_dist=dist.Normal, likelihood_dist=FakeCategorical, post_dist=dist.Normal, enc=Enc(params.latent_dim), dec=Dec(params.latent_dim), params=params)
        # Prior mean is frozen; the (pre-softplus) scale trains only when
        # params.learn_prior is set.
        grad = {'requires_grad': params.learn_prior}
        self._pz_params = nn.ParameterList([nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), nn.Parameter(torch.zeros(1, params.latent_dim), **grad)])
        self.modelName = 'cubSft'
        self.llik_scaling = 1.0
        self.tie_modules()
        # tensor -> int numpy array
        self.fn_2i = (lambda t: t.cpu().numpy().astype(int))
        # truncate a sentence at the first <eos> (token id 2), inclusive
        self.fn_trun = (lambda s: (s[:(np.where((s == 2))[0][0] + 1)] if (2 in s) else s))
        self.vocab_file = vocab_path
        self.maxSentLen = maxSentLen
        self.vocabSize = vocabSize

    def tie_modules(self):
        # Tie the decoder's vocab projection to the encoder's embedding.
        self.dec.toVocabSize.weight = self.enc.embedding.weight

    def pz_params(self):
        # Prior (mean, scale); softplus + eta keeps the scale positive.
        # NOTE(review): likely meant to be a @property -- confirm against
        # how the VAE base class consumes pz_params.
        return (self._pz_params[0], (F.softplus(self._pz_params[1]) + Constants.eta))

    def getDataLoaders(batch_size, shuffle=True, device='cuda'):
        # NOTE(review): no `self` parameter, yet load_vocab() below calls
        # self.getDataLoaders(256), which would bind self to batch_size.
        # Probably a stripped @staticmethod or a lost `self` -- confirm.
        kwargs = ({'num_workers': 1, 'pin_memory': True} if (device == 'cuda') else {})
        tx = (lambda data: torch.Tensor(data))
        t_data = CUBSentences('../data', split='train', transform=tx, max_sequence_length=maxSentLen)
        s_data = CUBSentences('../data', split='test', transform=tx, max_sequence_length=maxSentLen)
        train_loader = DataLoader(t_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        test_loader = DataLoader(s_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
        return (train_loader, test_loader)

    def reconstruct(self, data, runPath, epoch):
        """Greedy-decode reconstructions of the first 8 sentences; print a
        few and write all of them to recon_<epoch>.txt under runPath."""
        recon = super(CUB_Sentence_ft, self).reconstruct(data[:8]).argmax(dim=(- 1)).squeeze()
        (recon, data) = (self.fn_2i(recon), self.fn_2i(data[:8]))
        (recon, data) = ([self.fn_trun(r) for r in recon], [self.fn_trun(d) for d in data])
        i2w = self.load_vocab()
        print('\n Reconstruction examples (excluding <PAD>):')
        for (r_sent, d_sent) in zip(recon[:3], data[:3]):
            print('[DATA] ==> {}'.format(' '.join((i2w[str(i)] for i in d_sent))))
            print('[RECON] ==> {}\n'.format(' '.join((i2w[str(i)] for i in r_sent))))
        with open('{}/recon_{:03d}.txt'.format(runPath, epoch), 'w+') as txt_file:
            for (r_sent, d_sent) in zip(recon, data):
                txt_file.write('[DATA] ==> {}\n'.format(' '.join((i2w[str(i)] for i in d_sent))))
                txt_file.write('[RECON] ==> {}\n\n'.format(' '.join((i2w[str(i)] for i in r_sent))))

    def generate(self, runPath, epoch):
        """Sample N*K sentences from the prior; print a few and write all
        to gen_samples_<epoch>.txt under runPath."""
        (N, K) = (5, 4)
        i2w = self.load_vocab()
        samples = super(CUB_Sentence_ft, self).generate(N, K).argmax(dim=(- 1)).squeeze()
        # Regroup from (K*N, len) to N lists of K sentences each.
        samples = samples.view(K, N, samples.size((- 1))).transpose(0, 1)
        samples = [[self.fn_trun(s) for s in ss] for ss in self.fn_2i(samples)]
        print('\n Generated examples (excluding <PAD>):')
        for s_sent in samples[0][:3]:
            print('[GEN] ==> {}'.format(' '.join((i2w[str(i)] for i in s_sent if (i != 0)))))
        with open('{}/gen_samples_{:03d}.txt'.format(runPath, epoch), 'w+') as txt_file:
            for s_sents in samples:
                for s_sent in s_sents:
                    txt_file.write('{}\n'.format(' '.join((i2w[str(i)] for i in s_sent))))
                txt_file.write('\n')

    def analyse(self, data, runPath, epoch):
        # Latent-space analysis intentionally not implemented for this model.
        pass

    def load_vocab(self):
        """Return the id->word mapping, building the vocab file first if missing."""
        if (not os.path.exists(self.vocab_file)):
            # Instantiating the loaders generates the vocab file as a side effect.
            (_, _) = self.getDataLoaders(256)
        with open(self.vocab_file, 'r') as vocab_file:
            vocab = json.load(vocab_file)
        return vocab['i2w']
def RunKaldiCommand(command, wait=True):
    """Run a Kaldi shell command in a subshell.

    When ``wait`` is True (default), block until the command finishes and
    return a ``(stdout, stderr)`` tuple of byte strings, raising
    ``Exception`` if the exit status is non-zero.  When ``wait`` is False,
    return the live ``subprocess.Popen`` handle for the caller to manage.
    """
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if not wait:
        return p
    stdout, stderr = p.communicate()
    # BUG FIX: the original used `p.returncode is not 0`, which compares
    # object identity rather than value -- unreliable for integers (and a
    # SyntaxWarning on modern Python).  Use value inequality.
    if p.returncode != 0:
        raise Exception('There was an error while running the command {0}\n\n{1}'.format(command, stderr))
    return (stdout, stderr)
class Visualization(object):
    """Draw tracking output frame-by-frame in an ``ImageViewer`` window."""

    def __init__(self, seq_info, update_ms):
        # seq_info['image_size'] is reversed here (presumably (h, w) -> (w, h)).
        image_shape = seq_info['image_size'][::(- 1)]
        aspect_ratio = float(image_shape[1]) / image_shape[0]
        # Fix the window width at 1024 px and preserve the aspect ratio.
        image_shape = (1024, int(aspect_ratio * 1024))
        self.viewer = ImageViewer(update_ms, image_shape, 'Figure %s' % seq_info['sequence_name'])
        self.viewer.thickness = 2
        self.frame_idx = seq_info['min_frame_idx']
        self.last_idx = seq_info['max_frame_idx']

    def run(self, frame_callback):
        """Run the viewer loop, invoking ``frame_callback`` once per frame."""
        self.viewer.run(lambda: self._update_fun(frame_callback))

    def _update_fun(self, frame_callback):
        # Returning False terminates the viewer loop.
        if self.frame_idx > self.last_idx:
            return False
        frame_callback(self, self.frame_idx)
        self.frame_idx += 1
        return True

    def set_image(self, image):
        self.viewer.image = image

    def draw_groundtruth(self, track_ids, boxes):
        """Draw ground-truth boxes, one stable color per track id."""
        self.viewer.thickness = 2
        for track_id, box in zip(track_ids, boxes):
            self.viewer.color = create_unique_color_uchar(track_id)
            # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin `int` is the documented replacement.
            self.viewer.rectangle(*box.astype(int), label=str(track_id))

    def draw_detections(self, detections):
        """Draw raw detections in a fixed color (no identity)."""
        self.viewer.thickness = 2
        self.viewer.color = (0, 0, 255)
        for i, detection in enumerate(detections):
            self.viewer.rectangle(*detection.tlwh)

    def draw_trackers(self, tracks):
        """Draw confirmed, currently-updated tracks with per-id colors."""
        self.viewer.thickness = 2
        for track in tracks:
            # Skip tentative tracks and tracks with no current detection.
            if (not track.is_confirmed()) or (track.time_since_update > 0):
                continue
            self.viewer.color = create_unique_color_uchar(track.track_id)
            # BUG FIX: np.int -> int (removed in NumPy 1.24).
            self.viewer.rectangle(*track.to_tlwh().astype(int), label=str(track.track_id))
class ImportTestCase(unittest.TestCase):
    """Smoke tests: the ``dtw`` package and its public names are importable."""

    def test_import(self):
        # Import side effects only; a failure surfaces as ImportError.
        import dtw
        from dtw import dtw as dist
        from dtw import accelerated_dtw

    def test_has_version(self):
        # The package must expose a __version__ attribute.
        from dtw import __version__
# BUG FIX: in the source these two lines began with a bare `.parametrize(...)`,
# which is a syntax error -- they are pytest parametrize decorators whose
# `@pytest.mark` prefix was lost.  Restored here; requires `import pytest`
# at the top of the module.
@pytest.mark.parametrize('loader_parameters', [{'path_data': [str(Path(__data_testing_dir__, 'data_test_png_tif'))], 'target_suffix': ['_seg-myelin-manual'], 'extensions': ['.png', '.tif'], 'roi_params': {'suffix': None, 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': [], 'balance': {}}, 'slice_axis': 'axial', 'slice_filter_params': {'filter_empty_mask': False, 'filter_empty_input': True}, 'patch_filter_params': {'filter_empty_mask': False, 'filter_empty_input': False}, 'multichannel': False}])
@pytest.mark.parametrize('model_parameters', [{'name': 'Unet', 'dropout_rate': 0.3, 'bn_momentum': 0.1, 'final_activation': 'sigmoid', 'depth': 3}])
def test_read_png_tif(download_data_testing_test_files, loader_parameters, model_parameters):
    """Build a BIDS dataframe from PNG/TIF files and load them as a 2-D
    segmentation dataset (smoke test: load_filenames must not raise)."""
    metadata = {}
    metadata[MetadataKW.PIXEL_SIZE] = [0.07, 0.07]
    metadata[MetadataKW.PIXEL_SIZE_UNITS] = 'um'
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=False)
    file_lst = bids_df.df['path'].tolist()
    filename_pairs = [(file_lst, None, None, (metadata if isinstance(metadata, list) else [metadata]))]
    slice_axis = imed_utils.AXIS_DCT[loader_parameters[LoaderParamsKW.SLICE_AXIS]]
    ds = imed_loader_mri2dseg.MRI2DSegmentationDataset(filename_pairs, slice_axis=slice_axis, nibabel_cache=True, transform=[None, None], slice_filter_fn=None)
    ds.load_filenames()
def main():
    """Merge word alignments back into each raw split and save the result.

    For every split: join the `<split>_word_align.tsv` table with the
    `<split>_raw.tsv` table on `id`, rewrite `word_text` via `replace(...)`
    where a replacement exists, and write `<split>_raw_seg.tsv`.
    """
    for split in splits:
        aligned_path = os.path.join(seg_dir, split + '_align', split + '_word_align.tsv')
        raw_path = os.path.join(data_dir, split + '_raw.tsv')
        aligned_table = load_df_from_tsv(aligned_path)
        raw_table = load_df_from_tsv(raw_path)
        merged = pd.merge(aligned_table, raw_table, on='id')
        rows = list(merged.T.to_dict().values())
        progress = tqdm.tqdm(range(len(rows)))
        kept_rows = []
        for row in rows:
            progress.update()
            # Rows whose word_text cannot be rewritten are dropped.
            updated_text = replace(row['word_text'], row['src_text'])
            if updated_text is not None:
                row['word_text'] = updated_text
                kept_rows.append(row)
        result = pd.DataFrame.from_dict(kept_rows)
        save_df_to_tsv(result, os.path.join(data_dir, split + '_raw_seg.tsv'))
class MarianTokenizer(PreTrainedTokenizer):
    """Sentencepiece tokenizer for Marian NMT checkpoints.

    Two sentencepiece models (source / target) share one json vocabulary;
    ``current_spm`` selects which model ``_tokenize`` applies.  Target
    language codes of the form ``>>xx<<`` at the start of a text are kept
    as single tokens.
    """
    vocab_files_names = vocab_files_names
    model_input_names = ['attention_mask']
    # Matches a leading target-language code such as `>>fr<<`.
    language_code_re = re.compile('>>.+<<')

    def __init__(self, vocab, source_spm, target_spm, source_lang=None, target_lang=None, unk_token='<unk>', eos_token='</s>', pad_token='<pad>', model_max_length=512, **kwargs):
        super().__init__(model_max_length=model_max_length, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, **kwargs)
        assert Path(source_spm).exists(), f'cannot find spm source {source_spm}'
        self.encoder = load_json(vocab)
        if (self.unk_token not in self.encoder):
            raise KeyError('<unk> token must be in vocab')
        assert (self.pad_token in self.encoder)
        # id -> token reverse map of the shared vocabulary.
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.source_lang = source_lang
        self.target_lang = target_lang
        self.supported_language_codes: list = [k for k in self.encoder if (k.startswith('>>') and k.endswith('<<'))]
        self.spm_files = [source_spm, target_spm]
        # Separate sentencepiece models for source vs target text.
        self.spm_source = load_spm(source_spm)
        self.spm_target = load_spm(target_spm)
        # Default to source-side tokenization until a target batch is built.
        self.current_spm = self.spm_source
        self._setup_normalizer()

    def _setup_normalizer(self):
        # Moses punctuation normalization is optional; degrade to identity.
        try:
            from sacremoses import MosesPunctNormalizer
            self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize
        except (ImportError, FileNotFoundError):
            warnings.warn('Recommended: pip install sacremoses.')
            self.punc_normalizer = (lambda x: x)

    def normalize(self, x: str) -> str:
        """Punctuation-normalize ``x``; any falsy input maps to ''."""
        return (self.punc_normalizer(x) if x else '')

    def _convert_token_to_id(self, token):
        # Unknown tokens fall back to the <unk> id.
        return self.encoder.get(token, self.encoder[self.unk_token])

    def remove_language_code(self, text: str):
        """Split a leading ``>>lang<<`` code (if any) from ``text``."""
        match = self.language_code_re.match(text)
        code: list = ([match.group(0)] if match else [])
        return (code, self.language_code_re.sub('', text))

    def _tokenize(self, text: str) -> List[str]:
        # Keep the language code as its own token, then sentencepiece the rest.
        (code, text) = self.remove_language_code(text)
        pieces = self.current_spm.EncodeAsPieces(text)
        return (code + pieces)

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Detokenize with the *target* sentencepiece model."""
        return self.spm_target.DecodePieces(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append </s> to a single sequence or to a concatenated pair."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.eos_token_id])
        return ((token_ids_0 + token_ids_1) + [self.eos_token_id])

    # NOTE(review): bare call -- looks like a stripped
    # ``@add_start_docstrings(...)`` decorator for the method below; confirm.
    _start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, return_tensors: str='pt', truncation=True, padding='longest', **unused) -> BatchEncoding:
        """Tokenize source (and optionally target) texts into one batch.

        Target texts are encoded with the target spm model and stored under
        ``labels``; ``current_spm`` is restored to the source model after.
        """
        if ('' in src_texts):
            raise ValueError(f'found empty string in src_texts: {src_texts}')
        self.current_spm = self.spm_source
        src_texts = [self.normalize(t) for t in src_texts]
        tokenizer_kwargs = dict(add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding)
        model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is not None):
            tokenizer_kwargs['max_length'] = max_target_length
        self.current_spm = self.spm_target
        model_inputs['labels'] = self(tgt_texts, **tokenizer_kwargs)['input_ids']
        self.current_spm = self.spm_source
        return model_inputs

    def vocab_size(self) -> int:
        # NOTE(review): presumably meant to carry @property (HF convention).
        return len(self.encoder)

    def save_vocabulary(self, save_directory: str) -> Tuple[str]:
        """Write the vocab json and copy both spm files into ``save_directory``."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        save_json(self.encoder, (save_dir / self.vocab_files_names['vocab']))
        for (orig, f) in zip(['source.spm', 'target.spm'], self.spm_files):
            dest_path = (save_dir / Path(f).name)
            if (not dest_path.exists()):
                copyfile(f, (save_dir / orig))
        return tuple(((save_dir / f) for f in self.vocab_files_names))

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        # Drop unpicklable sentencepiece handles and the normalizer closure.
        state = self.__dict__.copy()
        state.update({k: None for k in ['spm_source', 'spm_target', 'current_spm', 'punc_normalizer']})
        return state

    def __setstate__(self, d: Dict) -> None:
        # Re-load sentencepiece models and the normalizer after unpickling.
        self.__dict__ = d
        (self.spm_source, self.spm_target) = (load_spm(f) for f in self.spm_files)
        self.current_spm = self.spm_source
        self._setup_normalizer()

    def num_special_tokens_to_add(self, **unused):
        """Only </s> is appended to an encoded sequence."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)
        # <unk> is treated as a normal vocabulary token for masking.
        all_special_ids.remove(self.unk_token_id)
        return [(1 if (x in all_special_ids) else 0) for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 marks a special token; a trailing 1 accounts for the added </s>."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif (token_ids_1 is None):
            return (self._special_token_mask(token_ids_0) + [1])
        else:
            return (self._special_token_mask((token_ids_0 + token_ids_1)) + [1])
def find_reference_section_no_title_generic(docbody, marker_patterns):
    """Locate the start of a reference section that has no title line.

    Scans `docbody` bottom-up for a line matching one of `marker_patterns`
    with marker number '1' (the first reference).  The candidate is
    accepted if a line with marker number '2' appears within the next 10
    lines (or unconditionally when fewer than 5 lines remain to check).
    Returns a details dict (start line, marker, pattern, ...) or None.
    """
    if (not docbody):
        return None
    ref_start_line = ref_line_marker = None
    found_ref_sect = False
    for (reversed_index, line) in enumerate(reversed(docbody)):
        mark_match = regex_match_list(line.strip(), marker_patterns)
        if (mark_match and (mark_match.group('marknum') == '1')):
            mark_pattern = mark_match.re.pattern
            # Check the few lines following the candidate for marker '2'.
            next_test_lines = 10
            # Forward index of the line *after* the candidate.
            index = (len(docbody) - reversed_index)
            zone_to_check = docbody[index:(index + next_test_lines)]
            if (len(zone_to_check) < 5):
                # Candidate sits near the end of the document: accept it.
                found = True
            else:
                found = False
                for line_ in zone_to_check:
                    mark_match2 = regex_match_list(line_.strip(), marker_patterns)
                    if (mark_match2 and (mark_match2.group('marknum') == '2')):
                        found = True
                        break
            if found:
                found_ref_sect = True
                # Convert the reversed index back to a forward line index.
                ref_start_line = ((len(docbody) - 1) - reversed_index)
                ref_line_marker = mark_match.group('mark')
                ref_line_marker_pattern = mark_pattern
                break
    if found_ref_sect:
        ref_sectn_details = {'start_line': ref_start_line, 'title_string': None, 'marker': ref_line_marker.strip(), 'marker_pattern': ref_line_marker_pattern, 'title_marker_same_line': False}
    else:
        ref_sectn_details = None
    return ref_sectn_details
class CollabList(TabularList):
    """TabularList specialisation for collaborative-filtering items."""
    # Item/label classes and processor consumed by the data-block API.
    (_item_cls, _label_cls, _processor) = (CollabLine, FloatList, CollabProcessor)

    def reconstruct(self, t: Tensor):
        # Rebuild a CollabLine from a raw tensor; labels are left empty.
        return CollabLine(tensor(t), tensor([]), self.classes, self.col_names)
class EmptyCollectionException(RuntimeError):
    """Raised when an operation is attempted on an empty collection."""

    def __init__(self):
        # The message is fixed; this exception carries no extra context.
        super().__init__('The collection is empty')
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
    """Build a (possibly stage-1-pretrained) ResNet-10 feature extractor.

    When not testing and `stage1_weights` is set, loads the stage-1
    checkpoint for `dataset` from a 'stage1' directory next to `log_dir`
    (or from the default ./logs path).  Returns the backbone module.
    """
    print('Loading Scratch ResNet 10 Feature Model.')
    # NOTE(review): the `dropout` argument is ignored -- `dropout=None` is
    # hard-coded in the call below.  Confirm whether that is intentional.
    resnet10 = ResNet(BasicBlock, [1, 1, 1, 1], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=None)
    if (not test):
        if stage1_weights:
            assert dataset
            print(('Loading %s Stage 1 ResNet 10 Weights.' % dataset))
            if (log_dir is not None):
                # Checkpoints live in a sibling 'stage1' directory of log_dir.
                weight_dir = path.join('/'.join(log_dir.split('/')[:(- 1)]), 'stage1')
            else:
                weight_dir = ('./logs/%s/stage1' % dataset)
            print(('==> Loading weights from %s' % weight_dir))
            resnet10 = init_weights(model=resnet10, weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
        else:
            print('No Pretrained Weights For Feature Model.')
    return resnet10
def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):
    """Accumulate the union of LCS word sets against one reference sentence.

    Each evaluated sentence contributes the set of words in its longest
    common subsequence with `reference_sentence`; those sets are merged
    into `prev_union`.  Returns (count of newly added union words, the
    updated union set).  Raises ValueError on an empty sentence list.
    """
    if prev_union is None:
        prev_union = set()
    if len(evaluated_sentences) <= 0:
        raise ValueError('Collections must contain at least 1 sentence.')
    union = prev_union
    size_before = len(prev_union)
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for candidate in evaluated_sentences:
        candidate_words = _split_into_words([candidate])
        lcs_words = set(_recon_lcs(reference_words, candidate_words))
        combined_lcs_length += len(lcs_words)
        union = union.union(lcs_words)
    newly_added = len(union) - size_before
    return (newly_added, union)
def get_trimmed_wordvec_vectors(filename, vocab):
    """Build an embedding matrix for `vocab` from a word-vector text file.

    The vector dimensionality is inferred from the file's second line
    (the first line is skipped as a header).  The matrix has
    len(vocab) + 1 rows, randomly initialised in [-0.1, 0.1); rows whose
    word appears in the file are overwritten with its vector, indexed by
    the id stored in `vocab`.
    """
    with open(filename, 'r') as fh:
        fh.readline()  # skip the header line
        dim = len(fh.readline().strip().split()) - 1
    embeddings = np.random.uniform(-0.1, 0.1, size=(len(vocab) + 1, dim))
    with open(filename, 'r') as fh:
        for raw_line in fh:
            fields = raw_line.strip().split()
            token = fields[0]
            if token in vocab:
                embeddings[vocab[token]] = np.array([float(v) for v in fields[1:]])
    return embeddings
def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str):
    """Byte-encode every line of `<prefix>.<src>` then `<prefix>.<tgt>`,
    writing the results line-by-line into a single output file."""
    with open(out_path, 'w') as sink:
        # Source file first, then target file, in that fixed order.
        for lang in (src, tgt):
            with open(f'{in_path_prefix}.{lang}') as source:
                for line in source:
                    sink.write(byte_encode(line.strip()) + '\n')
class OptimizerDict(dict):
    """A dict of named optimizers with aggregate (de)serialisation.

    `state_dict` returns each optimizer's state dict in value order;
    `load_state_dict` restores them pairwise from a sequence in the same
    order.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def state_dict(self):
        """Collect every optimizer's state dict, in insertion order."""
        states = []
        for optimizer in self.values():
            states.append(optimizer.state_dict())
        return states

    def load_state_dict(self, state_dicts):
        """Restore each optimizer's state from the matching entry."""
        for optimizer, state in zip(self.values(), state_dicts):
            optimizer.load_state_dict(state)
class CombinedLoss(torch.nn.Module):
    """Weighted sum of several criteria applied to the same forward args.

    Each criterion in `criteria` is invoked with the arguments given to
    `forward`, and its result is scaled by the matching entry of `weight`
    (all ones by default).  The weight vector is registered as a buffer
    so it is moved/saved with the module.
    """

    def __init__(self, criteria: Sequence[torch.nn.Module], weight: Optional[Sequence[float]]=None, device: Optional[torch.device]=None):
        super().__init__()
        self.criteria = torch.nn.ModuleList(criteria)
        self.device = device
        if weight is None:
            weight_tensor = torch.ones(len(criteria))
        else:
            weight_tensor = torch.as_tensor(weight, dtype=torch.float32)
        # One scalar weight per criterion.
        assert weight_tensor.shape == (len(criteria),)
        self.register_buffer('weight', weight_tensor.to(self.device))

    def forward(self, *args):
        """Return sum_i weight[i] * criteria[i](*args) as a scalar tensor."""
        total = torch.tensor(0.0, device=self.device)
        for criterion, w in zip(self.criteria, self.weight):
            total = total + w * criterion(*args)
        return total
def data_load(filename, axisname, label):
    """Load one signal channel from a .mat file and slice it into CWT images.

    `axisname` looks like '<number>.<ext>'; its numeric prefix selects the
    matrix key ('X0NN...' for numbers below 100, 'XNNN...' otherwise,
    suffixed with the module-level `axis[0]`).  Only the first tenth of
    the flattened signal is consumed; consecutive non-overlapping windows
    of length `signal_size` are transformed with `CWT`.

    Returns (list of CWT images, list with `label` repeated per window).
    """
    datanumber = axisname.split('.')
    # FIX: use int() instead of eval() -- evaluating a filename fragment is
    # unsafe and unnecessary for parsing a plain integer.
    if int(datanumber[0]) < 100:
        realaxis = ('X0' + datanumber[0]) + axis[0]
    else:
        realaxis = ('X' + datanumber[0]) + axis[0]
    fl = loadmat(filename)[realaxis]
    fl = fl.reshape((- 1))
    data = []
    lab = []
    (start, end) = (0, signal_size)
    # Window over the first 10% of the signal only.
    while (end <= (fl.shape[0] / 10)):
        x = fl[start:end]
        imgs = CWT((signal_size + 1), x)
        data.append(imgs)
        lab.append(label)
        start += signal_size
        end += signal_size
    return (data, lab)
def _build_non_max_suppressor(nms_config):
    """Build a batched multiclass NMS callable from an NMS config.

    Validates the config's thresholds, then binds its values into
    `post_processing.batch_multiclass_non_max_suppression` via
    `functools.partial`.  Raises ValueError on invalid settings.
    """
    iou = nms_config.iou_threshold
    if (iou < 0) or (iou > 1.0):
        raise ValueError('iou_threshold not in [0, 1.0].')
    if nms_config.max_detections_per_class > nms_config.max_total_detections:
        raise ValueError('max_detections_per_class should be no greater than max_total_detections.')
    return functools.partial(
        post_processing.batch_multiclass_non_max_suppression,
        score_thresh=nms_config.score_threshold,
        iou_thresh=iou,
        max_size_per_class=nms_config.max_detections_per_class,
        max_total_size=nms_config.max_total_detections)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training worker: build model, optimizers and loaders,
    then run the train/validate loop for fine-grained classification.

    Several latent issues are flagged inline with NOTE(review) rather
    than fixed, since the branches are tightly coupled to the calling
    script's argument conventions.
    """
    global best_acc1
    args.gpu = gpu
    # Number of classes per fine-grained dataset.
    class_num = {'cub': 200, 'cars': 196, 'dogs': 120, 'fgvc': 100}
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # NOTE(review): torchvision resnet18's final feature size is 512, not
    # 2048 -- the two r18 branches below look copy/pasted from the r50
    # branches and would shape-mismatch at forward time; confirm.
    if (args.model == 'r18p'):
        model = models.resnet18(pretrained=True)
        model.fc = nn.Linear(in_features=2048, out_features=class_num[args.dataset], bias=True)
    if (args.model == 'r18'):
        model = models.resnet18()
        model.fc = nn.Linear(in_features=2048, out_features=class_num[args.dataset], bias=True)
    if (args.model == 'r50p'):
        model = models.resnet50(pretrained=True)
        model.fc = nn.Linear(in_features=2048, out_features=class_num[args.dataset], bias=True)
    if (args.model == 'r50'):
        model = models.resnet50()
        model.fc = nn.Linear(in_features=2048, out_features=class_num[args.dataset], bias=True)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the global batch size and worker count across processes.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((args.workers / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model)
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # Two parameter groups: pretrained backbone at 0.1x lr, new fc at 1x.
    # NOTE(review): the groups/optimizers are created only for r50 models
    # and sgd/sgdGC; any other config hits a NameError further down.
    if ((args.model == 'r50p') or (args.model == 'r50')):
        new_param_ids = set(map(id, model.module.fc.parameters()))
        base_params = [p for p in model.parameters() if (id(p) not in new_param_ids)]
        param_groups_base = [{'params': base_params, 'lr_mult': 0.1}]
    if ((args.model == 'r50p') or (args.model == 'r50')):
        param_groups_new = [{'params': model.module.fc.parameters(), 'lr_mult': 1.0}]
    if (args.alg == 'sgd'):
        optimizer_base = torch.optim.SGD(param_groups_base, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        optimizer_new = torch.optim.SGD(param_groups_new, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if (args.alg == 'sgdGC'):
        optimizer_base = SGD_GC(param_groups_base, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        optimizer_new = SGD_GC(param_groups_new, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    exp_lr_scheduler_new = lr_scheduler.MultiStepLR(optimizer_new, milestones=[50, 80], gamma=0.1)
    exp_lr_scheduler_base = lr_scheduler.MultiStepLR(optimizer_base, milestones=[50, 80], gamma=0.1)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): `optimizer` is never defined in this function
            # (only optimizer_base / optimizer_new exist); resuming would
            # raise NameError here -- confirm intended behavior.
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.Resize(512), transforms.RandomHorizontalFlip(), transforms.CenterCrop(448), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(512), transforms.CenterCrop(448), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train(train_loader, model, criterion, optimizer_base, optimizer_new, epoch, args)
        exp_lr_scheduler_new.step()
        exp_lr_scheduler_base.step()
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only rank-0 (or non-distributed) processes write checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1}, is_best)
# NOTE(review): bare call -- in the source this directly precedes the class
# like a stripped decorator (e.g. @..._HEADS_REGISTRY.register()); confirm.
_HEADS_REGISTRY.register()
class VanillaHungarianBBoxIOUTracker(BaseHungarianTracker):
    """Hungarian-matching tracker whose cost matrix is driven by bbox IoU.

    Pairs whose IoU exceeds `track_iou_threshold` get cost -1; every other
    entry keeps LARGE_COST_VALUE, so the assignment solver prefers
    high-IoU matches.
    """

    def __init__(self, *, video_height: int, video_width: int, max_num_instances: int=200, max_lost_frame_count: int=0, min_box_rel_dim: float=0.02, min_instance_period: int=1, track_iou_threshold: float=0.5, **kwargs):
        super().__init__(video_height=video_height, video_width=video_width, max_num_instances=max_num_instances, max_lost_frame_count=max_lost_frame_count, min_box_rel_dim=min_box_rel_dim, min_instance_period=min_instance_period)
        self._track_iou_threshold = track_iou_threshold

    def from_config(cls, cfg: CfgNode_):
        """Translate a CfgNode into constructor kwargs (plus a _target_ key).

        NOTE(review): takes `cls` but carries no @classmethod decorator in
        this chunk -- presumably stripped along with @configurable; confirm.
        """
        assert ('VIDEO_HEIGHT' in cfg.TRACKER_HEADS)
        assert ('VIDEO_WIDTH' in cfg.TRACKER_HEADS)
        video_height = cfg.TRACKER_HEADS.get('VIDEO_HEIGHT')
        video_width = cfg.TRACKER_HEADS.get('VIDEO_WIDTH')
        max_num_instances = cfg.TRACKER_HEADS.get('MAX_NUM_INSTANCES', 200)
        max_lost_frame_count = cfg.TRACKER_HEADS.get('MAX_LOST_FRAME_COUNT', 0)
        min_box_rel_dim = cfg.TRACKER_HEADS.get('MIN_BOX_REL_DIM', 0.02)
        min_instance_period = cfg.TRACKER_HEADS.get('MIN_INSTANCE_PERIOD', 1)
        track_iou_threshold = cfg.TRACKER_HEADS.get('TRACK_IOU_THRESHOLD', 0.5)
        return {'_target_': 'detectron2.tracking.vanilla_hungarian_bbox_iou_tracker.VanillaHungarianBBoxIOUTracker', 'video_height': video_height, 'video_width': video_width, 'max_num_instances': max_num_instances, 'max_lost_frame_count': max_lost_frame_count, 'min_box_rel_dim': min_box_rel_dim, 'min_instance_period': min_instance_period, 'track_iou_threshold': track_iou_threshold}

    def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray:
        """Build a (len(instances), len(prev_instances)) cost matrix.

        NOTE(review): the IoU/pairing below reads self._prev_instances
        rather than the `prev_instances` parameter -- confirm they are
        always the same object at the call site.
        """
        assert ((instances is not None) and (prev_instances is not None))
        iou_all = pairwise_iou(boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes)
        bbox_pairs = create_prediction_pairs(instances, self._prev_instances, iou_all, threshold=self._track_iou_threshold)
        # Default every entry to the large cost; matches get -1 below.
        cost_matrix = np.full((len(instances), len(prev_instances)), LARGE_COST_VALUE)
        return self.assign_cost_matrix_values(cost_matrix, bbox_pairs)

    def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray:
        """Set cost -1 for each matched (idx, prev_idx) pair; return the matrix."""
        for pair in bbox_pairs:
            cost_matrix[pair['idx']][pair['prev_idx']] = (- 1)
        return cost_matrix
class dataset_it(Dataset):
    """torchio patch dataset over one image folder (optionally with masks).

    Builds tio.Subject entries from `<data_dir>/<input1>` (plus
    `<data_dir>/mask` when `sup` is True, matched by file name), optionally
    resamples the subject list to exactly `num_images` entries, and wraps
    everything in a tio.Queue for uniform patch sampling.
    """

    def __init__(self, data_dir, input1, transform_1, queue_length=20, samples_per_volume=5, patch_size=128, num_workers=8, shuffle_subjects=True, shuffle_patches=True, sup=True, num_images=None):
        super(dataset_it, self).__init__()
        self.subjects_1 = []
        image_dir_1 = ((data_dir + '/') + input1)
        if sup:
            mask_dir = (data_dir + '/mask')
        for i in os.listdir(image_dir_1):
            image_path_1 = os.path.join(image_dir_1, i)
            if sup:
                # Supervised: the label map shares the image's file name.
                mask_path = os.path.join(mask_dir, i)
                subject_1 = tio.Subject(image=tio.ScalarImage(image_path_1), mask=tio.LabelMap(mask_path), ID=i)
            else:
                subject_1 = tio.Subject(image=tio.ScalarImage(image_path_1), ID=i)
            self.subjects_1.append(subject_1)
        if (num_images is not None):
            # Trim or repeat the subject list to exactly num_images entries:
            # whole copies plus a random remainder when oversampling.
            len_img_paths = len(self.subjects_1)
            quotient = (num_images // len_img_paths)
            remainder = (num_images % len_img_paths)
            if (num_images <= len_img_paths):
                self.subjects_1 = self.subjects_1[:num_images]
            else:
                rand_indices = torch.randperm(len_img_paths).tolist()
                new_indices = rand_indices[:remainder]
                self.subjects_1 = (self.subjects_1 * quotient)
                self.subjects_1 += [self.subjects_1[i] for i in new_indices]
        self.dataset_1 = tio.SubjectsDataset(self.subjects_1, transform=transform_1)
        self.queue_train_set_1 = tio.Queue(subjects_dataset=self.dataset_1, max_length=queue_length, samples_per_volume=samples_per_volume, sampler=UniformSampler(patch_size), num_workers=num_workers, shuffle_subjects=shuffle_subjects, shuffle_patches=shuffle_patches)
class CelebAHQValidation(FacesBase):
    """CelebA-HQ validation split served as numpy images of edge `size`."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = 'data/celebahq'
        # One relative image path per line in the split file.
        with open('data/celebahqvalidation.txt', 'r') as f:
            relpaths = f.read().splitlines()
        paths = [os.path.join(root, relpath) for relpath in relpaths]
        # Deterministic (non-random) crops for validation.
        self.data = NumpyPaths(paths=paths, size=size, random_crop=False)
        self.keys = keys
class ContextAdjustmentLayer(nn.Module):
    """Refine a raw disparity map (and occlusion mask) using image context.

    A stack of `num_blocks` residual blocks predicts a disparity residual
    from the concatenated (disparity, image) input; a separate small conv
    head predicts the final occlusion probability from (occlusion, image).
    """

    def __init__(self, num_blocks=8, feature_dim=16, expansion=3):
        super().__init__()
        self.num_blocks = num_blocks
        # 4 input channels: 1 disparity + 3 image.
        self.in_conv = nn.Conv2d(4, feature_dim, kernel_size=3, padding=1)
        self.layers = nn.ModuleList([ResBlock(feature_dim, expansion) for _ in range(num_blocks)])
        self.out_conv = nn.Conv2d(feature_dim, 1, kernel_size=3, padding=1)
        # Occlusion head: (1 occlusion + 3 image) channels in, sigmoid out.
        self.occ_head = nn.Sequential(weight_norm(nn.Conv2d((1 + 3), feature_dim, kernel_size=3, padding=1)), weight_norm(nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1)), nn.ReLU(inplace=True), weight_norm(nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1)), weight_norm(nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1)), nn.ReLU(inplace=True), nn.Conv2d(feature_dim, 1, kernel_size=3, padding=1), nn.Sigmoid())

    def forward(self, disp_raw: Tensor, occ_raw: Tensor, img: Tensor):
        """Return (refined disparity, occlusion probability in [0, 1])."""
        feat = self.in_conv(torch.cat([disp_raw, img], dim=1))
        for layer in self.layers:
            # Each residual block is additionally conditioned on disp_raw.
            feat = layer(feat, disp_raw)
        disp_res = self.out_conv(feat)
        # Residual refinement of the input disparity.
        disp_final = (disp_raw + disp_res)
        occ_final = self.occ_head(torch.cat([occ_raw, img], dim=1))
        return (disp_final, occ_final)
def writetextfile(data, filename):
    """Write the given sequence of strings to `filename`.

    Note: `writelines` does not append newline characters; each element
    of `data` must carry its own terminator if one is wanted.
    """
    # FIX: dropped the explicit f.close() -- the `with` statement already
    # closes the file on exit, so the call was redundant.
    with open(filename, 'w') as f:
        f.writelines(data)
def test():
    """Smoke-test the network: forward one random image and count parameters."""
    net = Net(nclass=23).cuda()
    print(net)
    # NOTE: torch.autograd.Variable is a legacy no-op wrapper on modern
    # PyTorch; kept for compatibility with the rest of this codebase.
    x = Variable(torch.randn(1, 3, 224, 224)).cuda()
    y = net(x)
    print(y)
    # FIX: renamed the accumulator -- the original shadowed the builtin `sum`.
    total_params = 0
    for param in net.parameters():
        total_params += param.nelement()
    print('Total params:', total_params)
class UnitTest(unittest.TestCase):
    """Tests for the image-utility helpers (EXIF GPS, captioning, base64)."""

    def setUp(self) -> None:
        os.environ['IMAGE_SERVER_IP'] = 'test_server_ip'
        return super().setUp()

    def tearDown(self) -> None:
        # Clean up artifacts a mocked call may have written to disk.
        if os.path.exists('./MagicMock'):
            shutil.rmtree('./MagicMock')

    def test_find_GPS_image(self):
        # Absolute CI path first, relative path as the local fallback.
        img_file_path = (('/intel-extension-for-transformers/' + 'intel_extension_for_transformers/neural_chat/tests') + '/ci/server/test_images/img_bird.JPG')
        if os.path.exists(img_file_path):
            res = find_GPS_image(img_file_path)
        else:
            res = find_GPS_image('./ci/server/test_images/img_bird.JPG')
        self.assertIn('2019:06:18', res['date_information'])
        self.assertEqual(122., res['GPS_information']['GPSLongitude'])

    def test_generate_caption(self):
        img_file_path = (('/intel-extension-for-transformers/' + 'intel_extension_for_transformers/neural_chat/tests') + '/ci/server/test_images/img_bird.JPG')
        if os.path.exists(img_file_path):
            res = generate_caption(img_file_path)
        else:
            res = generate_caption('./ci/server/test_images/img_bird.JPG')
        self.assertIn('seagulls', res)

    def test_image_byte64(self):
        img_file_path = (('/intel-extension-for-transformers/' + 'intel_extension_for_transformers/neural_chat/tests') + '/ci/server/test_images/img_bird.JPG')
        if os.path.exists(img_file_path):
            img_b64 = image_to_byte64(img_file_path)
        else:
            img_b64 = image_to_byte64('./ci/server/test_images/img_bird.JPG')
        # Known suffix of the expected base64 payload for the test image.
        self.assertIn('79qt3Sr5Y9utCKR//Z', str(img_b64))
        img = byte64_to_image(img_b64)
        self.assertIsInstance(img, Image.Image)

    def test_transfer_xywh(self):
        facial_area = {'x': 1, 'y': 2, 'w': 3, 'h': 4}
        res = transfer_xywh(facial_area)
        self.assertIn('1_2_3_4_', res)
class HPOMixin():
FIT_KEYS = {'x', 'y', 'batch_size', 'epochs', 'verbose', 'callbacks', 'validation_split', 'validation_data', 'shuffle', 'class_weight', 'sample_weight', 'initial_epoch', 'steps_per_epoch', 'validation_steps', 'validation_batch_size', 'validation_freq', 'max_queue_size', 'workers', 'use_multiprocessing'}
TUNE_CREATE_KEYS = {'storage', 'sampler', 'sampler_kwargs', 'pruner', 'pruner_kwargs', 'study_name', 'load_if_exists', 'direction', 'directions'}
TUNE_RUN_KEYS = {'n_trials', 'timeout', 'n_jobs', 'catch', 'tune_callbacks', 'gc_after_trial', 'show_progress_bar'}
PROXYED_METHODS = ['predict', 'predict_on_batch', 'evaluate', 'test_on_batch', 'to_json', 'to_yaml', 'summary', 'save', 'save_spec', 'save_weights', 'get_layer']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.objective = None
self.study = None
self.tune_end = False
self._lazymodel = None
self.backend = create_hpo_backend()
def _fix_target_metric(self, target_metric, fit_kwargs):
compile_metrics = self.compile_kwargs.get('metrics', None)
if (target_metric is None):
if (fit_kwargs.get('validation_data', None) or fit_kwargs.get('validation_split', None)):
prefix = 'val_'
else:
prefix = ''
if (compile_metrics is None):
target_metric = (prefix + 'loss')
elif isinstance(compile_metrics, list):
target_metric = (prefix + str(compile_metrics[0]))
else:
target_metric = (prefix + str(compile_metrics))
elif isinstance(target_metric, list):
invalidInputError(False, 'multiple objective metric is not supported.')
else:
stripped_target_metric = _strip_val_prefix(target_metric)
if (compile_metrics is None):
if (stripped_target_metric not in ['loss', 'val_loss']):
invalidInputError(False, 'target metric is should be loss or val_loss if metrics is not provided in compile')
elif isinstance(compile_metrics, list):
target_not_in = (stripped_target_metric not in ['loss', 'val_loss'])
if ((stripped_target_metric not in compile_metrics) and target_not_in):
invalidInputError(False, 'invalid target metric')
else:
target_not_in = (stripped_target_metric not in ['loss', 'val_loss'])
if ((stripped_target_metric != compile_metrics) and target_not_in):
invalidInputError(False, 'invalid target metric')
return target_metric
@staticmethod
def _create_objective(model_builder, target_metric, isprune, fit_kwargs, backend, report_method):
    """Build the tuning objective wrapper around a model-builder callable.

    Must be a @staticmethod: it is invoked both as HPOMixin._create_objective
    and through an instance (self._create_objective); without the decorator the
    instance call would misbind self as model_builder.
    """
    objective = Objective(model=model_builder, target_metric=target_metric, pruning=isprune, backend=backend, report_method=report_method, **fit_kwargs)
    return objective
def _get_model_builder_args(self):
return {'model_init_args_func': self._model_init_args_func, 'model_init_args_func_kwargs': self._get_model_init_args_func_kwargs(), 'modelcls': self.model_class, 'compile_args': self.compile_args, 'compile_kwargs': self.compile_kwargs, 'backend': self.backend}
def _get_model_builder(model_init_args_func, model_init_args_func_kwargs, modelcls, compile_args, compile_kwargs, backend):
def model_builder(trial):
model = modelcls(**model_init_args_func(trial, **model_init_args_func_kwargs))
optimizer = compile_kwargs.get('optimizer', None)
if (optimizer and isinstance(optimizer, AutoObject)):
optimizer = backend.instantiate(trial, optimizer)
compile_kwargs['optimizer'] = optimizer
model.compile(*compile_args, **compile_kwargs)
return model
return model_builder
@staticmethod
def _run_search_subproc(study, get_model_builder_func, get_model_builder_func_args, backend, target_metric, isprune, report_method, fit_kwargs, run_kwargs):
    """Worker-process entry point for a parallel search.

    Must be a @staticmethod so run_parallel can call it with keyword
    arguments only (see _run_search_n_procs); a bound method would
    receive self as an unexpected extra argument.
    """
    model_builder = get_model_builder_func(**get_model_builder_func_args)
    objective = HPOMixin._create_objective(model_builder, target_metric, isprune, fit_kwargs, backend, report_method)
    study.optimize(objective, **run_kwargs)
def _run_search_n_procs(self, isprune, n_procs=4):
    """Fan the search out over n_procs worker processes, splitting the trial budget."""
    subp_run_kwargs = copy.deepcopy(self.run_kwargs)
    total_trials = subp_run_kwargs.get('n_trials', None)
    if total_trials:
        # Each worker runs its share of the total budget, rounded up.
        subp_run_kwargs['n_trials'] = math.ceil(total_trials / n_procs)
    subp_kwargs = {
        'study': self.study,
        'get_model_builder_func': self._get_model_builder,
        'get_model_builder_func_args': self._get_model_builder_args(),
        'backend': self.backend,
        'target_metric': self.target_metric,
        'isprune': isprune,
        'report_method': self.report_method,
        'fit_kwargs': self.fit_kwargs,
        'run_kwargs': subp_run_kwargs,
    }
    run_parallel(func=self._run_search_subproc, kwargs=subp_kwargs, n_procs=n_procs)
def _run_search_n_threads(self, isprune, n_threads=1):
self.run_kwargs['n_jobs'] = n_threads
self._run_search(isprune)
def _run_search(self, isprune):
if (self.objective is None):
self.objective = self._create_objective(self._model_build, self.target_metric, isprune, self.fit_kwargs, self.backend, self.report_method)
self.study.optimize(self.objective, **self.run_kwargs)
def _prepare_report_method(mode, direction):
if (mode == 'auto'):
if (direction == 'maximize'):
mode = 'max'
else:
mode = 'min'
if (mode == 'max'):
return max
elif (mode == 'min'):
return min
elif (mode == 'last'):
return (lambda x: x[(- 1)])
else:
invalidInputError(False, 'mode is not recognized')
def search(self, resume=False, target_metric=None, n_parallels=1, target_metric_mode='last', **kwargs):
    """Run (or resume) the hyperparameter search.

    resume: reuse an existing study where possible instead of creating one.
    target_metric: metric to optimize; derived from compile metrics if None.
    n_parallels: >1 runs trials in parallel (threads for in-memory storage,
        processes when a storage URL is given).
    target_metric_mode: how per-epoch values are reported ('max'/'min'/'last'/'auto').
    kwargs: split between fit, study-creation, and study-run options.
    """
    do_create = True
    if resume:
        # In-memory storage: we can only resume if a study already exists in this process.
        if ('storage' not in kwargs.keys()) or (kwargs['storage'].strip() == ''):
            if (self.study is None):
                warnings.warn("A new study is created since there's no existing study to resume from.", UserWarning)
            else:
                do_create = False
    # A blank storage string means "no storage" — drop it entirely.
    if (('storage' in kwargs.keys()) and (kwargs['storage'].strip() == '')):
        del kwargs['storage']
    search_kwargs = (kwargs or {})
    self.target_metric = self._fix_target_metric(target_metric, kwargs)
    _validate_args(search_kwargs, self.target_metric, legal_keys=[HPOMixin.FIT_KEYS, HPOMixin.TUNE_CREATE_KEYS, HPOMixin.TUNE_RUN_KEYS])
    # Partition kwargs into create/run/fit buckets.
    (self.create_kwargs, self.run_kwargs, self.fit_kwargs) = _prepare_args(search_kwargs, HPOMixin.TUNE_CREATE_KEYS, HPOMixin.TUNE_RUN_KEYS, HPOMixin.FIT_KEYS, self.backend)
    self.report_method = self._prepare_report_method(target_metric_mode, self.create_kwargs.get('direction', None))
    if do_create:
        self.study = _create_study(resume, self.create_kwargs, self.backend)
    isprune = (True if self.create_kwargs.get('pruner', None) else False)
    if (n_parallels and (n_parallels > 1)):
        # Threads when there is no shared storage; processes otherwise.
        if (self.create_kwargs.get('storage', '').strip() == ''):
            self._run_search_n_threads(isprune, n_threads=n_parallels)
        else:
            self._run_search_n_procs(isprune, n_procs=n_parallels)
    else:
        self._run_search(isprune)
    # A finished search still needs end_search() before fit/predict.
    self.tune_end = False
def search_summary(self):
    """Return a summary of all trials in the current study."""
    return _search_summary(self.study)
def end_search(self, use_trial_id=-1):
    """Materialize the chosen trial (-1 = best) into a concrete model and finish tuning."""
    self._lazymodel = _end_search(study=self.study, model_builder=self._model_build, use_trial_id=use_trial_id)
    self.tune_end = True
def compile(self, *args, **kwargs):
    """Record compile arguments; actual compilation is deferred until a trial model is built."""
    self.compile_args = args
    self.compile_kwargs = kwargs
def fit(self, *args, **kwargs):
    """Fit the best model found by the search, finalizing the search first if needed."""
    if not self.tune_end:
        # Materialize the best trial before training.
        self.end_search()
    self._lazymodel.fit(*args, **kwargs)
def _model_compile(self, model, trial):
compile_args = copy.deepcopy(self.compile_args)
compile_kwargs = copy.deepcopy(self.compile_kwargs)
optimizer = compile_kwargs.get('optimizer', None)
if (optimizer and isinstance(optimizer, AutoObject)):
optimizer = self.backend.instantiate(trial, optimizer)
compile_kwargs['optimizer'] = optimizer
model.compile(*compile_args, **compile_kwargs)
def _model_build(self, trial):
modelcls = self.model_class
model = modelcls(**self._model_init_args(trial))
self._model_compile(model, trial)
return model
def _proxy(self, name, method, *args, **kwargs):
if (not self._lazymodel):
invalidInputError(False, (("Model is not actually built yet. Please call 'end_search' before calling '" + name) + "'"))
internal_m = getattr(self._lazymodel, name)
return internal_m(*args, **kwargs) |
class VeEvalDataset(VqaEvalDataset):
    """Visual-entailment evaluation dataset built on the VQA eval dataset.

    NOTE(review): the hard-coded 3 is the first positional argument of
    VqaEvalDataset — presumably the number of answer classes
    (entailment/neutral/contradiction); confirm against the base class.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(3, *args, **kwargs)
def test_ic_uni(model, data_loader, model_path=None, ic_type='spearman', verbose=False):
    """Evaluate a model daily over the loader, returning mean log-cosh loss and mean IC.

    ic_type: 'spearman' or 'pearson'. Raises ValueError for any other value
    (previously an unknown ic_type crashed later with NameError on `ic`).
    """
    if ic_type not in ('spearman', 'pearson'):
        raise ValueError('unknown ic_type: {}'.format(ic_type))
    if model_path:
        model.load_state_dict(torch.load(model_path))
    model.eval()
    loss_all = []
    ic_all = []
    for slc in tqdm(data_loader.iter_daily(), total=data_loader.daily_length):
        (data, label, _, _) = data_loader.get(slc)
        with torch.no_grad():
            pred = model.predict(data)
        # Drop entries with missing labels before scoring.
        mask = ~torch.isnan(label)
        pred = pred[mask]
        label = label[mask]
        # Log-cosh loss: smooth, robust alternative to MSE.
        loss = torch.mean(torch.log(torch.cosh(pred - label)))
        if ic_type == 'spearman':
            ic = spearman_corr(pred, label)
        else:
            ic = pearson_corr(pred, label)
        loss_all.append(loss.item())
        ic_all.append(ic)
    (loss, ic) = (np.mean(loss_all), np.mean(ic_all))
    if verbose:
        print('IC: ', ic)
    return (loss, ic)
def stdout(ts_or_cell_num: Union[int, Timestamp]) -> Optional[str]:
    """Return the captured stdout of the cell identified by timestamp or counter.

    Returns None if the cell produced no captured output; raises ValueError
    if the cell has not executed yet.
    """
    # Resolve the counter outside the try: the original wrapped this call too,
    # so a KeyError from _to_cell_num would hit the handler with cell_num unbound.
    cell_num = _to_cell_num(ts_or_cell_num)
    try:
        captured = cells().at_counter(cell_num).captured_output
    except KeyError as err:
        raise ValueError('cell with counter %d has not yet executed' % cell_num) from err
    return None if captured is None else str(captured.stdout)
def _format_entry(indent, entry):
    """Render one config entry as a colorized, indented "key = value" line.

    Color encodes the entry's change status (typechanged/added/modified);
    docstrings are rendered grey as triple-quoted blocks; non-ConfigEntry
    entries (section headers) render as "key:". Inline docs are appended,
    column-aligned when the assignment is short.
    """
    color = ''
    indent = (' ' * indent)
    # Change-status color, in priority order.
    if entry.typechanged:
        color = RED
    elif entry.added:
        color = GREEN
    elif entry.modified:
        color = BLUE
    if (entry.key == '__doc__'):
        # Docstring entry: keep continuation lines aligned with the indent.
        color = GREY
        doc_string = entry.value.replace('\n', ('\n' + indent))
        assign = '{}"""{}"""'.format(indent, doc_string)
    elif isinstance(entry, ConfigEntry):
        assign = (((indent + entry.key) + ' = ') + PRINTER.pformat(entry.value))
    else:
        # Section/namespace entry: no value to print.
        assign = ((indent + entry.key) + ':')
    if entry.doc:
        doc_string = (((GREY + '# ') + entry.doc) + ENDC)
        if (len(assign) <= 35):
            # Column-align short assignments so inline docs line up.
            assign = '{:<35} {}'.format(assign, doc_string)
        else:
            assign += (' ' + doc_string)
    end = (ENDC if color else '')
    return ((color + assign) + end)
@torch.no_grad()  # FIX: restore the decorator (the bare `_grad()` statement was a mangled `@torch.no_grad()` and raised NameError at import)
def compute_throughput(model, batch_size=128, resolution=224):
    """Measure inference throughput (images/sec) of `model` on CUDA.

    Runs 3 warmup iterations, then times 30 forward passes on a random
    batch, synchronizing the GPU around each measurement.
    """
    torch.cuda.empty_cache()
    warmup_iters = 3
    num_iters = 30
    model.eval()
    model.to('cuda')
    timing = []
    inputs = torch.randn(batch_size, 3, resolution, resolution, device='cuda')
    # Warmup: let kernels/caches settle before timing.
    for _ in range(warmup_iters):
        model(inputs)
    torch.cuda.synchronize()
    for _ in range(num_iters):
        start = time.time()
        model(inputs)
        torch.cuda.synchronize()  # wait for the GPU so wall time is meaningful
        timing.append(time.time() - start)
    timing = torch.as_tensor(timing, dtype=torch.float32)
    return batch_size / timing.mean()
class Adam(Optimizer):
    """Adam/AMSGrad with optional gradient centralization (GC).

    GC options (applied via the external `centralized_gradient` helper):
      use_gc: enable gradient centralization.
      gc_conv_only: centralize only conv-shaped parameters.
      gc_loc: True -> centralize the raw gradient before the moment updates;
              False -> centralize the final update direction instead.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, use_gc=False, gc_conv_only=False, gc_loc=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)
        self.gc_loc = gc_loc
        self.use_gc = use_gc
        self.gc_conv_only = gc_conv_only

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        # Older checkpoints may lack the amsgrad flag.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()  # FIX: restore the decorator (the bare `_grad()` statement was a mangled `@torch.no_grad()`)
    def step(self, closure=None):
        """Perform a single optimization step; returns the closure's loss, if any."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['weight_decay'] != 0:
                    # Classic (L2) weight decay folded into the gradient.
                    grad = grad.add(p, alpha=group['weight_decay'])
                if self.gc_loc:
                    grad = centralized_gradient(grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)
                # First and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Keep the max of the second moments for a non-increasing step size.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                G_grad = exp_avg / denom
                if self.gc_loc is False:
                    # GC applied to the final update direction instead of the raw gradient.
                    G_grad = centralized_gradient(G_grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)
                p.add_(G_grad, alpha=-step_size)
        return loss
def mriAdjointOp(rawdata, coilsens, mask):
    """Adjoint MRI SENSE operator: masked k-space -> coil-combined image.

    Applies the sampling mask, inverse-FFTs each coil image, weights by the
    conjugate coil sensitivities, and sums over the coil axis (axis 0).
    """
    undersampled = rawdata * mask
    coil_images = ifft2c(undersampled)
    return np.sum(coil_images * np.conj(coilsens), axis=0)
def get_module_names(path_dir, exclude=None):
    """Search a given `path_dir` and return all the modules contained inside except those in `exclude`"""
    # FIX 1: the docstring above was previously a bare string *after* the first
    # statements, so it was a no-op expression instead of a docstring.
    if exclude is None:
        exclude = _default_exclude
    # Directories first (reverse sort on (is_dir, name)).
    files = sorted(path_dir.glob('*'), key=lambda x: (x.is_dir(), x.name), reverse=True)
    res = [f'{path_dir.name}']
    for f in files:
        if f.is_dir() and f.name in exclude:
            continue
        if any(f.name.endswith(ex) for ex in exclude):
            continue
        if f.suffix == '.py':
            res.append(f'{path_dir.name}.{f.stem}')
        elif f.is_dir():
            # FIX 2: propagate the caller's exclude list into the recursion
            # (previously it silently reset to _default_exclude).
            res += [f'{path_dir.name}.{name}' for name in get_module_names(f, exclude)]
    return res
class TreeLSTM_IO(object):
    """Plain container bundling the tensors a TreeLSTM traversal reads and writes."""

    def __init__(self, hidden_tensor, order_tensor, order_count, dists_tensor, commitments_tensor, dropout_mask):
        self.hidden = hidden_tensor  # node hidden states
        self.order = order_tensor  # traversal order over nodes
        self.order_count = order_count  # number of nodes already ordered
        self.dists = dists_tensor  # per-node distributions (semantics set by caller — confirm)
        self.commitments = commitments_tensor  # per-node committed choices (semantics set by caller — confirm)
        self.dropout_mask = dropout_mask  # shared dropout mask reused across the traversal
class MetaTrainer(nn.Module):
    """MAML-style meta-trainer for a CLIP-to-GPT mapper network.

    forward() runs the meta-training step over a batch of tasks (inner-loop
    adaptation on support sets, outer-loop update from query losses);
    finetunning() evaluates adaptation on a single task with a deep-copied
    model so the meta-parameters are untouched.
    """

    def __init__(self, args, experiment_id, is_pretrained, new_words=False):
        super(MetaTrainer, self).__init__()
        self.update_lr = args.update_lr  # inner-loop (task-level) learning rate
        self.meta_lr = args.meta_lr  # outer-loop (meta) learning rate
        self.n_way = args.n_way
        self.k_spt = args.k_spt  # support shots per class
        self.k_qry = args.k_qry  # query shots per class
        self.task_num = args.task_num
        self.update_step = args.update_step  # inner updates at meta-train time
        self.update_step_test = args.update_step_test  # inner updates at fine-tune/eval time
        self.prefix_length = args.prefix_length
        self.seq_len = args.seq_len
        self.device = set_device()
        self.model_name = '{}.pt'.format(experiment_id)
        self.log_file_path = (PATH + '/logs/log_{}.txt'.format(experiment_id))
        self.model = MetaLearner(prefix_length=self.prefix_length, seq_len=self.seq_len, clip_model_type=args.clip_model_type, new_words=new_words)
        if is_pretrained:
            # Warm-start the mapper from a COCO-trained checkpoint.
            model_dict = torch.load((MODELS_PATH + 'coco_trained_model.pt'), map_location=torch.device(self.device))
            self.model.mapper_net.load_state_dict(model_dict)
        self.model.to(self.device)
        # Only the mapper network is meta-optimized.
        self.meta_optim = optim.AdamW(self.model.mapper_net.parameters(), lr=self.meta_lr)
        self.pad_token_id = self.model.gpt_tokenizer.eos_token_id

    def forward(self, x_spt, y_spt, y_spt_mask, id_spt, x_qry, y_qry, y_qry_mask, id_qry):
        """One meta-training step over task_num tasks; returns (accs, query losses)."""
        task_num = x_spt.shape[0]
        querysz = x_qry.shape[1]
        # losses_q[j] / corrects[j] index the query metrics after j inner updates.
        losses_q = torch.zeros((self.update_step + 1)).to(self.device)
        corrects = [0 for _ in range((self.update_step + 1))]
        for i in range(task_num):
            # Inner step 0: loss on the support set with the current meta-parameters.
            logits = self.model(x_spt[i], y_spt[i], y_spt_mask[i], list(self.model.mapper_net.parameters()), get_pred_tokens=False)
            loss = F.cross_entropy(logits.reshape((- 1), len(self.model.gpt_tokenizer)), y_spt[i].flatten(), ignore_index=self.pad_token_id)
            grad = torch.autograd.grad(loss, self.model.mapper_net.parameters())
            # First adapted weights: one SGD step on the support loss.
            fast_weights = list(map((lambda p: (p[1] - (self.update_lr * p[0]))), zip(grad, self.model.mapper_net.parameters())))
            # NOTE(review): question and answer are both y_qry[i] — the query text
            # serves as both conditioning input and target; confirm intended.
            question = y_qry[i]
            answer = y_qry[i]
            with torch.no_grad():
                # Query metrics before any adaptation (index 0).
                (logits_q, pred_tokens) = self.model(x_qry[i], question, y_qry_mask[i], list(self.model.mapper_net.parameters()))
                loss_q = F.cross_entropy(logits_q.reshape((- 1), len(self.model.gpt_tokenizer)), answer.flatten(), ignore_index=self.pad_token_id)
                losses_q[0] += loss_q
                correct = torch.eq(pred_tokens, answer).sum().item()
                corrects[0] = (corrects[0] + correct)
            with torch.no_grad():
                # Query metrics after the first inner update (index 1).
                (logits_q, pred_tokens) = self.model(x_qry[i], question, y_qry_mask[i], fast_weights)
                loss_q = F.cross_entropy(logits_q.reshape((- 1), len(self.model.gpt_tokenizer)), answer.flatten(), ignore_index=self.pad_token_id)
                losses_q[1] += loss_q
                correct = torch.eq(pred_tokens, answer).sum().item()
                corrects[1] = (corrects[1] + correct)
            for k in range(1, self.update_step):
                # Further inner updates on the support set, from fast_weights.
                logits = self.model(x_spt[i], y_spt[i], y_spt_mask[i], fast_weights, get_pred_tokens=False)
                loss = F.cross_entropy(logits.reshape((- 1), len(self.model.gpt_tokenizer)), y_spt[i].flatten(), ignore_index=self.pad_token_id)
                grad = torch.autograd.grad(outputs=loss, inputs=fast_weights)
                fast_weights = list(map((lambda p: (p[1] - (self.update_lr * p[0]))), zip(grad, fast_weights)))
                # Query loss kept differentiable here — it feeds the meta-update below.
                (logits_q, pred_tokens) = self.model(x_qry[i], question, y_qry_mask[i], fast_weights)
                loss_q = F.cross_entropy(logits_q.reshape((- 1), len(self.model.gpt_tokenizer)), answer.flatten(), ignore_index=self.pad_token_id)
                losses_q[(k + 1)] += loss_q
                with torch.no_grad():
                    correct = torch.eq(pred_tokens, answer).sum().item()
                    corrects[(k + 1)] = (corrects[(k + 1)] + correct)
        # Meta-objective: mean of the differentiable query losses (steps >= 2),
        # averaged over tasks; steps 0 and 1 were computed under no_grad.
        loss_q = (torch.mean(losses_q[2:]) / task_num)
        self.meta_optim.zero_grad()
        loss_q.backward(inputs=list(self.model.mapper_net.parameters()))
        nn.utils.clip_grad_norm_(self.model.mapper_net.parameters(), max_norm=1)
        self.meta_optim.step()
        # Token-level accuracy per adaptation step.
        accs = (np.array(corrects) / ((querysz * task_num) * self.seq_len))
        losses_q_ = [round(loss.item(), 4) for loss in losses_q]
        return (accs, losses_q_)

    def finetunning(self, x_spt, y_spt, y_spt_mask, x_qry, y_qry, y_qry_mask, qry_answer, qry_img_id):
        """Adapt a deep copy of the model on one task and report per-step query accuracy."""
        querysz = len(x_qry)
        corrects = [0 for _ in range((self.update_step_test + 1))]
        # Deep copy so evaluation never mutates the meta-parameters.
        model = deepcopy(self.model)
        logits = model(x_spt, y_spt, y_spt_mask, fast_weights=list(model.mapper_net.parameters()), get_pred_tokens=False)
        loss = F.cross_entropy(logits.reshape((- 1), len(model.gpt_tokenizer)), y_spt.flatten(), ignore_index=self.pad_token_id)
        grad = torch.autograd.grad(outputs=loss, inputs=model.mapper_net.parameters())
        fast_weights = list(map((lambda p: (p[1] - (self.update_lr * p[0]))), zip(grad, model.mapper_net.parameters())))
        with torch.no_grad():
            # Accuracy before adaptation (index 0).
            (logits_q, pred_tokens) = model(x_qry, y_qry, y_qry_mask, fast_weights=list(model.mapper_net.parameters()))
            correct = torch.eq(pred_tokens, qry_answer).sum().item()
            corrects[0] = (corrects[0] + correct)
        with torch.no_grad():
            # Accuracy after the first update (index 1).
            (logits_q, pred_tokens) = model(x_qry, y_qry, y_qry_mask, fast_weights=fast_weights)
            correct = torch.eq(pred_tokens, qry_answer).sum().item()
            corrects[1] = (corrects[1] + correct)
        for k in range(1, self.update_step_test):
            logits = model(x_spt, y_spt, y_spt_mask, fast_weights=fast_weights, get_pred_tokens=False)
            loss = F.cross_entropy(logits.reshape((- 1), len(model.gpt_tokenizer)), y_spt.flatten(), ignore_index=self.pad_token_id)
            grad = torch.autograd.grad(loss, fast_weights)
            fast_weights = list(map((lambda p: (p[1] - (self.update_lr * p[0]))), zip(grad, fast_weights)))
            (logits_q, pred_tokens) = model(x_qry, y_qry, y_qry_mask, fast_weights=fast_weights, get_pred_tokens=True)
            with torch.no_grad():
                correct = torch.eq(pred_tokens, qry_answer).sum().item()
                corrects[(k + 1)] = (corrects[(k + 1)] + correct)
        # Log final predictions against ground truth (uses pred_tokens from the last step).
        for idx in range(x_qry.shape[0]):
            gt_answer = self.model.gpt_tokenizer.decode(qry_answer[idx], skip_special_tokens=True).strip()
            pred_answer = self.model.gpt_tokenizer.decode(pred_tokens[idx], skip_special_tokens=True).strip()
            write_data_to_txt(self.log_file_path, 'Img: {}, GT answer: {}, Pred. answer: {}\n'.format(qry_img_id, gt_answer, pred_answer))
        del model
        accs = (np.array(corrects) / (querysz * self.seq_len))
        return accs

    def save_mapper_model(self):
        """Persist only the mapper network's weights under the experiment's model name."""
        torch.save({'mapper_net': self.model.mapper_net.state_dict()}, os.path.join(MODELS_PATH, '{}'.format(self.model_name)))
        print('Model saved on path {}'.format(MODELS_PATH))

    def load_model(self):
        """Restore the mapper network's weights saved by save_mapper_model()."""
        model_dict = torch.load((MODELS_PATH + self.model_name), map_location=torch.device(self.device))
        self.model.mapper_net.load_state_dict(model_dict['mapper_net'])
        print('Model loaded from {}'.format(MODELS_PATH))
# FIX: the dangling `.parametrize(...)` statement was a mangled decorator;
# restore it (assumes pytest is imported at the top of this test module).
@pytest.mark.parametrize('image', np.array([[[1, 1], [1, 1]], [[0, 0], [0, 0]]]))
def test_mse(image):
    """MSE of an image with itself must be exactly zero."""
    results = imed_metrics.mse(image, image)
    assert (results == 0.0)
def xyxy_to_xywh(xyxy_box):
    """Convert a corner-format box to (x, y, width, height).

    TO_REMOVE = 1 compensates for inclusive integer pixel coordinates
    (a box spanning columns xmin..xmax covers xmax - xmin + 1 pixels).
    """
    xmin, ymin, xmax, ymax = xyxy_box
    TO_REMOVE = 1
    width = xmax - xmin + TO_REMOVE
    height = ymax - ymin + TO_REMOVE
    return (xmin, ymin, width, height)
def conv_block_3(in_dim, out_dim, act_fn):
    """Three stacked conv stages; the final conv+BN stage carries no activation."""
    layers = [
        conv_block(in_dim, out_dim, act_fn),
        conv_block(out_dim, out_dim, act_fn),
        nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm2d(out_dim),
    ]
    return nn.Sequential(*layers)
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
    """Parse ground-truth text lines into (points, confidences, transcriptions) lists.

    Optionally sorts all three lists by descending confidence when
    confidences are present.
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []
    separator = '\r\n' if CRLF else '\n'
    for raw_line in content.split(separator):
        line = raw_line.replace('\r', '').replace('\n', '')
        if line == '':
            continue
        points, confidence, transcription = get_tl_line_values_gt(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
        pointsList.append(points)
        transcriptionsList.append(transcription)
        confidencesList.append(confidence)
    if withConfidence and confidencesList and sort_by_confidences:
        import numpy as np
        # argsort of negated confidences -> descending order.
        order = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in order]
        pointsList = [pointsList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]
    return (pointsList, confidencesList, transcriptionsList)
def _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories: _MergedCategoriesT) -> None:
    """Populate each dataset's Metadata with category-ID mapping tables.

    Assigns contiguous IDs by sorted original category ID, then (re)builds
    thing_classes, thing_dataset_id_to_contiguous_id and
    thing_dataset_id_to_merged_id on every dataset's metadata.
    """
    # Group (contiguous_id, category) pairs by the dataset each category came from.
    merged_categories_per_dataset = {}
    for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())):
        for cat in merged_categories[cat_id]:
            if cat.dataset_name not in merged_categories_per_dataset:
                merged_categories_per_dataset[cat.dataset_name] = defaultdict(list)
            merged_categories_per_dataset[cat.dataset_name][cat_id].append((contiguous_cat_id, cat))
    logger = logging.getLogger(__name__)
    # FIX: the loop variable used to be named `merged_categories`, shadowing the parameter.
    for dataset_name, dataset_categories in merged_categories_per_dataset.items():
        meta = MetadataCatalog.get(dataset_name)
        if not hasattr(meta, 'thing_classes'):
            meta.thing_classes = []
            meta.thing_dataset_id_to_contiguous_id = {}
            meta.thing_dataset_id_to_merged_id = {}
        else:
            # Rebuild from scratch on repeated registration.
            meta.thing_classes.clear()
            meta.thing_dataset_id_to_contiguous_id.clear()
            meta.thing_dataset_id_to_merged_id.clear()
        logger.info(f'Dataset {dataset_name}: category ID to contiguous ID mapping:')
        for _cat_id, categories in sorted(dataset_categories.items()):
            added_to_thing_classes = False
            for contiguous_cat_id, cat in categories:
                # One class name per original category id.
                if not added_to_thing_classes:
                    meta.thing_classes.append(cat.mapped_name)
                    added_to_thing_classes = True
                meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
                meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
                logger.info(f'{cat.id} ({cat.name}) -> {contiguous_cat_id}')
class PruningCriterion():
    """Base class for pruning criteria.

    Subclasses fill `scores` with per-module importance values and may
    override the optimizer-step hooks, which are no-ops by default.
    """

    def __init__(self, modules, config, pattern):
        """Store the modules to score, the pruning config, and the sparsity pattern."""
        self.scores = {}  # per-module importance scores, filled by subclasses
        self.modules = modules
        self.config = config
        self.pattern = pattern
        self.low_memory_usage = config['low_memory_usage']

    def on_step_begin(self):
        """Hook called at the start of a training step; default no-op."""

    def on_before_optimizer_step(self):
        """Hook called right before optimizer.step(); default no-op."""

    def on_after_optimizer_step(self):
        """Hook called right after optimizer.step(); default no-op."""
class DeepLabV3(SegmentationModel):
    """DeepLabV3 segmentation model: dilated encoder + decoder + 1x1 segmentation head.

    upsampling=8 matches the encoder output stride produced by make_dilated
    on stages 4-5. An optional classification head on the last encoder
    feature map is added when aux_params is given.
    """

    def __init__(self, task, encoder_name: str='resnet34', encoder_depth: int=5, encoder_weights: Optional[str]='imagenet', decoder_channels: int=256, in_channels: int=3, classes: int=1, activation: Optional[str]=None, upsampling: int=8, aux_params: Optional[dict]=None):
        super().__init__()
        self.encoder = get_encoder(task, encoder_name, in_channels=in_channels, depth=encoder_depth, weights=encoder_weights)
        # Dilate the last two stages so spatial resolution is kept at stride 8.
        self.encoder.make_dilated(stage_list=[4, 5], dilation_list=[2, 4])
        self.decoder = DeepLabV3Decoder(in_channels=self.encoder.out_channels[(- 1)], out_channels=decoder_channels)
        self.segmentation_head = SegmentationHead(in_channels=self.decoder.out_channels, out_channels=classes, activation=activation, kernel_size=1, upsampling=upsampling)
        # Optional auxiliary image-level classification head.
        if (aux_params is not None):
            self.classification_head = ClassificationHead(in_channels=self.encoder.out_channels[(- 1)], **aux_params)
        else:
            self.classification_head = None
def read_facet_specific_relevances(data_path, run_path, dataset, facet, method_name):
    """Map each ranked candidate list to its gold relevance judgements for one facet.

    Reads the gold pool/relevance file from data_path and the method's ranked
    file from run_path; returns {"<qpid>_<facet>": [relevance, ...]} in ranked order.
    """
    gold_fname = os.path.join(data_path, 'test-pid2anns-{:s}-{:s}.json'.format(dataset, facet))
    ranked_fname = os.path.join(run_path, 'test-pid2pool-{:s}-{:s}-{:s}-ranked.json'.format(dataset, method_name, facet))
    with codecs.open(gold_fname, 'r', 'utf-8') as fp:
        pid2pool_source = json.load(fp)
    print('Gold query pids: {:d}'.format(len(pid2pool_source)))
    # Gold relevance per candidate pid, keyed by "<qpid>_<facet>".
    pid2rels_gold = {}
    for qpid, pool_rel in pid2pool_source.items():
        rel_by_pid = dict(zip(pool_rel['cands'], pool_rel['relevance_adju']))
        pid2rels_gold['{:s}_{:s}'.format(qpid, facet)] = rel_by_pid
    with codecs.open(ranked_fname, 'r', 'utf-8') as fp:
        pid2ranks = json.load(fp)
    print('Valid ranked query pids: {:d}'.format(len(pid2ranks)))
    qpid2rankedcand_relevances = {}
    for qpid, citranks in pid2ranks.items():
        key = '{:s}_{:s}'.format(qpid, facet)
        gold_rels = pid2rels_gold[key]
        # citranks entries are (pid, score); keep only the gold relevance, in ranked order.
        qpid2rankedcand_relevances[key] = [gold_rels[pid_score[0]] for pid_score in citranks]
    return qpid2rankedcand_relevances
class LaikagoPose(object):
    """Joint pose of the Laikago quadruped: (abduction, hip, knee) angle per leg.

    NOTE(review): fields are declared with attr.ib, so this class was
    presumably decorated with @attr.s (or attrs.define) in the original
    source — confirm; without the decorator the attr.ib sentinels remain
    plain class attributes and no generated __init__ exists.
    Angles are presumably in radians — verify against the robot config.
    """
    # Leg 0
    abduction_angle_0 = attr.ib(type=float, default=0)
    hip_angle_0 = attr.ib(type=float, default=0)
    knee_angle_0 = attr.ib(type=float, default=0)
    # Leg 1
    abduction_angle_1 = attr.ib(type=float, default=0)
    hip_angle_1 = attr.ib(type=float, default=0)
    knee_angle_1 = attr.ib(type=float, default=0)
    # Leg 2
    abduction_angle_2 = attr.ib(type=float, default=0)
    hip_angle_2 = attr.ib(type=float, default=0)
    knee_angle_2 = attr.ib(type=float, default=0)
    # Leg 3
    abduction_angle_3 = attr.ib(type=float, default=0)
    hip_angle_3 = attr.ib(type=float, default=0)
    knee_angle_3 = attr.ib(type=float, default=0)
class GenerationRunner(BaseRunner):
    """Runner specialized for prompt-based text generation tasks."""

    def __init__(self, model: PromptForGeneration, config: CfgNode=None, train_dataloader: Optional[PromptDataLoader]=None, valid_dataloader: Optional[PromptDataLoader]=None, test_dataloader: Optional[PromptDataLoader]=None):
        super().__init__(model=model, config=config, train_dataloader=train_dataloader, valid_dataloader=valid_dataloader, test_dataloader=test_dataloader)

    def inference_step(self, batch, batch_idx):
        """Generate for one batch; returns (predictions, target texts)."""
        # Read the targets before generate() in case generation touches the batch.
        target = batch['tgt_text']
        _, pred = self.model.generate(batch, **self.config.generation)
        return (pred, target)

    def inference_epoch_end(self, split, outputs):
        """Flatten per-batch outputs, persist them, and compute configured metrics."""
        preds = []
        targets = []
        for pred, target in outputs:
            preds.extend(pred)
            targets.extend(target)
        self.save_results(split, {'preds': preds, 'targets': targets})
        return OrderedDict(
            (metric_name, generation_metric(preds, targets, metric_name))
            for metric_name in self.config.generation.metric
        )

    def training_step(self, batch, batch_idx):
        """The model's forward pass returns the training loss directly."""
        return self.model(batch)
def update_config(config_file):
    """Merge a YAML experiment config into the global `config` dict in place.

    Dict-valued keys are merged key-by-key; selected list fields are
    converted to numpy arrays / tuples. Raises ValueError for any key not
    already present in config.py.
    """
    exp_config = None
    with open(config_file) as f:
        exp_config = edict(yaml.load(f, Loader=yaml.SafeLoader))
        for (k, v) in exp_config.items():
            if (k in config):
                if isinstance(v, dict):
                    # Convert YAML lists to numpy arrays where downstream code expects them.
                    if (k == 'TRAIN'):
                        if ('BBOX_WEIGHTS' in v):
                            v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
                    elif (k == 'network'):
                        if ('PIXEL_MEANS' in v):
                            v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
                    # Shallow per-key merge into the existing sub-config.
                    for (vk, vv) in v.items():
                        config[k][vk] = vv
                elif (k == 'SCALES'):
                    # SCALES holds a single (short_side, long_side) tuple at index 0.
                    config[k][0] = tuple(v)
                else:
                    config[k] = v
            else:
                raise ValueError('key must exist in config.py')
def parse_args():
    """Parse command-line arguments for DDPM training; returns the argparse Namespace.

    Also reconciles --local_rank with the LOCAL_RANK environment variable and
    validates that either a dataset name or a train data directory is given.
    """
    parser = argparse.ArgumentParser(description='Simple example of a training script.')
    # Data source
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private, dataset). It can also be a path pointing to a local copy of a dataset in your filesystem, or to a folder containing files that HF Datasets can understand.')
    parser.add_argument('--dataset_config_name', type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.")
    parser.add_argument('--model_config_name_or_path', type=str, default=None, help='The config of the UNet model to train, leave as None to use standard DDPM configuration.')
    parser.add_argument('--train_data_dir', type=str, default=None, help='A folder containing the training data. Folder contents must follow the structure described in In particular, a `metadata.jsonl` file must exist to provide the captions for the images. Ignored if `dataset_name` is specified.')
    parser.add_argument('--output_dir', type=str, default='ddpm-model-64', help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--overwrite_output_dir', action='store_true')
    parser.add_argument('--cache_dir', type=str, default=None, help='The directory where the downloaded models and datasets will be stored.')
    # Image preprocessing
    parser.add_argument('--resolution', type=int, default=64, help='The resolution for input images, all the images in the train/validation dataset will be resized to this resolution')
    parser.add_argument('--center_crop', default=False, action='store_true', help='Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.')
    parser.add_argument('--random_flip', default=False, action='store_true', help='whether to randomly flip images horizontally')
    # Training loop
    parser.add_argument('--train_batch_size', type=int, default=16, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--eval_batch_size', type=int, default=16, help='The number of images to generate for evaluation.')
    parser.add_argument('--dataloader_num_workers', type=int, default=0, help='The number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process.')
    parser.add_argument('--num_epochs', type=int, default=100)
    parser.add_argument('--save_images_epochs', type=int, default=10, help='How often to save images during training.')
    parser.add_argument('--save_model_epochs', type=int, default=10, help='How often to save the model during training.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    # Optimizer and LR schedule
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--lr_scheduler', type=str, default='cosine', help='The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]')
    parser.add_argument('--lr_warmup_steps', type=int, default=500, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--adam_beta1', type=float, default=0.95, help='The beta1 parameter for the Adam optimizer.')
    parser.add_argument('--adam_beta2', type=float, default=0.999, help='The beta2 parameter for the Adam optimizer.')
    parser.add_argument('--adam_weight_decay', type=float, default=1e-06, help='Weight decay magnitude for the Adam optimizer.')
    parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon value for the Adam optimizer.')
    # EMA
    parser.add_argument('--use_ema', action='store_true', help='Whether to use Exponential Moving Average for the final model weights.')
    parser.add_argument('--ema_inv_gamma', type=float, default=1.0, help='The inverse gamma value for the EMA decay.')
    parser.add_argument('--ema_power', type=float, default=(3 / 4), help='The power value for the EMA decay.')
    parser.add_argument('--ema_max_decay', type=float, default=0.9999, help='The maximum decay magnitude for EMA.')
    # Hub / logging
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_token', type=str, default=None, help='The token to use to push to the Model Hub.')
    parser.add_argument('--hub_model_id', type=str, default=None, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_private_repo', action='store_true', help='Whether or not to create a private repository.')
    parser.add_argument('--logger', type=str, default='tensorboard', choices=['tensorboard', 'wandb'], help='Whether to use [tensorboard]( or [wandb]( for experiment tracking and logging of model metrics and model checkpoints')
    parser.add_argument('--logging_dir', type=str, default='logs', help='[TensorBoard]( log directory. Will default to *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--mixed_precision', type=str, default='no', choices=['no', 'fp16', 'bf16'], help='Whether to use mixed precision. Choosebetween fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.and an Nvidia Ampere GPU.')
    # Diffusion process
    parser.add_argument('--prediction_type', type=str, default='epsilon', choices=['epsilon', 'sample'], help="Whether the model should predict the 'epsilon'/noise error or directly the reconstructed image 'x0'.")
    parser.add_argument('--ddpm_num_steps', type=int, default=1000)
    parser.add_argument('--ddpm_num_inference_steps', type=int, default=1000)
    parser.add_argument('--ddpm_beta_schedule', type=str, default='linear')
    # Checkpointing
    parser.add_argument('--checkpointing_steps', type=int, default=500, help='Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming training using `--resume_from_checkpoint`.')
    parser.add_argument('--checkpoints_total_limit', type=int, default=None, help='Max number of checkpoints to store.')
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='Whether training should be resumed from a previous checkpoint. Use a path saved by `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.')
    parser.add_argument('--enable_xformers_memory_efficient_attention', action='store_true', help='Whether or not to use xformers.')
    args = parser.parse_args()
    # Environment variable wins over the CLI default for distributed launchers.
    env_local_rank = int(os.environ.get('LOCAL_RANK', (- 1)))
    if ((env_local_rank != (- 1)) and (env_local_rank != args.local_rank)):
        args.local_rank = env_local_rank
    if ((args.dataset_name is None) and (args.train_data_dir is None)):
        raise ValueError('You must specify either a dataset name from the hub or a train data directory.')
    return args
def train_fine(epoch):
    """Run one training epoch of the fine (refinement) depth model.

    The coarse model is kept in eval mode and only supplies the
    intermediate depth estimate that is concatenated into the fine model.
    Relies on module-level ``coarse_model``, ``fine_model``,
    ``train_loader``, ``fine_optimizer``, ``custom_loss_function`` and
    ``dtype``.

    Parameters
    ----------
    epoch : int
        Current epoch number (unused here; kept for caller compatibility).

    Returns
    -------
    float
        Mean fine-model loss over all batches of ``train_loader``.
    """
    coarse_model.eval()
    fine_model.train()
    train_fine_loss = 0
    for (batch_idx, data) in enumerate(train_loader):
        # clone().detach().requires_grad_(True) is the supported replacement
        # for torch.tensor(existing_tensor, requires_grad=True), which copies
        # and emits a UserWarning.
        rgb = data['image'].cuda().clone().detach().requires_grad_(True)
        depth = data['depth'].cuda().clone().detach().requires_grad_(True)
        fine_optimizer.zero_grad()
        coarse_output = coarse_model(rgb.type(dtype))
        output = fine_model(rgb.type(dtype), coarse_output.type(dtype))
        loss = custom_loss_function(output, depth)
        loss.backward()
        fine_optimizer.step()
        train_fine_loss += loss.item()
    # NOTE(review): raises NameError if train_loader is empty (batch_idx
    # unbound) — pre-existing behavior, kept.
    train_fine_loss /= (batch_idx + 1)
    return train_fine_loss
def add_entry(data, model_key, row, headers):
    """Record one CSV row's test metrics under ``model_key`` in ``data``.

    On first sight of ``model_key`` the full row is stored under
    'full_entry'; the four test metrics (accuracy, precision, recall, f1)
    are copied to top-level keys, skipping blank/NaN cells.

    Parameters
    ----------
    data : dict
        Accumulator mapping model keys to metric dicts; mutated in place.
    model_key : hashable
        Identifier of the model this row belongs to.
    row : sequence
        One parsed CSV row.
    headers : dict
        Maps column-name constants (TEST_ACC_NAME, ...) to column indices.

    Returns
    -------
    dict
        The same ``data`` object, for chaining.
    """
    # (metric value, destination key) pairs, stored in this order.
    entries = [
        [row[headers[TEST_ACC_NAME]], 'acc'],
        [row[headers[TEST_PR_NAME]], 'precision'],
        [row[headers[TEST_RC_NAME]], 'recall'],
        [row[headers[TEST_F1_NAME]], 'f1'],
    ]
    if model_key not in data:
        # First row for this model: keep every column as context.
        full_entry = {key: row[headers[key]] for key in headers}
        data[model_key] = {'full_entry': full_entry}
    for value, name in entries:
        # Skip missing measurements (NaN or empty cell).
        if pd.isnull(value) or value == '':
            continue
        data[model_key][name] = value
    return data
def get_cot_prompt(data: dict, backbone: str):
    """Build the chat-style chain-of-thought prompt for a math question.

    Parameters
    ----------
    data : dict
        Must contain a 'question' entry with the question text.
    backbone : str
        Either 'gpt4' or 'chatgpt'; selects which few-shot prompt set
        from ``math_prompt`` is used.

    Returns
    -------
    list
        Few-shot messages followed by the user question message.

    Raises
    ------
    ValueError
        If ``backbone`` is not a supported model name (previously this
        surfaced as an UnboundLocalError further down).
    """
    if backbone == 'gpt4':
        system_message = math_prompt.GPT4_COT_SYSTEM
        user_message = math_prompt.GPT4_COT_USER
        assistant_message = math_prompt.GPT4_COT_ASSISTANT
    elif backbone == 'chatgpt':
        system_message = math_prompt.TURBO_COT_SYSTEM
        user_message = math_prompt.TURBO_COT_USER
        assistant_message = math_prompt.TURBO_COT_ASSISTANT
    else:
        raise ValueError(f"Unsupported backbone: {backbone!r} (expected 'gpt4' or 'chatgpt')")
    messages = get_user_assistant_messages(system_message, user_message, assistant_message)
    question_message = data['question']
    messages += [{'role': 'user', 'content': f'Question: {question_message}'}]
    return messages
class DiscriminatorMnistSN(object):
    """Spectral-normalized projection discriminator for MNIST-sized images."""

    def __init__(self, z_dim=100, size=28):
        self.name = 'DiscriminatorMnistSN'
        self.z_dim = z_dim
        self.size = size

    def __call__(self, inputs, y, is_training=True, reuse=False):
        """Score ``inputs`` conditioned on ``y`` via a projection head."""
        with tf.variable_scope(self.name) as scope:
            if reuse:
                scope.reuse_variables()
            # Convolutional trunk: four SN convs with leaky-ReLU activations.
            h = lrelu(conv(inputs, channels=32, kernel=5, stride=1, pad=2, sn=True, scope='conv_1'))
            h = lrelu(conv(h, channels=128, kernel=5, stride=2, pad=2, sn=True, scope='conv_2'))
            h = lrelu(conv(h, channels=256, kernel=5, stride=2, pad=2, sn=True, scope='conv_3'))
            h = lrelu(conv(h, channels=256, kernel=5, stride=2, pad=2, sn=True, scope='conv_4'))
            h = tcl.flatten(h)
            h = lrelu(fully_connected(h, 512, sn=True, scope='fc_8'))
            # Projection conditioning term: inner product <embed(y), h>.
            projected = fully_connected(y, 512, use_bias=False, sn=True, scope='y_proj')
            projected = tf.reduce_sum(projected * h, axis=1, keep_dims=True)
            logit = fully_connected(h, 1, sn=True, scope='fc_9')
            return logit + projected

    def vars(self):
        """All trainable variables created under this discriminator's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
def get_event_given_entity_id(source_data, entity_id, id_pos=1):
    """Return all events whose id field equals ``entity_id``.

    Parameters
    ----------
    source_data : iterable of sequences
        Event records; each record is indexable.
    entity_id : hashable
        Entity id to match.
    id_pos : int, optional
        Index of the id field within each record (default 1).

    Returns
    -------
    list
        Matching events, in input order. A list (rather than the previous
        one-shot ``filter`` iterator) so the result can be iterated more
        than once and measured with ``len``.
    """
    return [event for event in source_data if event[id_pos] == entity_id]
class NetworkOnly():
    """Executes a hierarchical program policy greedily, without MCTS search.

    At each step the policy network's prior over programs is masked by the
    environment's currently-legal actions and the argmax program is taken;
    non-atomic programs are executed through a recursive ``play`` call.
    """

    def __init__(self, policy, env, max_depth_dict):
        # policy: network exposing init_tensors() and forward_once().
        # env: program environment (start_task / act / get_reward / ...).
        # max_depth_dict: per-program-level cap on steps in one episode.
        self.policy = policy
        self.env = env
        self.stop_index = env.programs_library['STOP']['index']
        self.max_depth_dict = max_depth_dict
        # Flipped to False as soon as any recursive sub-execution fails;
        # this aborts all enclosing loops as well.
        self.clean_sub_executions = True

    def play(self, task_index):
        """Greedily execute the program ``task_index``.

        Returns
        -------
        tuple
            (reward, programs_called): reward is the environment reward,
            or 0.0 when the depth budget was exhausted; programs_called
            lists every program name chosen (including 'STOP').
        """
        programs_called = []
        task_level = self.env.get_program_level_from_index(task_index)
        max_depth = self.max_depth_dict[task_level]
        depth = 0
        observation = self.env.start_task(task_index)
        (h, c) = self.policy.init_tensors()  # fresh recurrent state per episode
        while (self.clean_sub_executions and (depth <= max_depth)):
            mask = self.env.get_mask_over_actions(task_index)
            (priors, value, h, c) = self.policy.forward_once(observation, task_index, h, c)
            # Zero out illegal actions before taking the argmax.
            priors = (priors * torch.FloatTensor(mask))
            priors = torch.squeeze(priors)
            program_index = torch.argmax(priors).item()
            program_name = self.env.get_program_from_index(program_index)
            programs_called.append(program_name)
            depth += 1
            if (program_name == 'STOP'):
                break
            elif (self.env.programs_library[program_name]['level'] == 0):
                # Atomic action: apply it directly to the environment.
                observation = self.env.act(program_name)
            else:
                # Sub-program: recurse; a reward below 1.0 marks failure.
                (r, _) = self.play(task_index=program_index)
                if (r < 1.0):
                    self.clean_sub_executions = False
                observation = self.env.get_observation()
        if (depth <= max_depth):
            reward = self.env.get_reward()
        else:
            reward = 0.0
        self.env.end_task()
        return (reward, programs_called)
def getDrivingMetas(root, data_type='clean'):
    """Collect image/disparity path metadata for the SceneFlow Driving set.

    Walks the disparity tree (focal length / forward-backward / speed)
    and pairs each left/right disparity file with the same-named .png
    frame of the requested render pass.

    Parameters
    ----------
    root : str
        Dataset root directory containing the ``driving`` folder.
    data_type : str, optional
        Render pass, 'clean' or 'final' (default 'clean').

    Returns
    -------
    list of dict
        One dict per frame with left/right image and disparity paths,
        all relative to ``root``.
    """
    img_dir = 'driving/frames_{}pass'.format(data_type)
    disp_dir = 'driving/disparity'
    metas = []
    for focal in os.listdir(osp.join(root, disp_dir)):
        for ward in os.listdir(osp.join(root, disp_dir, focal)):
            for speed in os.listdir(osp.join(root, disp_dir, focal, ward)):
                disp_names = os.listdir(osp.join(root, disp_dir, focal, ward, speed, 'left'))
                for disp_name in disp_names:
                    # Image frames share the disparity file's stem.
                    img_name = '{}.png'.format(disp_name.split('.')[0])
                    metas.append(dict(
                        left_image_path=osp.join(img_dir, focal, ward, speed, 'left', img_name),
                        right_image_path=osp.join(img_dir, focal, ward, speed, 'right', img_name),
                        left_disp_map_path=osp.join(disp_dir, focal, ward, speed, 'left', disp_name),
                        right_disp_map_path=osp.join(disp_dir, focal, ward, speed, 'right', disp_name),
                    ))
    return metas
class Cell(nn.Module):
    """Differentiable NAS cell over 3-D feature maps (AutoDeepLab-style).

    The cell can consume the previous layer's output at up to three
    resolutions (downsampled, same, upsampled) plus the layer-before-
    previous output ``s0``, mixing candidate operations weighted by the
    architecture parameters ``n_alphas``.
    """

    def __init__(self, steps, block_multiplier, prev_prev_fmultiplier, prev_fmultiplier_down, prev_fmultiplier_same, prev_fmultiplier_up, filter_multiplier):
        super(Cell, self).__init__()
        self.C_in = (block_multiplier * filter_multiplier)
        self.C_out = filter_multiplier
        self.C_prev_prev = int((prev_prev_fmultiplier * block_multiplier))
        self._prev_fmultiplier_same = prev_fmultiplier_same
        # 1x1 preprocessing convs exist only for the resolutions that can
        # actually be fed in (None multiplier => that branch is absent).
        if (prev_fmultiplier_down is not None):
            self.C_prev_down = int((prev_fmultiplier_down * block_multiplier))
            self.preprocess_down = ConvBR(self.C_prev_down, self.C_out, 1, 1, 0)
        if (prev_fmultiplier_same is not None):
            self.C_prev_same = int((prev_fmultiplier_same * block_multiplier))
            self.preprocess_same = ConvBR(self.C_prev_same, self.C_out, 1, 1, 0)
        if (prev_fmultiplier_up is not None):
            self.C_prev_up = int((prev_fmultiplier_up * block_multiplier))
            self.preprocess_up = ConvBR(self.C_prev_up, self.C_out, 1, 1, 0)
        if (prev_prev_fmultiplier != (- 1)):
            self.pre_preprocess = ConvBR(self.C_prev_prev, self.C_out, 1, 1, 0)
        self._steps = steps
        self.block_multiplier = block_multiplier
        self._ops = nn.ModuleList()
        # One op per (step, incoming-state) edge; step i has 2+i inputs.
        for i in range(self._steps):
            for j in range((2 + i)):
                stride = 1
                # No edge from s0 when there is no prev-prev layer
                # (sentinel value -1): that op slot stays None.
                if ((prev_prev_fmultiplier == (- 1)) and (j == 0)):
                    op = None
                else:
                    op = MixedOp(self.C_out, stride)
                self._ops.append(op)
        self._initialize_weights()

    def scale_dimension(self, dim, scale):
        """Scale a spatial size; odd sizes use (dim-1)*scale+1 to stay aligned."""
        assert isinstance(dim, int)
        return (int((((float(dim) - 1.0) * scale) + 1.0)) if (dim % 2) else int((dim * scale)))

    def prev_feature_resize(self, prev_feature, mode):
        """Trilinearly resize a 5-D feature by 0.5 ('down') or 2 ('up')."""
        if (mode == 'down'):
            feature_size_d = self.scale_dimension(prev_feature.shape[2], 0.5)
            feature_size_h = self.scale_dimension(prev_feature.shape[3], 0.5)
            feature_size_w = self.scale_dimension(prev_feature.shape[4], 0.5)
        elif (mode == 'up'):
            feature_size_d = self.scale_dimension(prev_feature.shape[2], 2)
            feature_size_h = self.scale_dimension(prev_feature.shape[3], 2)
            feature_size_w = self.scale_dimension(prev_feature.shape[4], 2)
        return F.interpolate(prev_feature, (feature_size_d, feature_size_h, feature_size_w), mode='trilinear', align_corners=True)

    def forward(self, s0, s1_down, s1_same, s1_up, n_alphas):
        """Run the cell once per provided s1 resolution.

        Returns a list with one concatenated feature map per non-None
        s1 input, in down/same/up order.
        """
        # Resize + preprocess each available s1 branch; the last branch
        # processed determines the target size used to align s0.
        if (s1_down is not None):
            s1_down = self.prev_feature_resize(s1_down, 'down')
            s1_down = self.preprocess_down(s1_down)
            (size_d, size_h, size_w) = (s1_down.shape[2], s1_down.shape[3], s1_down.shape[4])
        if (s1_same is not None):
            s1_same = self.preprocess_same(s1_same)
            (size_d, size_h, size_w) = (s1_same.shape[2], s1_same.shape[3], s1_same.shape[4])
        if (s1_up is not None):
            s1_up = self.prev_feature_resize(s1_up, 'up')
            s1_up = self.preprocess_up(s1_up)
            (size_d, size_h, size_w) = (s1_up.shape[2], s1_up.shape[3], s1_up.shape[4])
        all_states = []
        if (s0 is not None):
            # Align s0 spatially and in channel count before pairing it
            # with each s1 branch.
            s0 = (F.interpolate(s0, (size_d, size_h, size_w), mode='trilinear', align_corners=True) if ((s0.shape[3] != size_h) or (s0.shape[4] != size_w) or (s0.shape[2] != size_d)) else s0)
            s0 = (self.pre_preprocess(s0) if (s0.shape[1] != self.C_out) else s0)
            if (s1_down is not None):
                states_down = [s0, s1_down]
                all_states.append(states_down)
            if (s1_same is not None):
                states_same = [s0, s1_same]
                all_states.append(states_same)
            if (s1_up is not None):
                states_up = [s0, s1_up]
                all_states.append(states_up)
        else:
            # No s0: its slot is the scalar 0 (op for j == 0 is None, so
            # it is skipped in the inner loop below).
            if (s1_down is not None):
                states_down = [0, s1_down]
                all_states.append(states_down)
            if (s1_same is not None):
                states_same = [0, s1_same]
                all_states.append(states_same)
            if (s1_up is not None):
                states_up = [0, s1_up]
                all_states.append(states_up)
        final_concates = []
        for states in all_states:
            offset = 0
            for i in range(self._steps):
                new_states = []
                for (j, h) in enumerate(states):
                    branch_index = (offset + j)
                    if (self._ops[branch_index] is None):
                        continue
                    # Mix candidate ops weighted by this edge's alphas.
                    new_state = self._ops[branch_index](h, n_alphas[branch_index])
                    new_states.append(new_state)
                s = sum(new_states)
                offset += len(states)
                states.append(s)
            # Cell output: concat of the last block_multiplier states.
            concat_feature = torch.cat(states[(- self.block_multiplier):], dim=1)
            final_concates.append(concat_feature)
        return final_concates

    def _initialize_weights(self):
        """Kaiming init for convs, constant init for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
def get_jitters(f0_contour, p_floor=0.0001, p_ceil=0.02, max_p_factor=1.3):
    """Compute the five standard Praat-style jitter measures of an F0 contour.

    Parameters
    ----------
    f0_contour : sequence
        Fundamental-frequency / period contour.
    p_floor, p_ceil : float
        Shortest / longest period considered valid.
    max_p_factor : float
        Maximum allowed ratio between consecutive periods.

    Returns
    -------
    dict
        Keys 'localJitter', 'localabsoluteJitter', 'rapJitter',
        'ppq5Jitter', 'ddpJitter' mapping to the respective measures.
    """
    args = (f0_contour, p_floor, p_ceil, max_p_factor)
    # Same evaluation order as before, in case the helpers share state.
    local_absolute = get_local_absolute_jitter(*args)
    local = get_local_jitter(*args)
    rap = get_rap_jitter(*args)
    ppq5 = get_ppq5_jitter(*args)
    ddp = get_ddp_jitter(*args)
    return {
        'localJitter': local,
        'localabsoluteJitter': local_absolute,
        'rapJitter': rap,
        'ppq5Jitter': ppq5,
        'ddpJitter': ddp,
    }
def model(dataloaders_with_covariates):
    """Build a tiny TemporalFusionTransformer from the train dataloader's dataset.

    Presumably a pytest fixture (decorator not visible in this view) —
    the small hidden sizes and heavy logging suggest test usage.
    """
    dataset = dataloaders_with_covariates['train'].dataset
    net = TemporalFusionTransformer.from_dataset(dataset, learning_rate=0.15, hidden_size=4, attention_head_size=1, dropout=0.2, hidden_continuous_size=2, loss=PoissonLoss(), output_size=1, log_interval=5, log_val_interval=1, log_gradient_flow=True)
    return net
class StringIndex(Table):
    """A Table mapping string values of one column to integer ids.

    Wraps a Spark DataFrame that must contain at least the indexed column
    (``col_name``) and an ``id`` column.
    """

    def __init__(self, df: 'SparkDataFrame', col_name: str) -> None:
        super().__init__(df)
        cols = df.columns
        invalidInputError((len(cols) >= 2), 'StringIndex should have >= 2 columns: col_name, id and other columns')
        invalidInputError(('id' in cols), 'id should be a column of the DataFrame')
        invalidInputError((col_name in cols), (col_name + ' should be a column of the DataFrame'))
        self.col_name = col_name

    # NOTE(review): first parameter is `cls` — presumably decorated with
    # @classmethod in the original source; confirm upstream.
    def read_parquet(cls, paths: Union[(str, List[str])], col_name: Optional[str]=None) -> 'StringIndex':
        """Load a StringIndex from parquet files.

        When ``col_name`` is omitted it is inferred from the first path's
        file name (expects '<col_name>.parquet').
        """
        if (not isinstance(paths, list)):
            paths = [paths]
        if ((col_name is None) and (len(paths) >= 1)):
            col_name = os.path.basename(paths[0]).split('.')[0]
        return cls(Table._read_parquet(paths), col_name)

    # NOTE(review): also presumably a @classmethod (uses `cls`); confirm.
    def from_dict(cls, indices: Dict[(str, int)], col_name: str) -> 'StringIndex':
        """Build a StringIndex from a {string_value: int_id} dict."""
        spark = OrcaContext.get_spark_session()
        if (not isinstance(indices, dict)):
            invalidInputError(False, ('indices should be dict, but get ' + indices.__class__.__name__))
        if (not col_name):
            invalidInputError(False, 'col_name should be str, but get None')
        if (not isinstance(col_name, str)):
            invalidInputError(False, ('col_name should be str, but get ' + col_name.__class__.__name__))
        # Turn each (value, id) pair into a row dict for createDataFrame.
        indices = map((lambda x: {col_name: x[0], 'id': x[1]}), indices.items())
        schema = StructType([StructField(col_name, StringType(), False), StructField('id', IntegerType(), False)])
        df = spark.createDataFrame((Row(**x) for x in indices), schema=schema)
        return cls(df, col_name)

    def to_dict(self) -> Dict[(str, int)]:
        """Collect the index to the driver as a {string_value: int_id} dict."""
        cols = self.df.columns
        index_id = cols.index('id')
        col_id = cols.index(self.col_name)
        rows = self.df.collect()
        res_dict = {}
        for row in rows:
            res_dict[row[col_id]] = row[index_id]
        return res_dict

    def write_parquet(self, path: str, mode: str='overwrite') -> None:
        """Write this index to '<path>/<col_name>.parquet'."""
        path = (((path + '/') + self.col_name) + '.parquet')
        write_parquet(self.df, path, mode)

    def cast(self, columns: str, dtype: str) -> 'StringIndex':
        """Cast columns to ``dtype`` and rewrap the result as a StringIndex."""
        df_cast = super().cast(columns, dtype)
        return StringIndex(df_cast.df, self.col_name)
def generate_unroll(env: envs.Env, env_state: envs.State, policy: Policy, key: PRNGKey, unroll_length: int, extra_fields: Sequence[str]=()) -> Tuple[(envs.State, Transition)]:
    """Collect a trajectory of ``unroll_length`` policy steps via lax.scan.

    Returns the final environment state and the stacked per-step
    transitions produced by ``actor_step``.
    """

    def step_fn(carry, _):
        # One environment step; split the key so every step gets fresh noise.
        state, key_t = carry
        key_t, key_next = jax.random.split(key_t)
        next_state, transition = actor_step(env, state, policy, key_t, extra_fields=extra_fields)
        return (next_state, key_next), transition

    (final_state, _), transitions = jax.lax.scan(step_fn, (env_state, key), (), length=unroll_length)
    return (final_state, transitions)
class AdamP(Optimizer):
    """AdamP optimizer: Adam with a projection step that suppresses the
    effective step-size growth on scale-invariant weights (Heo et al.,
    "AdamP").
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
        super(AdamP, self).__init__(params, defaults)
        # NOTE(review): `_grad()` is not defined anywhere visible and its
        # result is discarded — looks like a stray artifact (possibly a
        # mangled decorator); confirm against the upstream AdamP source.
        _grad()

    def step(self, closure=None):
        """Perform a single optimization step.

        NOTE(review): upstream AdamP decorates step() with
        @torch.no_grad(); without it the in-place p.mul_/p.add_ on leaf
        tensors requiring grad would raise — confirm the decorator was
        not lost in this copy.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad
                (beta1, beta2) = group['betas']
                nesterov = group['nesterov']
                state = self.state[p]
                # Lazy state initialization on the first step for this param.
                if (len(state) == 0):
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                state['step'] += 1
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                # Standard Adam first/second-moment updates.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = (group['lr'] / bias_correction1)
                if nesterov:
                    perturb = (((beta1 * exp_avg) + ((1 - beta1) * grad)) / denom)
                else:
                    perturb = (exp_avg / denom)
                wd_ratio = 1.0
                # Project the update for >=2-D parameters to damp step
                # growth on scale-invariant weights (the AdamP trick).
                if (len(p.shape) > 1):
                    (perturb, wd_ratio) = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])
                if (group['weight_decay'] > 0):
                    p.mul_((1.0 - ((group['lr'] * group['weight_decay']) * wd_ratio)))
                p.add_(perturb, alpha=(- step_size))
        return loss
class DocumentIterator(torchtext.data.Iterator):
    """torchtext Iterator that shuffles whole documents instead of examples.

    Examples belonging to one document stay contiguous; shuffling permutes
    documents. Each yielded item is ``(Batch, indx)`` where ``indx`` lists
    positions (within the batch) of examples that start a document.
    """

    def __init__(self, dataset, batch_size, device=None, batch_size_fn=None, train=True, shuffle=None, sort_within_batch=None):
        super(DocumentIterator, self).__init__(dataset, batch_size, device=device, batch_size_fn=batch_size_fn, train=train, repeat=False, shuffle=False, sort=False, sort_within_batch=sort_within_batch)
        # doc_index: per-example flag "starts a document";
        # doc_range: (start, end) example spans for each document.
        (self.doc_index, self.doc_range) = self.get_context_index(self.data())
        self.indx = None

    def document_shuffler(self):
        """Shuffle at document granularity; return (examples, start flags)."""
        shuffler_index = self.random_shuffler(range(len(self.doc_range)))
        (docs, indx) = ([], [])
        for i in shuffler_index:
            docs.extend(self.dataset[self.doc_range[i][0]:self.doc_range[i][1]])
            indx.extend(self.doc_index[self.doc_range[i][0]:self.doc_range[i][1]])
        assert (len(docs) == len(self.doc_index)), 'Error in document indexes'
        assert (len(indx) == len(self.dataset)), 'Error in document indexes'
        return (docs, np.array(indx))

    def create_batches(self):
        """Prepare epoch batches; train uses document shuffling."""
        if self.train:
            (data, indx) = self.document_shuffler()
            self.batches = torchtext.data.batch(data, self.batch_size, self.batch_size_fn)
            self.indx = indx
        else:
            # Evaluation: keep document order, never cross a document.
            self.batches = self.batch_eval()
            self.indx = np.array(self.doc_index)

    def get_context_index(self, batch):
        """Mark document-start examples and collect (start, end) spans."""
        (d_index, d_range, prev_i, i) = (([False] * len(batch)), [], 0, 0)
        for (i, m) in enumerate(batch):
            if (m.indices in self.dataset.doc_index):
                d_index[i] = True
                if (prev_i != i):
                    d_range.append((prev_i, i))
                prev_i = i
        # Close the trailing document span, if any.
        if (prev_i != (i + 1)):
            d_range.append((prev_i, (i + 1)))
        return (d_index, d_range)

    def __iter__(self):
        while True:
            self.init_epoch()
            count = 0
            for (idx, minibatch) in enumerate(self.batches):
                if (self._iterations_this_epoch > idx):
                    continue
                self.iterations += 1
                self._iterations_this_epoch += 1
                # Positions (within this minibatch) of document starts.
                indx = np.where(self.indx[count:(count + len(minibatch))])[0].tolist()
                count += len(minibatch)
                (yield (torchtext.data.Batch(minibatch, self.dataset, self.device, self.train), indx))
            if (not self.repeat):
                # Bug fix: raising StopIteration inside a generator is a
                # RuntimeError since PEP 479 (Python 3.7+); a plain return
                # ends the generator correctly.
                return

    def batch_eval(self):
        """Yield evaluation batches that never cross a document boundary."""
        for r in self.doc_range:
            if ((r[1] - r[0]) > self.batch_size):
                for i in range(int(((r[1] - r[0]) / self.batch_size))):
                    (yield self.dataset[(r[0] + (i * self.batch_size)):((r[0] + (i * self.batch_size)) + self.batch_size)])
                # Remainder shorter than batch_size, if any.
                if (((r[0] + (i * self.batch_size)) + self.batch_size) < r[1]):
                    (yield self.dataset[((r[0] + (i * self.batch_size)) + self.batch_size):r[1]])
            else:
                (yield self.dataset[r[0]:r[1]])
class BenchmarkVINFModel(VINFModel):
    """VINF subspace model bundled with its data loader, loss and trainer."""

    def __init__(self, loader, criterion, optimizer, epochs, base, subspace, flow, prior_log_sigma=3.0, lr=0.1, temperature=1.0, num_samples=45000, *args, **kwargs):
        super(BenchmarkVINFModel, self).__init__(base, subspace, flow, prior_log_sigma=prior_log_sigma)
        self.loader = loader
        self.criterion = criterion
        # Bug fix: `epochs` was never stored, so fit() crashed with
        # AttributeError on self.epochs.
        self.epochs = epochs
        # NOTE(review): the `optimizer` argument is ignored and a fresh
        # Adam is always built — confirm that is intentional.
        self.optimizer = torch.optim.Adam([param for param in self.parameters()], lr=lr)
        self.elbo = ELBO_NF(self.criterion, num_samples, temperature)

    def fit(self, *args, **kwargs):
        """Train for ``self.epochs`` epochs, printing per-epoch statistics."""
        for epoch in range(self.epochs):
            train_res = train_epoch(self.loader, self, self.elbo, self.optimizer)
            values = [('%d/%d' % ((epoch + 1), self.epochs)), train_res['accuracy'], train_res['loss'], train_res['stats']['kl'], train_res['stats']['nll']]
            print(values)
def get_max_min_notes(piano_roll_dict):
    """Return the global (max_note, min_note) across all datasets.

    Bug fix: ``min_note`` previously started at 0.0, so any dataset whose
    lowest note value was positive still reported 0 as the minimum. It
    now starts at +inf so the true minimum is found; ``max_note`` keeps
    its 0 floor (note values are compared against it as before).

    Parameters
    ----------
    piano_roll_dict : dict
        Maps dataset names to piano-roll structures consumable by
        ``unroll``.

    Returns
    -------
    tuple
        (max_note, min_note) over every dataset.
    """
    max_note = 0
    min_note = float('inf')
    for dataset in piano_roll_dict:
        unrolled = unroll(piano_roll_dict[dataset])
        max_note = max(np.max(unrolled), max_note)
        min_note = min(np.min(unrolled), min_note)
    return (max_note, min_note)
class SoftError(torchmetrics.Metric):
    """Mean 'soft error': 1 minus the predicted probability of the true class."""

    full_state_update = False

    def __init__(self):
        super().__init__()
        # Running numerator (summed soft errors) and denominator (count).
        self.add_state('correct', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('total', default=torch.tensor(0.0), dist_reduce_fx='sum')

    def update(self, preds: torch.Tensor, target: torch.Tensor):
        """Accumulate soft errors for one batch of class probabilities."""
        true_class_prob = preds[range(preds.shape[0]), target]
        n = target.numel()
        self.correct += n - torch.sum(true_class_prob)
        self.total += n

    def compute(self):
        """Return the mean soft error over everything seen so far."""
        return self.correct / self.total
def conv2d_biprec(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, num_bits_grad=None):
    """Conv2d with a bi-precision straight-through gradient split.

    Two copies of the same convolution are combined so that the gradient
    w.r.t. ``weight``/``bias`` flows through ``fwd`` (input detached),
    while the gradient w.r.t. ``input`` flows through ``bwd``, whose
    backward signal is quantized to ``num_bits_grad`` bits by
    ``quantize_grad`` (presumably identity in the forward pass — verify).
    The combination fwd + bwd - fwd.detach() is numerically the plain
    conv output while splitting the gradient paths as described.
    """
    detached_bias = bias.detach() if bias is not None else None
    fwd = F.conv2d(input.detach(), weight, bias, stride, padding, dilation, groups)
    bwd = F.conv2d(input, weight.detach(), detached_bias, stride, padding, dilation, groups)
    bwd = quantize_grad(bwd, num_bits=num_bits_grad, flatten_dims=(1, (- 1)))
    return fwd + bwd - fwd.detach()
def test_mit_init():
    """MixVisionTransformer `init_cfg` / deprecated `pretrained` handling.

    Covers: no init config, a Pretrained init_cfg pointing at a missing
    checkpoint (OSError), invalid init_cfg types, the deprecated
    `pretrained` string path, and rejection of invalid/conflicting
    combinations of `pretrained` and `init_cfg`.
    """
    path = 'PATH_THAT_DO_NOT_EXIST'
    # No init config: init_weights falls back to default initialization.
    model = MixVisionTransformer(pretrained=None, init_cfg=None)
    assert (model.init_cfg is None)
    model.init_weights()
    # Pretrained init_cfg with a missing checkpoint file -> OSError on load.
    model = MixVisionTransformer(pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
    assert (model.init_cfg == dict(type='Pretrained', checkpoint=path))
    with pytest.raises(OSError):
        model.init_weights()
    # A non-dict init_cfg is only rejected when weights are initialized.
    model = MixVisionTransformer(pretrained=None, init_cfg=123)
    with pytest.raises(TypeError):
        model.init_weights()
    # Deprecated `pretrained` string is converted into a Pretrained init_cfg.
    model = MixVisionTransformer(pretrained=path, init_cfg=None)
    assert (model.init_cfg == dict(type='Pretrained', checkpoint=path))
    with pytest.raises(OSError):
        model.init_weights()
    # Passing both `pretrained` and an init_cfg is not allowed.
    with pytest.raises(AssertionError):
        MixVisionTransformer(pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
    with pytest.raises(AssertionError):
        MixVisionTransformer(pretrained=path, init_cfg=123)
    # Non-string `pretrained` values are rejected.
    with pytest.raises(TypeError):
        MixVisionTransformer(pretrained=123, init_cfg=None)
    with pytest.raises(AssertionError):
        MixVisionTransformer(pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    with pytest.raises(AssertionError):
        MixVisionTransformer(pretrained=123, init_cfg=123)
class TabNet(BaseTabularModelWithoutAttention):
    """TabNet (Arik & Pfister) backbone for tabular data.

    The parent class embeds categorical and (optionally) continuous
    columns; the sequential-attention TabNetEncoder then processes the
    embedded input over ``n_steps`` decision steps.
    """

    def __init__(self, column_idx: Dict[(str, int)], cat_embed_input: Optional[List[Tuple[(str, int, int)]]]=None, cat_embed_dropout: float=0.1, use_cat_bias: bool=False, cat_embed_activation: Optional[str]=None, continuous_cols: Optional[List[str]]=None, cont_norm_layer: str=None, embed_continuous: bool=False, cont_embed_dim: int=32, cont_embed_dropout: float=0.1, use_cont_bias: bool=True, cont_embed_activation: Optional[str]=None, n_steps: int=3, step_dim: int=8, attn_dim: int=8, dropout: float=0.0, n_glu_step_dependent: int=2, n_glu_shared: int=2, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02, gamma: float=1.3, epsilon: float=1e-15, mask_type: str='sparsemax'):
        super(TabNet, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=embed_continuous, cont_embed_dim=cont_embed_dim, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation)
        # Encoder hyper-parameters, kept on the instance for introspection.
        self.n_steps = n_steps
        self.step_dim = step_dim
        self.attn_dim = attn_dim
        self.dropout = dropout
        self.n_glu_step_dependent = n_glu_step_dependent
        self.n_glu_shared = n_glu_shared
        self.ghost_bn = ghost_bn
        self.virtual_batch_size = virtual_batch_size
        self.momentum = momentum
        self.gamma = gamma
        self.epsilon = epsilon
        self.mask_type = mask_type
        # Input width after categorical + continuous embedding.
        self.embed_out_dim = self.cat_and_cont_embed.output_dim
        self.encoder = TabNetEncoder(self.embed_out_dim, n_steps, step_dim, attn_dim, dropout, n_glu_step_dependent, n_glu_shared, ghost_bn, virtual_batch_size, momentum, gamma, epsilon, mask_type)

    def forward(self, X: Tensor, prior: Optional[Tensor]=None) -> Tuple[(Tensor, Tensor)]:
        """Return (summed per-step encoder outputs, sparsity loss M_loss)."""
        x = self._get_embeddings(X)
        (steps_output, M_loss) = self.encoder(x, prior)
        res = torch.sum(torch.stack(steps_output, dim=0), dim=0)
        return (res, M_loss)

    def forward_masks(self, X: Tensor) -> Tuple[(Tensor, Dict[(int, Tensor)])]:
        """Return the encoder's per-step explanation masks for ``X``."""
        x = self._get_embeddings(X)
        return self.encoder.forward_masks(x)

    # NOTE(review): takes no arguments and just returns a value —
    # presumably decorated with @property upstream; confirm.
    def output_dim(self) -> int:
        return self.step_dim
class DummyIntegerProblem(IntegerProblem):
    """Minimal IntegerProblem stub for tests: ``evaluate`` is the identity."""

    def __init__(self):
        super(DummyIntegerProblem, self).__init__()

    def number_of_objectives(self) -> int:
        # Bi-objective dummy problem.
        return 2

    def number_of_constraints(self) -> int:
        # Unconstrained.
        return 0

    def evaluate(self, solution: IntegerSolution) -> IntegerSolution:
        """Return the solution unchanged (no objective computation)."""
        return solution

    def name(self) -> str:
        return 'Dummy integer problem'
def count_parameters(model, trainable_only=True, is_dict=False):
    """Count scalar parameters of a model or of a state-dict-like mapping.

    Parameters
    ----------
    model : nn.Module or dict
        The model, or (when ``is_dict``) a mapping of names to tensors.
    trainable_only : bool
        Count only parameters with ``requires_grad`` (ignored for dicts).
    is_dict : bool
        Treat ``model`` as a name -> tensor mapping.

    Returns
    -------
    int
        Total number of scalar parameters.
    """
    if is_dict:
        return sum(np.prod(list(model[name].size())) for name in model)
    if trainable_only:
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in model.parameters())
def remove_duplicate_anns_by_id(annotation_list):
    """Drop annotations whose 'id' was already seen, keeping first occurrences.

    Improvement: the previous version tagged duplicates by writing a
    'delete' key into the input dicts; this version leaves the input
    unmodified. (It also no longer removes unique annotations that
    happened to arrive with a pre-set 'delete': True flag.)

    Parameters
    ----------
    annotation_list : list of dict or None
        COCO-style annotations, each with an 'id' key.

    Returns
    -------
    list of dict or None
        Deduplicated annotations in original order; None when the input
        is None.
    """
    if annotation_list is None:
        return None
    seen_ids = set()
    deduped = []
    for ann in annotation_list:
        if ann['id'] not in seen_ids:
            seen_ids.add(ann['id'])
            deduped.append(ann)
    return deduped
class UnpackLayerConv3d(nn.Module):
    """Upsampling block: 2D conv -> 3D conv expansion -> PixelShuffle.

    The 2D conv emits out_channels*r^2/d channels; a depth-d 3D conv
    multiplies them back so PixelShuffle(r) can produce out_channels
    maps at r-times the spatial resolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size, r=2, d=8):
        super().__init__()
        self.conv = Conv2D(in_channels, ((out_channels * (r ** 2)) // d), kernel_size, 1)
        self.unpack = nn.PixelShuffle(r)
        self.conv3d = nn.Conv3d(1, d, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))

    def forward(self, x):
        """Apply conv, 3D channel expansion, channel fold, pixel shuffle."""
        features = self.conv(x)
        # Insert a singleton depth axis so conv3d can expand by factor d.
        volume = self.conv3d(features.unsqueeze(1))
        b, c, depth, h, w = volume.shape
        folded = volume.view(b, c * depth, h, w)
        return self.unpack(folded)
def download_pretrained_models(method, file_ids):
    """Download pretrained model files for ``method`` from Google Drive.

    Files land in ./experiments/pretrained_models/<method>. If a file
    already exists the user is asked interactively whether to overwrite.

    Parameters
    ----------
    method : str
        Method name; used as the destination sub-directory.
    file_ids : dict
        Maps file names to Google Drive file ids.

    Raises
    ------
    ValueError
        If the overwrite prompt is answered with anything but Y/N.
    """
    save_path_root = f'./experiments/pretrained_models/{method}'
    os.makedirs(save_path_root, exist_ok=True)
    for (file_name, file_id) in file_ids.items():
        save_path = osp.abspath(osp.join(save_path_root, file_name))
        if osp.exists(save_path):
            user_response = input(f'''{file_name} already exist. Do you want to cover it? Y/N
''')
            if (user_response.lower() == 'y'):
                print(f'Covering {file_name} to {save_path}')
                download_file_from_google_drive(file_id, save_path)
            elif (user_response.lower() == 'n'):
                print(f'Skipping {file_name}')
            else:
                # Typo fix in the error message: 'accpets' -> 'accepts'.
                raise ValueError('Wrong input. Only accepts Y/N.')
        else:
            print(f'Downloading {file_name} to {save_path}')
            download_file_from_google_drive(file_id, save_path)
class Encoding(nn.Module):
    """Learnable residual encoding layer (the Encoding Layer from EncNet).

    Soft-assigns every pixel feature to ``num_codes`` learned codewords
    and aggregates the residuals, producing a (B, num_codes, channels)
    descriptor.
    """

    def __init__(self, channels, num_codes):
        super(Encoding, self).__init__()
        (self.channels, self.num_codes) = (channels, num_codes)
        std = (1.0 / ((num_codes * channels) ** 0.5))
        # Learned codewords and per-code negative smoothing factors.
        self.codewords = nn.Parameter(torch.empty(num_codes, channels, dtype=torch.float).uniform_((- std), std), requires_grad=True)
        self.scale = nn.Parameter(torch.empty(num_codes, dtype=torch.float).uniform_((- 1), 0), requires_grad=True)

    @staticmethod
    def scaled_l2(x, codewords, scale):
        """Scaled squared L2 distance between every feature and codeword.

        Bug fix: this (and ``aggregate``) must be a @staticmethod —
        ``forward`` calls ``self.scaled_l2(x, codewords, scale)`` with
        three explicit arguments, so as a plain instance method ``self``
        would be bound as ``x`` and the call would raise a TypeError.
        """
        (num_codes, channels) = codewords.size()
        batch_size = x.size(0)
        reshaped_scale = scale.view((1, 1, num_codes))
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        scaled_l2_norm = (reshaped_scale * (expanded_x - reshaped_codewords).pow(2).sum(dim=3))
        return scaled_l2_norm

    @staticmethod
    def aggregate(assignment_weights, x, codewords):
        """Weighted sum of residuals (x - codeword) per code, over pixels."""
        (num_codes, channels) = codewords.size()
        reshaped_codewords = codewords.view((1, 1, num_codes, channels))
        batch_size = x.size(0)
        expanded_x = x.unsqueeze(2).expand((batch_size, x.size(1), num_codes, channels))
        encoded_feat = (assignment_weights.unsqueeze(3) * (expanded_x - reshaped_codewords)).sum(dim=1)
        return encoded_feat

    def forward(self, x):
        """Encode a (B, C, H, W) feature map into (B, num_codes, C)."""
        assert ((x.dim() == 4) and (x.size(1) == self.channels))
        batch_size = x.size(0)
        # (B, C, H, W) -> (B, H*W, C): one feature vector per pixel.
        x = x.view(batch_size, self.channels, (- 1)).transpose(1, 2).contiguous()
        assignment_weights = F.softmax(self.scaled_l2(x, self.codewords, self.scale), dim=2)
        encoded_feat = self.aggregate(assignment_weights, x, self.codewords)
        return encoded_feat

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(Nx{self.channels}xHxW =>Nx{self.num_codes}x{self.channels})'
        return repr_str
def invert(mapper):
    """Invert a mapping: each value maps to the list of keys that held it.

    Keys sharing a value are grouped in encounter order.

    Returns
    -------
    collections.defaultdict
        value -> list of keys.
    """
    grouped = defaultdict(list)
    for key, value in mapper.items():
        grouped[value].append(key)
    return grouped
def test_active_set_finance_without_subprocess_intercept(dataset_finance):
    """Active-set consistency check on the finance dataset, with intercept.

    Fits two identically-configured L1 regressions (renamed to 'Lasso')
    through ``fit_large_feature_number``.
    """
    # ConvergenceWarning is expected at max_iter=10; silence it.
    warnings.filterwarnings('ignore', category=ConvergenceWarning)
    (X, y) = (dataset_finance[0], dataset_finance[1])
    n_samples = X.shape[0]
    primary = Regression(loss='square', penalty='l1', fit_intercept=True, lambda_1=(0.1 / n_samples), max_iter=10, verbose=False)
    secondary = Regression(loss='square', penalty='l1', fit_intercept=True, lambda_1=(0.1 / n_samples), max_iter=10, verbose=False)
    # HACK: rewriting the class __name__ affects every Regression instance
    # for the rest of the process — presumably needed so downstream helper
    # code dispatches on the 'Lasso' name; verify against the helper.
    type(primary).__name__ = 'Lasso'
    type(secondary).__name__ = 'Lasso'
    fit_large_feature_number(primary, secondary, X, y)
def create_random_map(height, width, corridor_radius, iterations, obstacle_number, obstacle_extra_radius, map_type: str, indoor_prob: float, seed: int):
    """Generate a random occupancy map, either indoor or outdoor.

    For map_type 'mixed' an indoor map is drawn with probability
    ``indoor_prob``, otherwise an outdoor map. Seeding numpy makes both
    the choice and the generated map reproducible.
    """
    np.random.seed(seed)
    if map_type == 'mixed':
        # Resolve the mixed type with a single Bernoulli draw.
        map_type = 'indoor' if np.random.random() <= indoor_prob else 'outdoor'
    if map_type == 'indoor':
        return create_indoor_map(height, width, corridor_radius, iterations)
    return create_outdoor_map(height, width, obstacle_number, obstacle_extra_radius)
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
if (not chinese_word_set):
return bert_tokens
max_word_len = max([len(w) for w in chinese_word_set])
bert_word = bert_tokens
(start, end) = (0, len(bert_word))
while (start < end):
single_word = True
if is_chinese(bert_word[start]):
l = min((end - start), max_word_len)
for i in range(l, 1, (- 1)):
whole_word = ''.join(bert_word[start:(start + i)])
if (whole_word in chinese_word_set):
for j in range((start + 1), (start + i)):
bert_word[j] = ('##' + bert_word[j])
start = (start + i)
single_word = False
break
if single_word:
start += 1
return bert_word |
def get_object_ids(data_root, ann_file, object_names):
    """Map COCO category names to their category ids.

    Parameters
    ----------
    data_root : str
        Dataset root containing an 'annotations' directory.
    ann_file : str
        Annotation file name inside 'annotations'.
    object_names : list of str
        Category names to look up.

    Returns
    -------
    list of int
        Category ids in the same order as ``object_names``.
    """
    coco = COCO(os.path.join(data_root, 'annotations', ann_file))
    name_to_id = {category['name']: category['id'] for category in coco.dataset['categories']}
    return [name_to_id[name] for name in object_names]
class HansProcessor(DataProcessor):
    """Data processor for the HANS diagnostic dataset (MNLI heuristics)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensorflow-datasets style dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['premise'].numpy().decode('utf-8'),
            tensor_dict['hypothesis'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Read and parse the HANS training split."""
        lines = self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt'))
        return self._create_examples(lines, 'train')

    def get_dev_examples(self, data_dir):
        """Read and parse the HANS evaluation split."""
        lines = self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt'))
        return self._create_examples(lines, 'dev')

    def get_labels(self):
        """The three NLI labels."""
        return ['contradiction', 'entailment', 'neutral']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows (header skipped) into InputExamples."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # header row
            pair_id = row[7]
            # Strip the leading 'ex' marker from example pair ids.
            if pair_id.startswith('ex'):
                pair_id = pair_id[2:]
            examples.append(InputExample(
                guid='%s-%s' % (set_type, row[0]),
                text_a=row[5],
                text_b=row[6],
                label=row[(- 1)],
                pairID=pair_id,
            ))
        return examples
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
    """Evaluate predicted alpha mattes against ground-truth alphas.

    Computes SAD / MSE / GRAD / CONN per image in parallel over ``nproc``
    workers and prints the mean over the whole set.

    The 50-GT-file case corresponds to a Composition-1k-style layout
    where each GT alpha is reused for several composited predictions
    named '<gt>_<k>.png'; the regex strips the '_<k>' suffix so the
    prediction can be paired with its GT file.
    """
    images = sorted(mmcv.scandir(pred_root))
    gt_files_num = len(list(mmcv.scandir(gt_root)))
    if (gt_files_num == 50):
        pattern = re.compile('(.+)_(?:\\d+)(.png)')
    pairs = []
    for img in images:
        pred_alpha_path = osp.join(pred_root, img)
        if (gt_files_num == 50):
            # NOTE(review): pattern.match returns None for unexpected file
            # names, which would raise AttributeError here — confirm the
            # prediction directory only ever contains '<gt>_<k>.png' files.
            groups = pattern.match(img).groups()
            alpha_path = osp.join(gt_root, ''.join(groups))
        else:
            alpha_path = osp.join(gt_root, img)
        trimap_path = (osp.join(trimap_root, img) if (trimap_root is not None) else None)
        pairs.append((pred_alpha_path, alpha_path, trimap_path))
    results = mmcv.track_parallel_progress(evaluate_one, pairs, nproc)
    if verbose:
        # Per-image breakdown before the aggregate line.
        for (i, img) in enumerate(images):
            (sad_result, mse_result, grad_result, conn_result) = results[i]
            print(f'{img} SAD: {sad_result:.6g} MSE: {mse_result:.6g} GRAD: {grad_result:.6g} CONN: {conn_result:.6g}')
    (sad_mean, mse_mean, grad_mean, conn_mean) = np.mean(results, axis=0)
    print(f'MEAN: SAD: {sad_mean:.6g} MSE: {mse_mean:.6g} GRAD: {grad_mean:.6g} CONN: {conn_mean:.6g}')
def item_from_space(space: Space) -> Item:
    """Build an Item whose edge lengths equal the extents of ``space``."""
    x_len = space.x2 - space.x1
    y_len = space.y2 - space.y1
    z_len = space.z2 - space.z1
    return Item(x_len=x_len, y_len=y_len, z_len=z_len)
class DepthWiseConvOp(nn.Module):
    """Grouped (groups=C_out) convolution with an optional activation.

    ``kernel_size`` and ``act_op`` are symbolic keys resolved through the
    module-level PADDING_OPS / KERNEL_SIZE_OPS / ACTIVATION_OPS tables.
    """

    def __init__(self, C_in, C_out, kernel_size, act_op, affine=True):
        super(DepthWiseConvOp, self).__init__()
        padding = PADDING_OPS[kernel_size]
        kernel_size = KERNEL_SIZE_OPS[kernel_size]
        activation = ACTIVATION_OPS[act_op]
        modules = [nn.Conv2d(C_in, C_out, kernel_size=kernel_size, padding=padding, groups=C_out, bias=False)]
        # A falsy table entry means "no activation".
        if activation:
            modules.append(activation)
        self.op = nn.Sequential(*modules)

    def forward(self, x):
        return self.op(x)
def compute_used_samples(update_state_gate):
    """Average gate mass ('on' steps) per sequence in the batch.

    Parameters
    ----------
    update_state_gate : array-like, shape (batch, time)
        Binary (or soft) gate activations; assumed numpy/torch-style
        (supports ``.shape`` and ``.sum()``).

    Returns
    -------
    scalar
        Total gate activation divided by the batch size. Replaces the
        previous element-by-element Python double loop with a single
        vectorized ``sum()`` — same result, no Python-level iteration.
    """
    batch_size = update_state_gate.shape[0]
    return update_state_gate.sum() / batch_size
def partial_repr(t):
    """HTML representation of a functools.partial: func, args and kwargs,
    each rendered through ``link_type``."""
    kw_parts = tuple(f'{name}={val}' for name, val in t.keywords.items())
    pieces = (t.func,) + t.args + kw_parts
    body = ', '.join(link_type(piece) for piece in pieces)
    return f'<code>partial(</code>{body}<code>)</code>'
def test_empty_book():
    """A fresh OrderBook reports empty L1/L2/L3 data and zero volume."""
    book = OrderBook(FakeExchangeAgent(), SYMBOL)
    # Idiom fix: identity check `is None` instead of `== None`.
    assert (book.get_l1_bid_data() is None)
    assert (book.get_l1_ask_data() is None)
    assert (book.get_l2_bid_data() == [])
    assert (book.get_l2_ask_data() == [])
    assert (book.get_l3_bid_data() == [])
    assert (book.get_l3_ask_data() == [])
    assert (book.get_transacted_volume() == (0, 0))
class VGGBackbone(nn.Module):
    """VGG-style backbone built from a list-of-lists layer configuration.

    Each entry of ``cfg`` describes one stage: ints are conv output
    channels, 'M' is a max-pool, and a ``(v, kwargs)`` tuple overrides the
    default layer kwargs. ``forward`` returns the output of every stage.
    """

    def __init__(self, cfg, extra_args=None, norm_layers=None):
        super().__init__()
        # B006 fix: the defaults were mutable lists ([]) shared across all
        # instances; default to None and build fresh lists per instance.
        # Passing an explicit list behaves exactly as before.
        extra_args = [] if extra_args is None else extra_args
        norm_layers = [] if norm_layers is None else norm_layers
        self.channels = []
        self.layers = nn.ModuleList()
        self.in_channels = 3
        # reversed so add_layer() can pop() configs in the original order
        self.extra_args = list(reversed(extra_args))
        self.total_layer_count = 0
        self.state_dict_lookup = {}
        for (idx, layer_cfg) in enumerate(cfg):
            self._make_layer(layer_cfg)
        self.norms = nn.ModuleList([nn.BatchNorm2d(self.channels[l]) for l in norm_layers])
        self.norm_lookup = {l: idx for (idx, l) in enumerate(norm_layers)}
        # conv layers only; used by callers to select backbone parameters
        self.backbone_modules = [m for m in self.modules() if isinstance(m, nn.Conv2d)]

    def _make_layer(self, cfg):
        """Append one stage (convs / pools per ``cfg``) as an nn.Sequential."""
        layers = []
        for v in cfg:
            args = None
            if isinstance(v, tuple):
                args = v[1]
                v = v[0]
            if (v == 'M'):
                if (args is None):
                    args = {'kernel_size': 2, 'stride': 2}
                layers.append(nn.MaxPool2d(**args))
            else:
                # map the flat pretrained-checkpoint layer index to 'stage.sublayer'
                cur_layer_idx = (self.total_layer_count + len(layers))
                self.state_dict_lookup[cur_layer_idx] = ('%d.%d' % (len(self.layers), len(layers)))
                if (args is None):
                    args = {'kernel_size': 3, 'padding': 1}
                layers.append(nn.Conv2d(self.in_channels, v, **args))
                layers.append(nn.ReLU(inplace=True))
                self.in_channels = v
        self.total_layer_count += len(layers)
        self.channels.append(self.in_channels)
        self.layers.append(nn.Sequential(*layers))

    def forward(self, x):
        """Run all stages (applying any configured per-stage BatchNorm) and return every stage output."""
        outs = []
        for (idx, layer) in enumerate(self.layers):
            x = layer(x)
            if (idx in self.norm_lookup):
                x = self.norms[self.norm_lookup[idx]](x)
            outs.append(x)
        return tuple(outs)

    def transform_key(self, k):
        """Translate a flat pretrained state-dict key into this module's 'layers.<stage>.<sub>' naming."""
        vals = k.split('.')
        layerIdx = self.state_dict_lookup[int(vals[0])]
        return ('layers.%s.%s' % (layerIdx, vals[1]))

    def init_backbone(self, path):
        """Load pretrained weights from *path* with remapped keys; added layers stay random (strict=False)."""
        state_dict = torch.load(path)
        state_dict = OrderedDict([(self.transform_key(k), v) for (k, v) in state_dict.items()])
        self.load_state_dict(state_dict, strict=False)

    def add_layer(self, conv_channels=128, downsample=2):
        """Append an extra 1x1 -> 3x3(stride=downsample) stage; config may come from extra_args."""
        if (len(self.extra_args) > 0):
            (conv_channels, downsample) = self.extra_args.pop()
        padding = (1 if (downsample > 1) else 0)
        layer = nn.Sequential(nn.Conv2d(self.in_channels, conv_channels, kernel_size=1), nn.ReLU(inplace=True), nn.Conv2d(conv_channels, (conv_channels * 2), kernel_size=3, stride=downsample, padding=padding), nn.ReLU(inplace=True))
        self.in_channels = (conv_channels * 2)
        self.channels.append(self.in_channels)
        self.layers.append(layer)
class _LazyModule(ModuleType):
    """Module subclass that defers submodule imports until attribute access.

    ``import_structure`` maps submodule name -> list of public names that
    submodule defines; the real import happens on first attribute lookup.
    """
    def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
        super().__init__(name)
        # top-level submodule names that may be lazily imported
        self._modules = set(import_structure.keys())
        # reverse map: public class/function name -> owning submodule
        self._class_to_module = {}
        for (key, values) in import_structure.items():
            for value in values:
                self._class_to_module[value] = key
        # advertise submodules plus all their public names (for dir()/IDEs)
        self.__all__ = (list(import_structure.keys()) + list(chain(*import_structure.values())))
        self.__file__ = module_file
        self.__spec__ = module_spec
        self.__path__ = [os.path.dirname(module_file)]
        # eagerly available objects that bypass the lazy machinery entirely
        self._objects = ({} if (extra_objects is None) else extra_objects)
        # stashed for __reduce__ so the lazy module survives pickling
        self._name = name
        self._import_structure = import_structure
    def __dir__(self):
        """Extend the default dir() with every lazily importable name."""
        result = super().__dir__()
        # only append names not already present to avoid duplicates
        for attr in self.__all__:
            if (attr not in result):
                result.append(attr)
        return result
    def __getattr__(self, name: str) -> Any:
        """Resolve *name*, importing its owning submodule on first access."""
        if (name in self._objects):
            return self._objects[name]
        if (name in self._modules):
            value = self._get_module(name)
        elif (name in self._class_to_module.keys()):
            module = self._get_module(self._class_to_module[name])
            value = getattr(module, name)
        else:
            raise AttributeError(f'module {self.__name__} has no attribute {name}')
        # cache on the instance so __getattr__ is not triggered again for this name
        setattr(self, name, value)
        return value
    def _get_module(self, module_name: str):
        """Import and return the submodule, chaining any failure with context."""
        try:
            return importlib.import_module(('.' + module_name), self.__name__)
        except Exception as e:
            raise RuntimeError(f'''Failed to import {self.__name__}.{module_name} because of the following error (look up to see its traceback):
{e}''') from e
    def __reduce__(self):
        """Support pickling by reconstructing from (name, file, import structure)."""
        return (self.__class__, (self._name, self.__file__, self._import_structure))
def text_to_html_table(items):
    """Render rows as an HTML table string; the first row is the header.

    Floats are formatted with 6 decimal places, everything else via str().
    PERF fix: assembles the result with a parts list + ''.join instead of
    repeated string ``+=`` (which is quadratic in the worst case).
    """
    parts = ['<table border="1" class="dataframe">\n']
    parts.append(' <thead>\n <tr style="text-align: left;">\n')
    for heading in items[0]:
        parts.append(f' <th>{heading}</th>\n')
    parts.append(' </tr>\n </thead>\n <tbody>\n')
    for line in items[1:]:
        parts.append(' <tr>\n')
        for elt in line:
            cell = (f'{elt:.6f}' if isinstance(elt, float) else str(elt))
            parts.append(f' <td>{cell}</td>\n')
        parts.append(' </tr>\n')
    parts.append(' </tbody>\n</table><p>')
    return ''.join(parts)
def decode_rerank_scores(args):
    """Score (src, hyp) pairs with a reranker model ensemble loaded from args.path.

    Returns (src, hyp, mt_scores, model_scores) where model_scores[i] is the
    reranker score of the i-th hypothesis, restored to original sample order.
    """
    # default to batch size 1 when neither batching knob was supplied
    if ((args.max_tokens is None) and (args.batch_size is None)):
        args.batch_size = 1
    logger.info(args)
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    logger.info('loading model(s) from {}'.format(args.path))
    # NOTE(review): eval() on a CLI-provided string executes arbitrary code;
    # acceptable only for trusted command lines -- consider ast.literal_eval.
    (models, _model_args, task) = checkpoint_utils.load_model_ensemble_and_task([args.path], arg_overrides=eval(args.model_overrides))
    for model in models:
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    generator = task.build_generator(args)
    tokenizer = task.build_tokenizer(args)
    bpe = task.build_bpe(args)
    def encode_fn(x):
        # tokenizer first, then BPE -- mirrors fairseq's interactive pipeline
        if (tokenizer is not None):
            x = tokenizer.encode(x)
        if (bpe is not None):
            x = bpe.encode(x)
        return x
    max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models])
    (src, hyp, mt_scores) = parse_fairseq_gen(args.in_text, task)
    model_scores = {}
    logger.info('decode reranker score')
    for batch in make_batches(args, src, hyp, task, max_positions, encode_fn):
        src_tokens = batch.src_tokens
        src_lengths = batch.src_lengths
        if use_cuda:
            src_tokens = src_tokens.cuda()
            src_lengths = src_lengths.cuda()
        sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
        scores = task.inference_step(generator, models, sample)
        # keep only the top score per sample, keyed by the original batch id
        for (id, sc) in zip(batch.ids.tolist(), scores.tolist()):
            model_scores[id] = sc[0]
    # batches may arrive out of order -- restore 0..N-1 ordering
    model_scores = [model_scores[i] for i in range(len(model_scores))]
    return (src, hyp, mt_scores, model_scores)
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """FP16 optimizer wrapper: keeps an FP32 master copy of the parameters
    and (unless running in bf16) a dynamic loss scaler.

    NOTE(review): `build_optimizer` takes `cls` and several accessors below
    read like @property / @x.setter / @classmethod methods from upstream
    fairseq whose decorators appear to be missing here -- as written, the
    second plain `def optimizer` shadows the first. Confirm against the
    original fairseq source before relying on these.
    """
    def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
        super().__init__(cfg.optimizer)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        if (getattr(cfg.common, 'fp16_scale_window', None) is None):
            if (len(cfg.optimization.update_freq) > 1):
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            # heuristic default: 2**14 updates, shrunk by the number of
            # data-parallel replicas and the gradient-accumulation factor
            data_parallel_size = int((cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size))
            scale_window = int((((2 ** 14) / data_parallel_size) / cfg.optimization.update_freq[0]))
        else:
            scale_window = cfg.common.fp16_scale_window
        if (not getattr(cfg.common, 'bf16', False)):
            self.scaler = DynamicLossScaler(init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale)
        else:
            # bf16 has (roughly) fp32's dynamic range, so no loss scaling
            self.scaler = None
    def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
        """Construct the wrapped FP32 optimizer (flattened grads unless disabled or bf16)."""
        flatten = (not getattr(cfg.common, 'fp16_no_flatten_grads', False))
        if getattr(cfg.common, 'bf16', False):
            flatten = False
        fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
        if (flatten and (not fp32_optimizer.supports_flat_params)):
            raise RuntimeError(f'chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads')
        return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)
    def optimizer(self):
        # delegates to the wrapped FP32 optimizer (see class NOTE re: @property)
        return self.fp32_optimizer.optimizer
    def optimizer(self, optimizer):
        # presumably the @optimizer.setter counterpart -- see class NOTE
        self.fp32_optimizer.optimizer = optimizer
    def lr_scheduler(self):
        return getattr(self.fp32_optimizer, 'lr_scheduler', None)
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config
    def get_lr(self):
        return self.fp32_optimizer.get_lr()
    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)
    def all_reduce_grads(self, module):
        self.fp32_optimizer.all_reduce_grads(module)
class NATSpeechToSpeechDataset(FairseqDataset):
    """Speech-to-speech dataset for non-autoregressive models.

    Holds source audio (paths + frame counts) and, optionally, target
    audio, target text, and variance targets (durations/pitches/energies).

    NOTE(review): `tokenize` reads like a @classmethod, and `sizes` /
    `can_reuse_epoch_itr_across_epochs` like @property methods in upstream
    fairseq -- the decorators appear to be missing here; confirm before
    restoring them (callers may rely on the current call style).
    """

    def __init__(self, split: str, is_train_split: bool, cfg: NATS2SDataConfig, src_audio_paths: List[str], src_n_frames: List[int], tgt_audio_paths: Optional[List[str]]=None, tgt_n_frames: Optional[List[int]]=None, tgt_texts: Optional[List[str]]=None, ids: Optional[List[str]]=None, tgt_dict: Optional[Dictionary]=None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step: int=1, durations: Optional[List[List[int]]]=None, pitches: Optional[List[str]]=None, energies: Optional[List[str]]=None):
        (self.split, self.is_train_split) = (split, is_train_split)
        self.cfg = cfg
        (self.src_audio_paths, self.src_n_frames) = (src_audio_paths, src_n_frames)
        (self.tgt_audio_paths, self.tgt_n_frames) = (tgt_audio_paths, tgt_n_frames)
        self.tgt_texts = tgt_texts
        self.ids = ids
        self.n_samples = len(src_n_frames)
        self.shuffle = (cfg.shuffle if is_train_split else False)
        # split-dependent feature/waveform transforms for both sides
        self.source_feature_transforms = CompositeAudioFeatureTransform.from_config_dict(self.cfg.get_source_feature_transforms(split, is_train_split))
        self.source_waveform_transforms = CompositeAudioWaveformTransform.from_config_dict(self.cfg.get_source_waveform_transforms(split, is_train_split))
        self.target_feature_transforms = CompositeAudioFeatureTransform.from_config_dict(self.cfg.get_target_feature_transforms(split, is_train_split))
        self.target_waveform_transforms = CompositeAudioWaveformTransform.from_config_dict(self.cfg.get_target_waveform_transforms(split, is_train_split))
        # raw-audio input is not supported by this dataset
        assert (not self.cfg.use_audio_input)
        self.tgt_dict = tgt_dict
        self.pre_tokenizer = pre_tokenizer
        self.bpe_tokenizer = bpe_tokenizer
        self.tgt_lens = self.get_tgt_lens_and_check_oov()
        self.n_frames_per_step = n_frames_per_step
        self.durations = durations
        self.pitches = pitches
        self.energies = energies
        logger.info(self.__repr__())

    def get_tgt_lens_and_check_oov(self):
        """Tokenize every target text, log the OOV rate, and return per-sample token counts."""
        if (self.tgt_texts is None):
            return [0 for _ in range(self.n_samples)]
        tgt_lens = []
        (n_tokens, n_oov_tokens) = (0, 0)
        for i in range(self.n_samples):
            tokenized = self.get_tokenized_tgt_text(i).split(' ')
            oov_tokens = [t for t in tokenized if (self.tgt_dict.index(t) == self.tgt_dict.unk_index)]
            n_tokens += len(tokenized)
            n_oov_tokens += len(oov_tokens)
            tgt_lens.append(len(tokenized))
        logger.info(f"'{self.split}' has {((n_oov_tokens / n_tokens) * 100):.2f}% OOV")
        return tgt_lens

    def __repr__(self):
        return (self.__class__.__name__ + f'(split="{self.split}", n_samples={self.n_samples:_}, prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, n_frames_per_step={self.n_frames_per_step}, shuffle={self.shuffle}, source_feature_transforms={self.source_feature_transforms}, source_waveform_transforms={self.source_waveform_transforms}, target_feature_transforms={self.target_feature_transforms}, target_waveform_transforms={self.target_waveform_transforms}, ')

    def tokenize(cls, tokenizer, text: str):
        """Apply *tokenizer* to *text* (identity when tokenizer is None)."""
        return (text if (tokenizer is None) else tokenizer.encode(text))

    def get_tokenized_tgt_text(self, index: Union[(int, List[int])]):
        """Return the pre-tokenized + BPE'd target text for one index or a joined list of indices."""
        if _is_int_or_np_int(index):
            text = self.tgt_texts[index]
        else:
            text = ' '.join([self.tgt_texts[i] for i in index])
        text = self.tokenize(self.pre_tokenizer, text)
        text = self.tokenize(self.bpe_tokenizer, text)
        return text

    def pack_frames(self, feature: torch.Tensor):
        """Group consecutive frames into steps of n_frames_per_step, dropping the remainder."""
        if (self.n_frames_per_step == 1):
            return feature
        n_packed_frames = (feature.shape[0] // self.n_frames_per_step)
        feature = feature[:(self.n_frames_per_step * n_packed_frames)]
        return feature.reshape(n_packed_frames, (- 1))

    def _get_source_audio(self, index: int) -> torch.Tensor:
        """Load + transform the source features for one sample as a float tensor."""
        source = get_features_or_waveform(self.src_audio_paths[index], waveform_transforms=self.source_waveform_transforms)
        if (self.source_feature_transforms is not None):
            source = self.source_feature_transforms(source)
        source = torch.from_numpy(source).float()
        return source

    def _get_target_audio(self, index: int) -> torch.Tensor:
        """Load + transform the target features for one sample as a float tensor."""
        target = get_features_or_waveform(self.tgt_audio_paths[index], waveform_transforms=self.target_waveform_transforms)
        if (self.target_feature_transforms is not None):
            target = self.target_feature_transforms(target)
        target = torch.from_numpy(target).float()
        return target

    def __getitem__(self, index: int) -> NATSpeechToSpeechDatasetItem:
        """Assemble one sample: source audio plus any available targets/variance signals."""
        source = self._get_source_audio(index)
        target_text = None
        if (self.tgt_texts is not None):
            tokenized = self.get_tokenized_tgt_text(index)
            target_text = self.tgt_dict.encode_line(tokenized, add_if_not_exist=False, append_eos=True).long()
            # prepend BOS so the text target is <bos> ... <eos>
            bos = torch.LongTensor([self.tgt_dict.bos()])
            target_text = torch.cat((bos, target_text), 0)
        target_audio = None
        if (self.tgt_audio_paths is not None):
            target_audio = self._get_target_audio(index)
            target_audio = self.pack_frames(target_audio)
        (duration, pitch, energy) = (None, None, None)
        # variance targets are padded with a trailing 0 (for the EOS position)
        if (self.durations is not None):
            duration = torch.tensor((self.durations[index] + [0]), dtype=torch.long)
        if (self.pitches is not None):
            pitch = get_features_or_waveform(self.pitches[index])
            pitch = torch.from_numpy(np.concatenate((pitch, [0]))).float()
        if (self.energies is not None):
            energy = get_features_or_waveform(self.energies[index])
            energy = torch.from_numpy(np.concatenate((energy, [0]))).float()
        return NATSpeechToSpeechDatasetItem(index=index, source=source, target_text=target_text, target_audio=target_audio, duration=duration, pitch=pitch, energy=energy)

    def collater(self, samples: List[NATSpeechToSpeechDatasetItem], return_order: bool=False) -> Dict:
        """Collate samples into a batch dict, sorted by descending source length."""
        if (len(samples) == 0):
            return {}
        indices = torch.tensor([x.index for x in samples], dtype=torch.long)
        sources = [x.source for x in samples]
        frames = _collate_frames(sources, self.cfg.use_audio_input)
        n_frames = torch.tensor([x.size(0) for x in sources], dtype=torch.long)
        # sort by source length (desc); every other field follows this order
        (n_frames, order) = n_frames.sort(descending=True)
        indices = indices.index_select(0, order)
        frames = frames.index_select(0, order)
        (target_text, target_text_lengths, ntokens_text) = (None, None, None)
        if (self.tgt_texts is not None):
            target_text = fairseq_data_utils.collate_tokens([x.target_text for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False).index_select(0, order)
            target_text_lengths = torch.tensor([x.target_text.size(0) for x in samples], dtype=torch.long).index_select(0, order)
            ntokens_text = sum((x.target_text.size(0) for x in samples))
        (target_audio, target_audio_lengths, ntokens_audio) = (None, None, None)
        if (self.tgt_audio_paths is not None):
            target_audio = _collate_frames([x.target_audio for x in samples], is_audio_input=False).index_select(0, order)
            target_audio_lengths = torch.tensor([x.target_audio.size(0) for x in samples], dtype=torch.long).index_select(0, order)
            ntokens_audio = sum((x.target_audio.size(0) for x in samples))
        (durations, pitches, energies) = (None, None, None)
        if (self.durations is not None):
            durations = fairseq_data_utils.collate_tokens([x.duration for x in samples], 0).index_select(0, order)
        if (self.pitches is not None):
            pitches = _collate_frames([x.pitch for x in samples], True)
            pitches = pitches.index_select(0, order)
        if (self.energies is not None):
            energies = _collate_frames([x.energy for x in samples], True)
            energies = energies.index_select(0, order)
        net_input = {'src_tokens': frames, 'src_lengths': n_frames}
        out = {'id': indices, 'net_input': net_input, 'target_text': target_text, 'target_text_lengths': target_text_lengths, 'target_audio': target_audio, 'target_audio_lengths': target_audio_lengths, 'durations': durations, 'pitches': pitches, 'energies': energies, 'ntokens_text': ntokens_text, 'ntokens_audio': ntokens_audio, 'nsentences': len(samples)}
        if return_order:
            out['order'] = order
        return out

    def __len__(self):
        return self.n_samples

    def num_tokens(self, index):
        return self.src_n_frames[index]

    def size(self, index):
        return (self.src_n_frames[index], self.tgt_lens[index], self.tgt_n_frames[index])

    def sizes(self):
        return np.array(self.src_n_frames)

    def can_reuse_epoch_itr_across_epochs(self):
        return True

    def ordered_indices(self):
        """Sort indices for batching: random (train) or sequential, tie-broken by longest source first."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append([(- n) for n in self.src_n_frames])
        return np.lexsort(order)

    def prefetch(self, indices):
        # BUG FIX: was `raise False`, which itself raises
        # TypeError("exceptions must derive from BaseException") -- the
        # intent is simply that prefetching is unsupported here.
        raise NotImplementedError
def calculate_fid(dataset_name, generated_dir, target_size=256):
    """Compute FID between cached real images for *dataset_name* and *generated_dir* on CUDA."""
    real_images_dir = os.path.join('EvalImages', dataset_name + '_real_images_' + str(target_size))
    score = fid_score.calculate_fid_given_paths([real_images_dir, generated_dir], target_size, 'cuda', 2048)
    # release GPU memory cached during the FID computation
    torch.cuda.empty_cache()
    return score
def xresnet152(pretrained=False, **kwargs):
    """Construct an XResNet-152; optionally load pretrained weights from model_urls."""
    net = XResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['xresnet152']))
    return net
class ConvTransformerEncoder(FairseqEncoder):
    """Speech encoder: a 2-D conv front-end (4x temporal subsampling) followed by Transformer layers."""

    def __init__(self, args):
        super().__init__(None)
        self.dropout = args.dropout
        self.embed_scale = (1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim))
        self.padding_idx = 1
        self.in_channels = 1
        self.input_dim = args.input_feat_per_channel
        # two stride-2 convs -> 4x subsampling along the time axis
        self.conv = torch.nn.Sequential(torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=(3 // 2)), torch.nn.ReLU(), torch.nn.Conv2d(args.conv_out_channels, args.conv_out_channels, 3, stride=2, padding=(3 // 2)), torch.nn.ReLU())
        transformer_input_dim = self.infer_conv_output_dim(self.in_channels, self.input_dim, args.conv_out_channels)
        self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim)
        self.embed_positions = PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim, self.padding_idx, learned=False)
        self.transformer_layers = nn.ModuleList([])
        self.transformer_layers.extend([TransformerEncoderLayer(args) for i in range(args.encoder_layers)])
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(args.encoder_embed_dim)
        else:
            self.layer_norm = None

    def pooling_ratio(self):
        """Temporal subsampling factor of the conv front-end."""
        return 4

    def infer_conv_output_dim(self, in_channels, input_dim, out_channels):
        """Probe the conv stack with a dummy batch to determine the flattened per-step feature size."""
        sample_seq_len = 200
        sample_bsz = 10
        x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
        x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=(3 // 2))(x)
        x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=(3 // 2))(x)
        x = x.transpose(1, 2)
        (mb, seq) = x.size()[:2]
        return x.contiguous().view(mb, seq, (- 1)).size((- 1))

    def forward(self, src_tokens, src_lengths):
        """Encode (B, T, input_dim) features; returns a fairseq encoder-out dict with x as (T', B, C)."""
        (bsz, max_seq_len, _) = src_tokens.size()
        x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim).transpose(1, 2).contiguous()
        x = self.conv(x)
        (bsz, _, output_seq_len, _) = x.size()
        x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, (- 1))
        x = self.out(x)
        x = (self.embed_scale * x)
        # derive the subsampled lengths and clamp them to the actual output length
        subsampling_factor = int((((max_seq_len * 1.0) / output_seq_len) + 0.5))
        input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long()
        input_len_1 = (x.size(0) * torch.ones([src_lengths.size(0)]).long().to(input_len_0.device))
        input_lengths = torch.min(input_len_0, input_len_1)
        encoder_padding_mask = lengths_to_padding_mask(input_lengths)
        positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
        x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        for layer in self.transformer_layers:
            x = layer(x, encoder_padding_mask)
        maybe_encoder_padding_mask = encoder_padding_mask
        return {'encoder_out': [x], 'encoder_padding_mask': ([maybe_encoder_padding_mask] if (maybe_encoder_padding_mask is not None) else []), 'encoder_embedding': [], 'encoder_states': [], 'src_tokens': [], 'src_lengths': []}

    # BUG FIX: the bare `.export` line here was a syntax error; it is the
    # remnant of the @torch.jit.export decorator (which TorchScript needs
    # so this method is compiled even though forward() never calls it).
    @torch.jit.export
    def reorder_encoder_out(self, encoder_out: Dict[(str, List[Tensor])], new_order):
        """Reorder every field of the encoder output along the batch dim (used by beam search)."""
        new_encoder_out = [encoder_out['encoder_out'][0].index_select(1, new_order)]
        if (len(encoder_out['encoder_padding_mask']) == 0):
            new_encoder_padding_mask = []
        else:
            new_encoder_padding_mask = [encoder_out['encoder_padding_mask'][0].index_select(0, new_order)]
        if (len(encoder_out['encoder_embedding']) == 0):
            new_encoder_embedding = []
        else:
            new_encoder_embedding = [encoder_out['encoder_embedding'][0].index_select(0, new_order)]
        encoder_states = encoder_out['encoder_states']
        if (len(encoder_states) > 0):
            for (idx, state) in enumerate(encoder_states):
                encoder_states[idx] = state.index_select(1, new_order)
        return {'encoder_out': new_encoder_out, 'encoder_padding_mask': new_encoder_padding_mask, 'encoder_embedding': new_encoder_embedding, 'encoder_states': encoder_states, 'src_tokens': [], 'src_lengths': []}
def data_type_dict():
    """Map dtype name strings to the corresponding TensorFlow dtype objects."""
    dtype_names = ('float16', 'float32', 'float64', 'uint8', 'int8', 'int16', 'int32', 'int64', 'bool')
    return {name: getattr(tf, name) for name in dtype_names}
def ucf101_root():
    """Yield (video_dir, annotations_dir) for a synthetic UCF-101 layout in a temp dir.

    Builds 2 classes x 3 groups x 4 random clips each, distributing clip
    names round-robin over the six train/test fold files.
    """
    with get_tmp_dir() as tmp_dir:
        ucf_dir = os.path.join(tmp_dir, 'UCF-101')
        video_dir = os.path.join(ucf_dir, 'video')
        annotations = os.path.join(ucf_dir, 'annotations')
        for directory in (ucf_dir, video_dir, annotations):
            os.makedirs(directory)
        # six fold files: {train,test} x folds 1..3
        fold_files = [os.path.join(annotations, '{:s}list{:02d}.txt'.format(split, fold)) for split in {'train', 'test'} for fold in range(1, 4)]
        file_handles = [open(path, 'w') for path in fold_files]
        file_iter = cycle(file_handles)
        for class_idx in range(0, 2):
            current_class = 'class_{0}'.format(class_idx + 1)
            class_dir = os.path.join(video_dir, current_class)
            os.makedirs(class_dir)
            for group in range(0, 3):
                for clip in range(0, 4):
                    clip_name = 'v_{0}_g{1}_c{2}.avi'.format(current_class, group, clip)
                    clip_path = os.path.join(class_dir, clip_name)
                    # 10-20 seconds at 25 fps of random uint8 frames
                    length = random.randrange(10, 21)
                    frames = torch.randint(0, 256, ((length * 25), 320, 240, 3), dtype=torch.uint8)
                    write_video(clip_path, frames, 25)
                    next(file_iter).write('{0}\n'.format(os.path.join(current_class, clip_name)))
        for handle in file_handles:
            handle.close()
        yield (video_dir, annotations)
class ParametricConcurrent(nn.Sequential):
    """Apply every child module to the same input and concatenate the results.

    Unlike nn.Sequential's chained application, each child receives the
    original input (and any keyword arguments); the child outputs are
    concatenated along ``axis``.
    """

    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x, **kwargs):
        """Run each child on x (with kwargs) and concatenate along self.axis."""
        outputs = tuple(child(x, **kwargs) for child in self._modules.values())
        return torch.cat(outputs, dim=self.axis)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.