code stringlengths 101 5.91M |
|---|
class META(nn.Module):
def __init__(self, ebd, args):
super(META, self).__init__()
self.args = args
self.ebd = ebd
self.aux = get_embedding(args)
self.ebd_dim = self.ebd.embedding_dim
input_dim = (((int(args.meta_idf) + self.aux.embedding_dim) + int(args.meta_w_target)) + int(args.meta_iwf))
if args.meta_ebd:
input_dim += self.ebd_dim
if (args.embedding == 'meta'):
self.rnn = RNN(input_dim, 25, 1, True, 0)
self.seq = nn.Sequential(nn.Dropout(self.args.dropout), nn.Linear(50, 1))
else:
self.seq = nn.Sequential(nn.Linear(input_dim, 50), nn.ReLU(), nn.Dropout(self.args.dropout), nn.Linear(50, 1))
def forward(self, data, return_score=False):
ebd = self.ebd(data)
scale = self.compute_score(data, ebd)
ebd = torch.sum((ebd * scale), dim=1)
if return_score:
return (ebd, scale)
return ebd
def _varlen_softmax(self, logit, text_len):
logit = torch.exp(logit)
mask = (torch.arange(logit.size()[(- 1)], device=logit.device, dtype=text_len.dtype).expand(*logit.size()) < text_len.unsqueeze((- 1)))
logit = (mask.float() * logit)
score = (logit / torch.sum(logit, dim=1, keepdim=True))
return score
def compute_score(self, data, ebd, return_stats=False):
x = self.aux(data)
if self.args.meta_idf:
idf = F.embedding(data['text'], data['idf']).detach()
x = torch.cat([x, idf], dim=(- 1))
if self.args.meta_iwf:
iwf = F.embedding(data['text'], data['iwf']).detach()
x = torch.cat([x, iwf], dim=(- 1))
if self.args.meta_ebd:
x = torch.cat([x, ebd], dim=(- 1))
if self.args.meta_w_target:
if self.args.meta_target_entropy:
w_target = (ebd data['w_target'])
w_target = (F.softmax(w_target, dim=2) * F.log_softmax(w_target, dim=2))
w_target = (- torch.sum(w_target, dim=2, keepdim=True))
w_target = (1.0 / w_target)
x = torch.cat([x, w_target.detach()], dim=(- 1))
else:
w_target = torch.abs((ebd data['w_target']))
w_target = w_target.max(dim=2, keepdim=True)[0]
x = torch.cat([x, w_target.detach()], dim=(- 1))
if (self.args.embedding == 'meta'):
hidden = self.rnn(x, data['text_len'])
else:
hidden = x
logit = self.seq(hidden).squeeze((- 1))
score = self._varlen_softmax(logit, data['text_len']).unsqueeze((- 1))
if return_stats:
return (score.squeeze(), idf.squeeze(), w_target.squeeze())
else:
return score |
@_task('commonsense_qa')
class CommonsenseQATask(FairseqTask):
    """Fairseq task for CommonsenseQA multiple-choice answer ranking.

    Loads <split>.jsonl and builds one ranking input per answer choice.
    NOTE(review): decorator markers were stripped in the checked-in version —
    L"_task(...)" was a bare call, and without @classmethod/@property the
    `cls.load_dictionary(...)` call in setup_task and the
    `self.source_dictionary.pad()` access in load_dataset fail.
    """

    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl')
        parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')
        parser.add_argument('--num-classes', type=int, default=5)

    def __init__(self, args, vocab):
        super().__init__(args)
        self.vocab = vocab
        self.mask = vocab.add_symbol('<mask>')
        self.bpe = encoders.build_bpe(args)

    @classmethod
    def load_dictionary(cls, filename):
        """Load the vocabulary and register the <mask> symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Validate the criterion, load the dictionary, and build the task."""
        assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking'
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))
        return cls(args, vocab)

    def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
        """Read <split>.jsonl and build a shuffled NestedDictionaryDataset
        with one `net_input{i}` (question + choice i) per answer class."""

        def binarize(s, append_bos=False):
            # BPE-encode then map to vocabulary ids; optionally prepend
            # the configured init token (e.g. BOS for RoBERTa).
            if (self.bpe is not None):
                s = self.bpe.encode(s)
            tokens = self.vocab.encode_line(s, append_eos=True, add_if_not_exist=False).long()
            if (append_bos and (self.args.init_token is not None)):
                tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
            return tokens

        if (data_path is None):
            data_path = os.path.join(self.args.data, (split + '.jsonl'))
        if (not os.path.exists(data_path)):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))
        src_tokens = [[] for i in range(self.args.num_classes)]
        src_lengths = [[] for i in range(self.args.num_classes)]
        labels = []
        with open(data_path) as h:
            for line in h:
                example = json.loads(line.strip())
                # Test splits may ship without gold answers.
                if ('answerKey' in example):
                    label = (ord(example['answerKey']) - ord('A'))
                    labels.append(label)
                question = example['question']['stem']
                assert (len(example['question']['choices']) == self.args.num_classes)
                question = ('Q: ' + question)
                question_toks = binarize(question, append_bos=True)
                for (i, choice) in enumerate(example['question']['choices']):
                    src = ('A: ' + choice['text'])
                    src_bin = torch.cat([question_toks, binarize(src)])
                    src_tokens[i].append(src_bin)
                    src_lengths[i].append(len(src_bin))
        assert all(((len(src_tokens[0]) == len(src_tokens[i])) for i in range(self.args.num_classes)))
        assert (len(src_tokens[0]) == len(src_lengths[0]))
        assert ((len(labels) == 0) or (len(labels) == len(src_tokens[0])))
        for i in range(self.args.num_classes):
            src_lengths[i] = np.array(src_lengths[i])
            src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
            src_lengths[i] = ListDataset(src_lengths[i])
        dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)}
        for i in range(self.args.num_classes):
            dataset.update({'net_input{}'.format((i + 1)): {'src_tokens': RightPadDataset(src_tokens[i], pad_idx=self.source_dictionary.pad()), 'src_lengths': src_lengths[i]}})
        if (len(labels) > 0):
            dataset.update({'target': RawLabelDataset(labels)})
        # Size by the longest choice so batching accounts for padding.
        dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])])
        with data_utils.numpy_seed(self.args.seed):
            dataset = SortDataset(dataset, sort_order=[np.random.permutation(len(dataset))])
        print('| Loaded {} with {} samples'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        from fairseq import models
        model = models.build_model(args, self)
        # Single-logit head; ranking happens across the num_classes inputs.
        model.register_classification_head('sentence_classification_head', num_classes=1)
        return model

    @property
    def source_dictionary(self):
        return self.vocab

    @property
    def target_dictionary(self):
        return self.vocab
class Progress():
    """Tracks the progressive-growing stage `p` of a GAN-style training run
    and exposes the stage-dependent image resolution and batch size."""

    def __init__(self, n_iter, pmax, batchSizeList):
        assert ((n_iter > 0) and isinstance(n_iter, int)), 'n_iter must be int >= 1'
        assert ((pmax >= 0) and isinstance(pmax, int)), 'pmax must be int >= 0'
        assert (isinstance(batchSizeList, list)
                and all((isinstance(x, int) for x in batchSizeList))
                and all(((x > 0) for x in batchSizeList))
                and (len(batchSizeList) == (pmax + 1))), 'batchSizeList must be a list of int > 0 and of length pmax+1'
        self.n_iter = n_iter            # iterations per stage unit
        self.pmax = pmax                # highest reachable stage
        self.p = 0                      # current (possibly fractional) stage
        self.batchSizeList = batchSizeList

    def progress(self, epoch, i, total):
        """Advance `p` from the global training position; clamps to [0, pmax]."""
        frac = (epoch + (i / total)) / self.n_iter
        self.p = min(max(int(frac / 2), frac - ceil(frac / 2), 0), self.pmax)
        return self.p

    def resize(self, images):
        """Downsample images to the current stage's resolution (4 * 2**stage);
        full resolution once the last stage is reached."""
        stage = int(ceil(self.p))
        if stage >= self.pmax:
            return images
        return F.adaptive_avg_pool2d(images, (4 * (2 ** stage)))

    def batchSize(self):
        """Batch size for the current (rounded-up) stage."""
        return self.batchSizeList[int(ceil(self.p))]
def disparity_regression(x, maxdisp):
    """Soft-argmin disparity regression.

    Treats `x` (B, maxdisp, H, W) as a probability volume over disparity
    candidates and returns the expected disparity per pixel (B, H, W).
    """
    assert len(x.shape) == 4
    candidates = torch.arange(0, maxdisp, dtype=x.dtype, device=x.device)
    candidates = candidates.view(1, maxdisp, 1, 1)
    return (x * candidates).sum(dim=1, keepdim=False)
def tokenizer_class_from_name(class_name: str):
    """Resolve a tokenizer class by its __name__.

    Searches slow tokenizers first, then fast ones, then the no-config list;
    returns None when no class matches.
    """
    candidates = []
    for pair in TOKENIZER_MAPPING.values():
        if pair[0] is not None:
            candidates.append(pair[0])
    for pair in TOKENIZER_MAPPING.values():
        if pair[1] is not None:
            candidates.append(pair[1])
    candidates = candidates + NO_CONFIG_TOKENIZER
    for candidate in candidates:
        if candidate.__name__ == class_name:
            return candidate
class BertSoftmaxParallel(nn.DataParallel, BertSoftmaxFunction):
    """Data-parallel wrapper around a BertSoftmax-style module.

    Inherits DataParallel's forward scatter/gather while also exposing the
    BertSoftmaxFunction interface of the wrapped module.
    """

    def __init__(self, module, device_ids):
        # Initialize only the DataParallel side explicitly (not super()),
        # so BertSoftmaxFunction.__init__ is deliberately skipped.
        nn.DataParallel.__init__(self, module=module, device_ids=device_ids)
        # Mirror the wrapped module's attributes so callers can read them
        # directly from the wrapper.
        self.label_size = self.module.label_size
        self.device = self.module.device
def main():
    """Generate MIDI samples from a trained Transformer-XL checkpoint.

    Reads ./config.yml (INFERENCE section), loads the requested checkpoint,
    writes `num_sample` .mid files into the configured output folder, and
    dumps timing statistics to runtime_stats.json.
    """
    # Use context managers so config/dictionary file handles are closed
    # (they were previously opened inline and leaked).
    with open('config.yml', 'r') as f:
        cfg = yaml.full_load(f)
    inferenceConfig = cfg['INFERENCE']
    os.environ['CUDA_VISIBLE_DEVICES'] = inferenceConfig['gpuID']
    print(('=' * 2), 'Inference configs', ('=' * 5))  # fixed typo 'Inferenc'
    print(json.dumps(inferenceConfig, indent=1, sort_keys=True))
    CHECKPOINT_FOLDER = inferenceConfig['experiment_dir']
    midi_folder = inferenceConfig['generated_dir']
    checkpoint_type = inferenceConfig['checkpoint_type']
    if (checkpoint_type == 'best_train'):
        model_path = os.path.join(CHECKPOINT_FOLDER, 'model_best.pth.tar')
        output_prefix = 'best_train_'
    elif (checkpoint_type == 'best_val'):
        model_path = os.path.join(CHECKPOINT_FOLDER, 'model_best_val.pth.tar')
        output_prefix = 'best_val_'
    elif (checkpoint_type == 'epoch_idx'):
        model_path = os.path.join(CHECKPOINT_FOLDER, 'ep_{}.pth.tar'.format(str(inferenceConfig['model_epoch'])))
        output_prefix = (str(inferenceConfig['model_epoch']) + '_')
    else:
        # Previously fell through silently and crashed later with a
        # NameError on model_path; fail fast with a clear message instead.
        raise ValueError('Unknown checkpoint_type: {}'.format(checkpoint_type))
    with open(os.path.join(CHECKPOINT_FOLDER, 'config.yml'), 'r') as f:
        pretrainCfg = yaml.full_load(f)
    modelConfig = pretrainCfg['MODEL']
    # Race-free replacement for exists() + mkdir().
    os.makedirs(midi_folder, exist_ok=True)
    with open(inferenceConfig['dictionary_path'], 'rb') as f:
        (event2word, word2event) = pickle.load(f)
    device = torch.device(('cuda' if ((not inferenceConfig['no_cuda']) and torch.cuda.is_available()) else 'cpu'))
    print('Device to generate:', device)
    model = TransformerXL(modelConfig, device, event2word=event2word, word2event=word2event, is_training=False)
    song_time_list = []
    words_len_list = []
    num_samples = inferenceConfig['num_sample']
    for idx in range(num_samples):
        print(f'==={idx}/{num_samples}===')
        print(midi_folder, (output_prefix + str(idx)))
        (song_time, word_len) = model.inference(model_path=model_path, token_lim=7680, strategies=['temperature', 'nucleus'], params={'t': 1.2, 'p': 0.9}, bpm=120, output_path='{}/{}.mid'.format(midi_folder, (output_prefix + str(idx))))
        print('song time:', song_time)
        print('word_len:', word_len)
        words_len_list.append(word_len)
        song_time_list.append(song_time)
    print('ave token time:', (sum(words_len_list) / sum(song_time_list)))
    print('ave song time:', np.mean(song_time_list))
    runtime_result = {'song_time': song_time_list, 'words_len_list': words_len_list, 'ave token time:': (sum(words_len_list) / sum(song_time_list)), 'ave song time': float(np.mean(song_time_list))}
    with open('runtime_stats.json', 'w') as f:
        json.dump(runtime_result, f)
def pre_process_images(raw_images_path):
    """Align every face image in `raw_images_path` with dlib and save the
    results as JPEGs under paths_config.input_data_path.

    Images that fail alignment are skipped (the error is printed).
    """
    current_directory = os.getcwd()
    IMAGE_SIZE = 1024
    predictor = dlib.shape_predictor(paths_config.dlib)
    os.chdir(raw_images_path)
    try:
        images_names = glob.glob('*')
        # Keep (image, name) together at append time. Previously the images
        # and names were zipped from two lists at the end, so any alignment
        # failure shifted every subsequent image onto the wrong file name.
        aligned_pairs = []
        for image_name in tqdm(images_names):
            try:
                aligned_image = align_face(filepath=f'{raw_images_path}/{image_name}', predictor=predictor, output_size=IMAGE_SIZE)
                aligned_pairs.append((aligned_image, image_name))
            except Exception as e:
                print(e)
        os.makedirs(paths_config.input_data_path, exist_ok=True)
        for (image, name) in aligned_pairs:
            real_name = name.split('.')[0]
            image.save(f'{paths_config.input_data_path}/{real_name}.jpeg')
    finally:
        # Always restore the working directory, even if saving fails.
        os.chdir(current_directory)
def main():
    """Build an ORC layout (top horizontal flow + left vertical flow + textbox),
    solve it, and optionally place the widgets in a Tk window.

    Relies on module-level globals: window/button size constants, num_top,
    num_left, widgets, show_window, time_result, mainloop, and the ORC* /
    *Flow layout classes. Not visible here — verify against the module header.
    """
    print(window_width, window_height)
    # Widget specs for the top (horizontal) and left (vertical) flows.
    top_widgets = []
    left_widgets = []
    for i in range(num_top):
        top_widgets.append(ORCWidget(('HF_' + str(i)), [top_button_width_min, top_button_width_pref, top_button_width_max, top_button_height_min, top_button_height_pref, top_button_height_max]))
    for i in range(num_top, (num_top + num_left)):
        left_widgets.append(ORCWidget(('VF_' + str(i)), [left_button_width_min, left_button_width_pref, left_button_width_max, left_button_height_min, left_button_height_pref, left_button_height_max]))
    # Layout tree: column -> [horizontal flow, row]; row -> [vertical flow, textbox].
    column = ORCColumn('column', None, window_width, window_height)
    horizonalflow = HorizontalFlow('HF', top_widgets, column)
    row = ORCRow('row', horizonalflow)
    verticalflow = VerticalFlow('VF', left_widgets, row)
    textbox = ORCWidget('textbox', [textbox_min, textbox_pref, textbox_max, textbox_min, textbox_pref, textbox_max], verticalflow)
    # Tiny weight de-prioritizes the textbox in the solver objective.
    textbox.set_weight(1e-06)
    column.define_sublayouts([horizonalflow, row])
    row.define_sublayouts([verticalflow, textbox])
    horizonalflow.connect_to_flow(verticalflow)
    # Solve the layout and report the wall-clock time.
    start = time.time()
    column.solve()
    print(('Time: ' + str((time.time() - start))))
    if show_window:
        time_result.insert(0, str((time.time() - start)))
    (best_leaf, best_leaf_result, best_leaf_loss) = column.get_best()
    if (best_leaf == None):
        print('No Solution!')
        exit()
    # Recover per-flow row geometry from the best solver leaf.
    # NOTE(review): the .parent chains assume a fixed tree depth matching the
    # construction above — confirm if the tree shape ever changes.
    horizonalflow_row_height = best_leaf.parent.parent.parent.best_row_height
    horizonalflow_row_width = best_leaf.parent.parent.parent.best_row_width
    horizonalflow_result_index = best_leaf.parent.parent.parent.best_result_index
    verticalflow_row_height = best_leaf.parent.best_row_height
    verticalflow_row_width = best_leaf.parent.best_row_width
    verticalflow_result_index = best_leaf.parent.best_result_index
    # Solved bounding edges (left/right/top/bottom) of each region.
    HF_l = best_leaf_result['HF_l']
    HF_r = best_leaf_result['HF_r']
    HF_t = best_leaf_result['HF_t']
    HF_b = best_leaf_result['HF_b']
    VF_l = best_leaf_result['VF_l']
    VF_r = best_leaf_result['VF_r']
    VF_t = best_leaf_result['VF_t']
    VF_b = best_leaf_result['VF_b']
    textbox_l = best_leaf_result['textbox_l']
    textbox_r = best_leaf_result['textbox_r']
    textbox_t = best_leaf_result['textbox_t']
    textbox_b = best_leaf_result['textbox_b']
    # Place the horizontal-flow widgets row by row, left to right.
    left = HF_l
    top = HF_t
    index = 0
    for i in range(len(horizonalflow_result_index)):
        for j in range(len(horizonalflow_result_index[i])):
            widget_width = horizonalflow_row_width[i][j]
            widget_height = horizonalflow_row_height[i]
            if show_window:
                widgets[index][0].place(x=left, y=top, width=widget_width, height=widget_height)
            left += widget_width
            index += 1
        left = HF_l
        top += widget_height
    # Place the vertical-flow widgets column by column, top to bottom.
    left = VF_l
    top = VF_t
    for i in range(len(verticalflow_result_index)):
        for j in range(len(verticalflow_result_index[i])):
            widget_width = verticalflow_row_width[i]
            widget_height = verticalflow_row_height[i][j]
            if show_window:
                widgets[index][0].place(x=left, y=top, width=widget_width, height=widget_height)
            top += widget_height
            index += 1
        left += widget_width
        top = VF_t
    # The last widget in `widgets` is the textbox.
    if show_window:
        widgets[(- 1)][0].place(x=textbox_l, y=textbox_t, width=(textbox_r - textbox_l), height=(textbox_b - textbox_t))
    if show_window:
        mainloop()
class Encoder(Module):
    """U-Net-style encoder: a chain of Blocks, each followed by 2x2 max
    pooling, that returns every pre-pool feature map for skip connections."""

    def __init__(self, channels=(3, 16, 32, 64)):
        super().__init__()
        # One Block per consecutive (in, out) channel pair.
        channel_pairs = zip(channels[:-1], channels[1:])
        self.encBlocks = ModuleList([Block(c_in, c_out) for (c_in, c_out) in channel_pairs])
        self.pool = MaxPool2d(2)

    def forward(self, x):
        """Return the list of per-stage feature maps (before pooling)."""
        features = []
        for stage in self.encBlocks:
            x = stage(x)
            features.append(x)
            x = self.pool(x)
        return features
def PSNR(img1, img2):
    """Peak signal-to-noise ratio (dB) between two images in [0, 255] range.

    Returns 100 for identical images (MSE of zero would otherwise divide by 0).
    """
    diff = (img1 / 255.0) - (img2 / 255.0)
    mse = np.mean(diff ** 2)
    if mse == 0:
        return 100
    PIXEL_MAX = 1
    return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def main():
    """Quantize features against pre-trained k-means centroids with FAISS.

    Writes one cluster-id line per utterance to <split>.src, file names to
    <split>.tsv, and (if present) labels to <split>.<labels>; the label file
    is removed when no labels were seen.
    """
    parser = get_parser()
    args = parser.parse_args()
    spec = osp.basename(args.path)
    try:
        faiss_spec = parse_faiss_specs(spec.rstrip('/'))[0]
    except:
        # Print the offending spec for debugging, then re-raise unchanged.
        print(spec)
        raise
    print('Faiss Spec:', faiss_spec, file=sys.stderr)
    # Optional PCA transform learned at clustering time (A: projection, b: bias).
    if faiss_spec.pca:
        A = torch.from_numpy(np.load(osp.join(args.path, 'pca_A.npy'))).cuda()
        b = torch.from_numpy(np.load(osp.join(args.path, 'pca_b.npy'))).cuda()
        print('Loaded PCA', file=sys.stderr)
    centroids = np.load(osp.join(args.path, 'centroids.npy'))
    print('Loaded centroids', centroids.shape, file=sys.stderr)
    # GPU nearest-centroid index: L2 normally, inner product for sphere specs.
    res = faiss.StandardGpuResources()
    index_flat = (faiss.IndexFlatL2(centroids.shape[1]) if (not faiss_spec.sphere) else faiss.IndexFlatIP(centroids.shape[1]))
    faiss_index = faiss.index_cpu_to_gpu(res, 0, index_flat)
    faiss_index.add(centroids)
    (generator, num, root) = get_iterator(args)
    iterator = generator()
    had_labels = False
    label_path = osp.join(args.path, f'{args.split}.{args.labels}')
    with torch.no_grad():
        with open(osp.join(args.path, f'{args.split}.src'), 'w') as fp, open(osp.join(args.path, f'{args.split}.tsv'), 'w') as pp, open(label_path, 'w') as lp:
            # First line of the .tsv is the data root.
            print(root, file=pp)
            for (f, fname, lbl) in tqdm.tqdm(iterator, total=num):
                if faiss_spec.pca:
                    f = (torch.mm(f, A) + b)
                if faiss_spec.norm:
                    f = F.normalize(f, p=2, dim=(- 1))
                f = f.cpu().numpy()
                # Nearest centroid id per frame.
                (_, z) = faiss_index.search(f, 1)
                print(' '.join((str(x.item()) for x in z)), file=fp)
                print(fname, file=pp)
                if (lbl is not None):
                    print(lbl, file=lp)
                    had_labels = True
    # Drop the (empty) label file if the dataset had no labels.
    if (not had_labels):
        os.remove(label_path)
class PLN(nn.Module):
    """Minimal two-stage conv module: an input REBNCONV projecting to
    `mid_ch` channels followed by an output REBNCONV to `out_ch`."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(PLN, self).__init__()
        self.rebnconvin = REBNCONV(in_ch, mid_ch, dirate=1)
        self.rebnconvout = REBNCONV(mid_ch, out_ch, dirate=1)

    def forward(self, x):
        """Apply the two REBNCONV stages in sequence."""
        return self.rebnconvout(self.rebnconvin(x))
class RandomEnv(gym.Env):
    """Toy gym environment: random binary (10, 10, 10) observations,
    6 discrete actions, zero reward, episodes of exactly 10 steps."""

    def __init__(self):
        super(RandomEnv, self).__init__()
        self.action_space = spaces.Discrete(6)
        self.observation_space = gym.spaces.Dict()
        self.observation_space.spaces['image'] = gym.spaces.Box(low=0.0, high=1.0, shape=(10, 10, 10))
        self.channels = [f'channel{i}' for i in range(10)]
        self.spec = Spec('random')
        self._step = 0  # steps taken in the current episode

    def _next_observation(self):
        # Random 0/1 cube cast to float32 to satisfy the Box space.
        return np.random.randint(2, size=(10, 10, 10)).astype(np.float32)

    def step(self, action):
        self._step += 1
        done = self._step == 10
        return ({'image': self._next_observation()}, 0, done, {})

    def reset(self):
        self._step = 0
        return {'image': self._next_observation()}
class LBP_NET(nn.Module):
    """Learned basis-pursuit style CNN for 10-class classification.

    The first three conv layers can each be refined for `T` ISTA-like
    iterations (see forward); layers 4-6 are plain conv(+pool) blocks feeding
    a linear classifier.
    """

    def __init__(self, T):
        super(LBP_NET, self).__init__()
        # Number of unrolled refinement iterations per learned layer.
        self.T = T
        # Conv filter banks stored as raw parameters (out, in, kH, kW),
        # applied via F.conv2d with the paired stride.
        self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True)
        self.strd3 = 2
        self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True)
        self.strd4 = 1
        self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True)
        self.strd5 = 1
        self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True)
        self.strd6 = 1
        # Learnable per-layer step sizes (c*) and biases (b*).
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True)
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True)
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(512, 10)
        # Rescale the filter banks roughly by 1/sqrt(fan_in) at init
        # (fan_in = in_channels * kernel area).
        self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data)
        self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data)
        self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data)
        self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data)

    def forward(self, x):
        """Return log-softmax class scores for a batch of 3-channel images."""
        if (self.T == 0):
            # Plain feed-forward path. (Equivalent to the unrolled path below
            # with zero refinement iterations.)
            gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1))
            gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2))
            gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3))
        else:
            # Unrolled refinement: each iteration subtracts the re-projected
            # reconstruction error (conv_transpose back to the input space,
            # minus the layer input) from the current code.
            gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1))
            for _ in range(self.T):
                gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1))
            gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2))
            for _ in range(self.T):
                gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2))
            gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3))
            for _ in range(self.T):
                gamma3 = F.relu(((gamma3 - (self.c3 * F.conv2d((F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3))
        # Plain conv blocks; layers 5 and 6 halve the spatial size via pooling.
        gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4))
        gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2)
        gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2)
        gammaGoal = gamma6
        # Flatten to (batch, features) for the classifier head.
        gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        return out
def load_template(config: CfgNode, **kwargs):
    """Instantiate the template named by config.template.

    Returns None when config.template is unset; extra kwargs are forwarded
    to the template class's from_config constructor.
    """
    if config.template is None:
        return None
    template_class = TEMPLATE_CLASS[config.template]
    return template_class.from_config(config=config[config.template], **kwargs)
def normalize_advantages(advantages):
    """Standardize advantage estimates: subtract the mean and divide by the
    standard deviation (epsilon guards against a zero-variance batch)."""
    centered = advantages - np.mean(advantages)
    return centered / (advantages.std() + 1e-08)
class LSTMModel(nn.Module):
    """Batch-first LSTM over padded variable-length sequences; classifies
    each sequence from the last layer's final hidden state."""

    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(LSTMModel, self).__init__()
        self.hidden_dim = hidden_dim
        self.layer_dim = layer_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x, x_lengths):
        """x: (batch, seq, input_dim); x_lengths: true lengths per sequence."""
        device = x.device
        state_shape = (self.layer_dim, x.size(0), self.hidden_dim)
        # Fresh zero states each call; detached below so no grad flows into them.
        h0 = torch.zeros(*state_shape).requires_grad_().to(device)
        c0 = torch.zeros(*state_shape).requires_grad_().to(device)
        packed = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, enforce_sorted=False, batch_first=True)
        (packed_out, (hn, _)) = self.lstm(packed, (h0.detach(), c0.detach()))
        (out, _) = torch.nn.utils.rnn.pad_packed_sequence(packed_out, batch_first=True)
        # Classify from the final hidden state of the top LSTM layer.
        return self.fc(hn[-1])
def export_plot(fig, plot_title):
    """Save the current matplotlib figure under a name derived from
    `plot_title`, then clear it. (`fig` is unused; the active pyplot
    figure is what gets saved.)"""
    destination = get_plot_name(plot_title)
    print(f'saving plot in {destination}')
    plt.savefig(destination, dpi=EXPORT_RESOLUTION)
    plt.clf()
class RMSE(BaseMetric):
    """Root-mean-square error metric; NaN entries are ignored by the mean."""

    def _compute(self, pred: ty.T, target: ty.T) -> ty.T:
        squared_error = (pred - target) ** 2
        return squared_error.nanmean(dim=1).sqrt()
class LeNet(nn.Module):
    """Classic LeNet for 28x28 single-channel inputs, 10 output classes."""

    def __init__(self, fc1_hidden_size=500):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        # After two conv+pool stages a 28x28 input is 4x4x50.
        self.fc1 = nn.Linear(4 * 4 * 50, fc1_hidden_size)
        self.fc2 = nn.Linear(fc1_hidden_size, 10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = F.relu(self.fc1(out))
        return F.log_softmax(self.fc2(out), dim=1)
def read_image(filepath):
    """Fetch an image via the module-level FILE_CLIENT and decode it as an
    RGB array with mmcv's pillow backend."""
    raw = FILE_CLIENT.get(filepath)
    return mmcv.imfrombytes(raw, flag='color', channel_order='rgb', backend='pillow')
def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build a Single-Path NAS EfficientNet-style model.

    `arch_def` encodes one stage per inner list (block type, repeats, kernel,
    stride, expansion, channels); `channel_multiplier` scales stage widths.
    """
    arch_def = [['ds_r1_k3_s1_c16_noskip'], ['ir_r3_k3_s2_e3_c24'], ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320_noskip']]
    model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, channel_multiplier=channel_multiplier, norm_kwargs=resolve_bn_args(kwargs), **kwargs)
    model = _create_effnet(model_kwargs, variant, pretrained)
    return model
class BYTETracker(object):
    """BYTE multi-object tracker over CenterTrack-style detection dicts.

    Only detections with class == 1 are tracked. High-score detections are
    associated first; remaining tracks get a second chance against the
    low-score detections (the core BYTE idea).
    """

    def __init__(self, args, frame_rate=30):
        self.args = args
        # Minimum score required to start a brand-new track.
        self.det_thresh = args.new_thresh
        # Lost-track buffer scales with frame rate (track_buffer frames at 30fps).
        self.buffer_size = int((frame_rate / 30.0) * args.track_buffer)
        self.max_time_lost = self.buffer_size
        self.reset()

    def init_track(self, results):
        """Seed tracks in place from a first frame's detections."""
        for item in results:
            # FIX: was `self.opt.new_thresh`, but this class stores its
            # options object as `self.args` (see __init__) — `self.opt`
            # never exists and init_track always raised AttributeError.
            if (item['score'] > self.args.new_thresh) and (item['class'] == 1):
                self.id_count += 1
                item['active'] = 1
                item['age'] = 1
                item['tracking_id'] = self.id_count
                if not ('ct' in item):
                    # Derive the center point from the tlbr bbox.
                    bbox = item['bbox']
                    item['ct'] = [((bbox[0] + bbox[2]) / 2), ((bbox[1] + bbox[3]) / 2)]
                self.tracks.append(item)

    def reset(self):
        """Clear all tracker state (called from __init__ and between videos)."""
        self.frame_id = 0
        self.kalman_filter = KalmanFilter()
        self.tracked_stracks = []
        self.lost_stracks = []
        self.removed_stracks = []
        self.tracks = []
        self.id_count = 0

    def step(self, results, public_det=None):
        """Advance the tracker by one frame.

        results: list of detection dicts with 'score', 'class', 'bbox' (tlbr).
        Returns (and stores in self.tracks) the active tracks as dicts.
        """
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []
        detections = []
        detections_second = []
        # Keep only class-1 detections.
        scores = np.array([item['score'] for item in results if (item['class'] == 1)], np.float32)
        bboxes = np.vstack([item['bbox'] for item in results if (item['class'] == 1)])
        # Split into high-score (first association) and low-score
        # (second association) detection sets.
        remain_inds = (scores >= self.args.track_thresh)
        dets = bboxes[remain_inds]
        scores_keep = scores[remain_inds]
        inds_low = (scores > self.args.out_thresh)
        inds_high = (scores < self.args.track_thresh)
        inds_second = np.logical_and(inds_low, inds_high)
        dets_second = bboxes[inds_second]
        scores_second = scores[inds_second]
        if (len(dets) > 0):
            detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for (tlbr, s) in zip(dets, scores_keep)]
        else:
            detections = []
        # Step 1: split existing tracks into confirmed and unconfirmed.
        unconfirmed = []
        tracked_stracks = []
        for track in self.tracked_stracks:
            if (not track.is_activated):
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        # Step 2: first association, Kalman-predicted positions vs IoU.
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        STrack.multi_predict(strack_pool)
        dists = matching.iou_distance(strack_pool, detections)
        (matches, u_track, u_detection) = matching.linear_assignment(dists, thresh=self.args.match_thresh)
        for (itracked, idet) in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if (track.state == TrackState.Tracked):
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                # A lost track re-found by a high-score detection.
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        # Step 3: second association — unmatched tracks vs low-score detections.
        if (len(dets_second) > 0):
            detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for (tlbr, s) in zip(dets_second, scores_second)]
        else:
            detections_second = []
        r_tracked_stracks = [strack_pool[i] for i in u_track if (strack_pool[i].state == TrackState.Tracked)]
        dists = matching.iou_distance(r_tracked_stracks, detections_second)
        (matches, u_track, u_detection_second) = matching.linear_assignment(dists, thresh=0.5)
        for (itracked, idet) in matches:
            track = r_tracked_stracks[itracked]
            det = detections_second[idet]
            if (track.state == TrackState.Tracked):
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)
        # Tracks that stayed unmatched after both rounds become lost.
        for it in u_track:
            track = r_tracked_stracks[it]
            if (not (track.state == TrackState.Lost)):
                track.mark_lost()
                lost_stracks.append(track)
        # Deal with unconfirmed tracks (usually one-frame-old tracks):
        # match them against the remaining high-score detections.
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        (matches, u_unconfirmed, u_detection) = matching.linear_assignment(dists, thresh=0.7)
        for (itracked, idet) in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        # Step 4: start new tracks from leftover confident detections.
        for inew in u_detection:
            track = detections[inew]
            if (track.score < self.det_thresh):
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)
        # Step 5: expire tracks that have been lost too long.
        for track in self.lost_stracks:
            if ((self.frame_id - track.end_frame) > self.max_time_lost):
                track.mark_removed()
                removed_stracks.append(track)
        self.tracked_stracks = [t for t in self.tracked_stracks if (t.state == TrackState.Tracked)]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        (self.tracked_stracks, self.lost_stracks) = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        # Emit active tracks in the same dict format the caller supplied.
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]
        ret = []
        for track in output_stracks:
            track_dict = {}
            track_dict['score'] = track.score
            track_dict['bbox'] = track.tlbr
            bbox = track_dict['bbox']
            track_dict['ct'] = [((bbox[0] + bbox[2]) / 2), ((bbox[1] + bbox[3]) / 2)]
            track_dict['active'] = (1 if track.is_activated else 0)
            track_dict['tracking_id'] = track.track_id
            track_dict['class'] = 1
            ret.append(track_dict)
        self.tracks = ret
        return ret
class HebbianTrainer(Trainer):
    """Trainer that updates selected layers of a Sequential model with local
    (Hebbian) learning rules instead of backprop.

    Layers up to `supervised_from` that are Linear or Conv2d (and not frozen)
    are trained; a single rule may be shared, or a dict may map layer names
    to rules.
    """

    def __init__(self, model: torch.nn.Sequential, learning_rule: Union[(LearningRule, Dict[(str, LearningRule)])], optimizer: Optimizer, supervised_from: int=(- 1), freeze_layers: List[str]=None, complete_forward: bool=False, single_forward: bool=False, device: Optional[Union[(str, torch.device)]]=None):
        device = utils.get_device(device)
        engine = self.create_hebbian_trainer(model, learning_rule, optimizer, device=device)
        self.supervised_from = supervised_from
        self.freeze_layers = freeze_layers
        # complete_forward: run the whole model when preparing layer inputs.
        self.complete_forward = complete_forward
        # single_forward: one forward pass + hooks instead of a partial
        # forward per trainable layer.
        self.single_forward = single_forward
        if (self.freeze_layers is None):
            self.freeze_layers = []
        # Collect the trainable (Linear/Conv2d, non-frozen) layers before the
        # supervised head.
        Layer = namedtuple('Layer', ['idx', 'name', 'layer'])
        self.layers = []
        for (idx, (name, layer)) in enumerate(list(model.named_children())[:self.supervised_from]):
            if (((type(layer) == torch.nn.Linear) or (type(layer) == torch.nn.Conv2d)) and (name not in self.freeze_layers)):
                self.layers.append(Layer(idx, name, layer))
        self.learning_rule = learning_rule
        # Let each rule see the layers it will be applied to.
        if (type(self.learning_rule) == dict):
            for rule in self.learning_rule.values():
                rule.init_layers(self.layers)
        else:
            self.learning_rule.init_layers(self.layers)
        super().__init__(engine=engine, model=model, device=device)
        self.logger.info('Received {} trainable layer(s): {}.'.format(len(self.layers), [lyr.name for lyr in self.layers]))
        # In single-forward mode, capture each trainable layer's input/output
        # via forward hooks during the one shared pass.
        if self.single_forward:
            self._hooks = {}
            self._inputs = {}
            self._outputs = {}
            for lyr in self.layers:
                self._hooks[lyr.name] = lyr.layer.register_forward_hook(partial(self._store_data_hook, layer_name=lyr.name))

    def _store_data_hook(self, _, inp, output, layer_name):
        # Forward hook: stash the layer's (first) input and output by name.
        self._inputs[layer_name] = inp[0]
        self._outputs[layer_name] = output

    def _prepare_data(self, inputs, model, layer_index):
        """Run a partial forward up to `layer_index` and return the flattened
        (inputs, weights) pair the learning rule consumes."""
        layers = list(model.children())
        layer = layers[layer_index]
        # NOTE(review): both branches start from `inputs`; the layer_index==0
        # special case is redundant since layers[:0] is empty.
        if (layer_index == 0):
            x = inputs
        else:
            x = inputs
            for lyr in layers[:layer_index]:
                x = lyr(x)
        if self.complete_forward:
            # Also run the remainder of the model (for rules that need it).
            for lyr in layers[layer_index:]:
                x = lyr(x)
        if (type(layer) == torch.nn.Linear):
            w = layer.weight
        elif (type(layer) == torch.nn.Conv2d):
            # Express the convolution as a matrix product: flatten the kernels
            # and extract matching image patches.
            w = layer.weight
            w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1]))
            x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation)
        else:
            raise TypeError('Unsupported layer type!')
        x = x.view((x.shape[0], (- 1)))
        self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape)))
        return (x, w)

    def _prepare_data2(self, layer, layer_name):
        """Single-forward variant of _prepare_data: read the hook-captured
        input/output and return (inputs, outputs, weights)."""
        x = self._inputs[layer_name]
        y = self._outputs[layer_name]
        if (type(layer) == torch.nn.Linear):
            w = layer.weight
        elif (type(layer) == torch.nn.Conv2d):
            w = layer.weight
            w = w.view((- 1), (layer.kernel_size[0] * layer.kernel_size[1]))
            x = utils.extract_image_patches(x, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, dilation=layer.dilation)
        else:
            raise TypeError('Unsupported layer type!')
        x = x.view((x.shape[0], (- 1)))
        self.logger.debug('Prepared inputs and weights with shapes {} and {}.'.format(list(x.shape), list(w.shape)))
        return (x, y, w)

    def _forward(self, inputs, model):
        # One forward pass to fire the data-capturing hooks; only the layers
        # below the supervised head are run unless complete_forward is set.
        if self.complete_forward:
            model(inputs)
        else:
            layers = list(model.children())
            x = inputs
            for lyr in layers[:(self.supervised_from - 1)]:
                x = lyr(x)

    def create_hebbian_trainer(self, model: torch.nn.Module, learning_rule, optimizer, device=None, non_blocking=False, prepare_batch=utils.prepare_batch, output_transform=(lambda x, y: 0)):
        """Build the ignite Engine whose update step applies the local rule
        to each trainable layer (no autograd — runs under torch.no_grad)."""
        def _update(_, batch: Sequence[torch.Tensor]):
            model.train()
            with torch.no_grad():
                (x, y) = prepare_batch(batch, device=device, non_blocking=non_blocking)
                if self.single_forward:
                    # One pass populates self._inputs / self._outputs via hooks.
                    self._forward(x, model)
                for (layer_index, layer_name, layer) in self.layers:
                    self.logger.debug("Updating layer '{}' with shape {}.".format(layer, layer.weight.shape))
                    if self.single_forward:
                        (inputs, _, weights) = self._prepare_data2(layer, layer_name)
                    else:
                        (inputs, weights) = self._prepare_data(x, model, layer_index)
                    # Pick the per-layer rule when a dict was supplied.
                    if (type(learning_rule) == dict):
                        try:
                            rule = learning_rule[layer_name]
                        except KeyError:
                            self.logger.error("No learning rule was specified for layer '{}'!".format(layer_name))
                            raise
                    else:
                        rule = learning_rule
                    d_p = rule.update(inputs, weights)
                    d_p = d_p.view(*layer.weight.size())
                    optimizer.local_step(d_p, layer_name=layer_name)
            return output_transform(x, y)
        return Engine(_update)
def write_new_lm(new_lm_lines, ngram_counts, ngram_diffs):
    """Patch the 'ngram N=count' headers of an ARPA LM and dump it to stdout.

    Mutates ``new_lm_lines`` in place, then writes every line to stdout
    using latin-1 (byte-transparent) encoding.
    """
    # The count headers live within the first few lines of an ARPA file.
    for idx in range(10):
        match = re.search('ngram (\\d)=(\\d+)', new_lm_lines[idx])
        if not match:
            continue
        order = int(match.group(1))
        if order in ngram_diffs:
            updated = ngram_counts[order] + ngram_diffs[order]
            new_lm_lines[idx] = 'ngram {}={}\n'.format(order, updated)
    # latin-1 round-trips arbitrary bytes when the LM is not valid UTF-8.
    with io.TextIOWrapper(sys.stdout.buffer, encoding='latin-1') as output_stream:
        output_stream.writelines(new_lm_lines)
def validation(data_iter, net, save_scores=False, delta=0.8):
    """Evaluate a matching network with BCE loss over numpy mini-batches.

    Returns (mean_loss, accuracy). Also prints the point-biserial
    correlation and a confusion matrix, and optionally dumps scores/labels
    under ``args.exp_dir``.
    NOTE(review): ``delta`` is accepted but never used, and ``args`` is a
    module-level global — confirm both are intended.
    """
    score_list = []
    label_list = []
    net.eval()
    (losses, batch_num, acc, acc_num) = (0, 0, 0, 0)
    criterion = nn.BCELoss()
    for (batch_idx, batch) in enumerate(data_iter):
        (qbatch, rbatch, qlength, rlength, label) = batch
        # Batches arrive as numpy arrays; convert to tensors.
        qbatch = torch.from_numpy(qbatch)
        rbatch = torch.from_numpy(rbatch)
        qlength = torch.from_numpy(qlength)
        rlength = torch.from_numpy(rlength)
        label = torch.from_numpy(label).float()
        batch_size = len(qlength)
        if torch.cuda.is_available():
            (qbatch, rbatch) = (qbatch.cuda(), rbatch.cuda())
            (qlength, rlength) = (qlength.cuda(), rlength.cuda())
            label = label.cuda()
        # The network expects time-major input: (seq_len, batch).
        qbatch = qbatch.transpose(0, 1)
        rbatch = rbatch.transpose(0, 1)
        scores = net(qbatch, qlength, rbatch, rlength)
        loss = criterion(scores, label)
        score_list.extend(scores.cpu().data.numpy().tolist())
        label_list.extend(label.cpu().data.numpy().tolist())
        # Running accuracy at a fixed 0.5 decision threshold.
        s = (scores >= 0.5)
        acc += torch.sum((s.float() == label)).item()
        acc_num += batch_size
        batch_num += 1
        losses += loss.item()
    score_list = np.array(score_list)
    label_list = np.array(label_list)
    (pbc, pval) = pointbiserialr(label_list, score_list)
    # NOTE(review): the running `acc` count above is overwritten here by
    # sklearn's accuracy over all examples — confirm this is intended.
    acc = accuracy_score(label_list, (score_list >= 0.5))
    print('PBC: {}, pval: {}'.format(pbc, pval))
    if save_scores:
        np.savetxt((((args.exp_dir + '/test_') + args.mode) + '_scores.txt'), score_list)
        np.savetxt((((args.exp_dir + '/test_') + args.mode) + '_labels.txt'), label_list)
    predicted = (score_list >= 0.5).astype(np.int32)
    c_matrix = confusion_matrix(label_list, predicted)
    print('confusion_matrix = ', c_matrix)
    return (round((losses / batch_num), 4), acc)
def load_data(root_path):
    """Load an .npy array and split it into train/validate/test partitions.

    The final 10% of rows becomes the test set; 10% of the remainder
    becomes the validation set; everything before that is training data.
    Returns (data_train, data_validate, data_test).
    """
    full = np.load(root_path)
    n_test = int(0.1 * full.shape[0])
    data_test = full[-n_test:]
    remainder = full[0:-n_test]
    n_validate = int(0.1 * remainder.shape[0])
    data_validate = remainder[-n_validate:]
    data_train = remainder[0:-n_validate]
    return (data_train, data_validate, data_test)
_type  # NOTE(review): looks like a truncated decorator/registration line — confirm against the upstream source.
def rgb_shift(image, r_shift=0.0, g_shift=0.0, b_shift=0.0):
    """Randomly shift each RGB channel of `image` by a scalar offset.

    Each channel is offset by a value drawn from U(-shift, +shift).
    Assumes `image` is an HWC tensor with 3 channels on axis 2 — TODO confirm.
    """
    (r, g, b) = tf.split(image, 3, axis=2)
    r = (r + tf.random.uniform([], (- r_shift), r_shift))
    g = (g + tf.random.uniform([], (- g_shift), g_shift))
    b = (b + tf.random.uniform([], (- b_shift), b_shift))
    image = tf.concat([r, g, b], axis=2)
    return image
class PostProcessCocoTf(PostProcessCoco):
    """COCO post-processor for TF detector outputs (boxes, classes, scores)."""

    def __init__(self):
        super().__init__()
        self.use_inv_map = True

    def __call__(self, results, ids, expected=None, result_dict=None):
        """Convert raw per-image detections into COCO-style result rows.

        results: [boxes, classes, scores], each batched per image;
        ids: image ids aligned with the batch; expected: ground-truth
        class lists used to bump the `good` counter.
        """
        processed_results = []
        batch_size = len(results[0])
        for sample in range(batch_size):
            self.content_ids.append(ids[sample])
            processed_results.append([])
            boxes = results[0][sample]
            classes = results[1][sample]
            expected_classes = expected[sample][0]
            confidences = results[2][sample]
            for det in range(len(confidences)):
                # Assumes detections are confidence-sorted (break, not
                # continue, at the threshold) — confirm against the model.
                if confidences[det] < 0.05:
                    break
                cls = int(classes[det])
                if cls in expected_classes:
                    self.good += 1
                box = boxes[det]
                processed_results[sample].append([
                    float(ids[sample]), box[0], box[1], box[2], box[3],
                    confidences[det], float(cls),
                ])
                self.total += 1
        return processed_results
def read_vocab(path):
    """Read a two-column vocab file and return a word -> index mapping.

    Each line must have exactly two whitespace-separated fields; the first
    is the word. Indices are assigned in first-seen order; duplicates keep
    their original index.
    """
    word2idx = {}
    idx2word = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.split()
            assert (len(fields) == 2)
            token = fields[0]
            if token not in word2idx:
                idx2word.append(token)
                word2idx[token] = len(idx2word) - 1
    return word2idx
def parse_opt(argv=None):
    """Build and parse the command-line options for training/evaluation.

    Args:
        argv: optional list of argument strings; None (the default) falls
            back to sys.argv, matching the original no-argument call style.

    Returns:
        argparse.Namespace with all options.

    Fixes several copy-pasted help strings that described unrelated options.
    """
    parser = argparse.ArgumentParser()
    # Model architecture.
    parser.add_argument('--rnn_size', type=int, default=1280, help='size of the rnn in number of hidden nodes in question gru')
    parser.add_argument('--num_hid', type=int, default=1280, help='size of the rnn in number of hidden nodes in question gru')
    parser.add_argument('--num_layers', type=int, default=2, help='number of GCN layers')
    parser.add_argument('--rnn_type', type=str, default='gru', help='rnn, gru, or lstm')
    parser.add_argument('--v_dim', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers in the logit head')
    parser.add_argument('--activation', type=str, default='ReLU', help='activation function (default: ReLU)')
    parser.add_argument('--norm', type=str, default='weight', help='normalization type (default: weight)')
    parser.add_argument('--initializer', type=str, default='kaiming_normal', help='weight initializer (default: kaiming_normal)')
    # Optimization.
    parser.add_argument('--max_epochs', type=int, default=40, help='number of epochs')
    parser.add_argument('--batch_size', type=int, default=384, help='minibatch size')
    parser.add_argument('--grad_clip', type=float, default=0.25, help='clip gradients at this value')
    parser.add_argument('--dropC', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--dropG', type=float, default=0.2, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--dropL', type=float, default=0.1, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--dropW', type=float, default=0.4, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--dropout', type=float, default=0.2, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--optimizer', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--learning_rate', type=float, default=0.002, help='learning rate')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--seed', type=int, default=777, help='seed')
    parser.add_argument('--ntokens', type=int, default=777, help='ntokens')
    # Data / checkpointing.
    parser.add_argument('--checkpoint_path', type=str, default='', help='directory to store checkpointed models')
    parser.add_argument('--split', type=str, default='v2cp_train', help='training split')
    parser.add_argument('--split_test', type=str, default='v2cp_test', help='test split')
    parser.add_argument('--num_sub', type=int, default=5, help='size of the proposal object set')
    parser.add_argument('--bucket', type=int, default=4, help='bucket of predicted answers')
    # Loss weights.
    parser.add_argument('--hint_loss_weight', type=float, default=0, help='Influence strength loss weights')
    parser.add_argument('--compare_loss_weight', type=float, default=0, help='self-critical loss weights')
    parser.add_argument('--reg_loss_weight', type=float, default=0.0, help='regularization loss weights, set to zero in our paper ')
    parser.add_argument('--load_hint', type=float, default=0, help='if load the model after using Influence strength loss')
    parser.add_argument('--use_all', type=int, default=0, help='if use all QA pairs or excluding QA pairs in NUM category')
    # NOTE(review): type=str but default=0 (an int) — the default is only an
    # int when the flag is absent; confirm downstream comparisons.
    parser.add_argument('--load_model_states', type=str, default=0, help='which model to load')
    parser.add_argument('--evaluate_every', type=int, default=300, help='run evaluation every this many iterations')
    args = parser.parse_args(argv)
    return args
def getCharList(root):
    """Collect character labels from image filenames directly under `root`.

    Every `*.jpg` and `*.png` file contributes its basename up to the
    first dot (e.g. 'ab.jpg' -> 'ab').
    """
    image_paths = glob.glob(root + '/*.jpg') + glob.glob(root + '/*.png')
    return [os.path.basename(path).split('.')[0] for path in image_paths]
def echo(*args, **kwargs):
    """Print the positional and keyword arguments this function received."""
    report = (
        'Received the following input:',
        f'args = {args}',
        f'kwargs = {kwargs}',
    )
    print('\n'.join(report))
def iresnet18(pretrained=False, **kwargs):
    """Build an iResNet-18; optionally load pretrained weights from the model zoo."""
    net = iResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # Ensure the local cache directory exists before downloading.
        os.makedirs(default_cache_path, exist_ok=True)
        checkpoint_path = download_from_url(model_urls['iresnet18'], root=default_cache_path)
        net.load_state_dict(torch.load(checkpoint_path))
    return net
class DeiTOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeiT image models."""

    # Minimum torch version that supports this export path.
    torch_onnx_minimum_version = version.parse('1.11')

    # NOTE(review): in upstream transformers these two are @property methods;
    # the decorators may have been stripped — confirm against the original.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        # Single pixel input with all four axes dynamic and named.
        return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])

    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against torch.
        return 0.0001
class XTensorBoardCallback(tf.keras.callbacks.TensorBoard):
    """TensorBoard callback that additionally logs the current learning rate.

    Fix: Keras may invoke callbacks with ``logs=None``; the original
    ``logs.update(...)`` would raise AttributeError in that case.
    """

    def __init__(self, log_dir, **kwargs):
        super().__init__(log_dir=log_dir, **kwargs)

    def _with_lr(self, logs):
        # Guard against logs=None before injecting the 'lr' entry.
        logs = {} if logs is None else logs
        logs['lr'] = tf.keras.backend.get_value(self.model.optimizer.lr)
        return logs

    def on_epoch_end(self, epoch, logs=None):
        super().on_epoch_end(epoch, self._with_lr(logs))

    def on_batch_end(self, batch, logs=None):
        super().on_batch_end(batch, self._with_lr(logs))
def load_str_list(fname):
    """Read a text file and return its lines with surrounding whitespace stripped.

    Opens the file as UTF-8 explicitly so behavior does not depend on the
    platform default encoding (the original used the locale default, which
    breaks on non-ASCII content under some Windows locales).
    """
    with open(fname, encoding='utf-8') as f:
        return [line.strip() for line in f]
def _optimize_clone(optimizer, clone, num_clones, regularization_losses, **kwargs):
    """Compute a clone's total loss and, when present, its gradients.

    Returns (sum_loss, clone_grad); both are None-safe — when the clone
    produced no loss, no gradients are computed.
    """
    total_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
    grads = None
    if total_loss is not None:
        # Pin gradient computation to the device the clone lives on.
        with tf.device(clone.device):
            grads = optimizer.compute_gradients(total_loss, **kwargs)
    return (total_loss, grads)
def main(config):
    """Train/evaluate the QA model described by ``config``.

    Trains one epoch at a time (unless ``config.evaluate``), evaluates each
    requested test split, tracks the best checkpoint by
    ``eval_res[config.stop_key]['overall']``, and logs to wandb when
    enabled. Only the main process writes files and metrics.
    """
    if (is_main_process() and config.wandb.enable):
        run = setup_wandb(config)
    logger.info(f'''config:
{config}''')
    logger.info(f'train_file: {config.train_file}')
    # Offset the seed by rank so augmentation differs across workers.
    setup_seed((config.seed + get_rank()))
    device = torch.device(config.device)
    cudnn.benchmark = True
    (train_loader, test_name2loaders) = setup_dataloaders(config)
    # Scheduler horizon is defined in optimizer steps, not epochs.
    config.scheduler.num_training_steps = (len(train_loader) * config.scheduler.epochs)
    config.scheduler.num_warmup_steps = (len(train_loader) * config.scheduler.warmup_epochs)
    (model, model_without_ddp, optimizer, scheduler, scaler, tokenizer, start_epoch, global_step) = setup_model(config, model_cls=Singularity, has_decoder=True, pretrain=False, find_unused_parameters=True)
    if (is_main_process() and config.wandb.enable):
        wandb.watch(model)
    best = 0
    best_epoch = 0
    # VQA test splits carry no ground truth except 'minival'.
    has_gt = ((config.dataset_name != 'vqa') or (config.test_types[0] == 'minival'))
    logger.info((('Start ' + 'evaluation') if config.evaluate else 'training'))
    start_time = time.time()
    for epoch in range(start_epoch, config.scheduler.epochs):
        if (not config.evaluate):
            global_step = train(model, train_loader, optimizer, tokenizer, epoch, global_step, device, scheduler, scaler, config)
        with torch.cuda.amp.autocast(enabled=config.fp16):
            eval_res = {}
            pred_name2file = {}
            for (test_name, test_loader) in test_name2loaders.items():
                if (test_name not in config.test_types):
                    logger.info(f'Skip eval {test_name} split. All test_types {config.test_types}')
                    continue
                logger.info(f'Evaluating {test_name} split...')
                qa_result = evaluation(model, test_loader, tokenizer, device, config)
                # Every rank writes a shard; rank 0 gathers the merged result.
                (pred_file, gathered_result) = sync_save_result(qa_result, config.result_dir, f'{test_name}_latest')
                pred_name2file[test_name] = pred_file
                if (is_main_process() and has_gt):
                    eval_res[test_name] = eval_qa_acc(test_loader.dataset.anno_list, gathered_result, is_vqa=(config.dataset_name == 'vqa'))
        if is_main_process():
            if (len(eval_res) > 0):
                logger.info(f'eval_res {eval_res}')
                if config.wandb.enable:
                    for (name, acc_dict) in eval_res.items():
                        log_dict_to_wandb(acc_dict, step=global_step, prefix=f'{name}/')
                # Save a checkpoint only when the stop-key metric improves.
                if ((not config.evaluate) and has_gt and (eval_res[config.stop_key]['overall'] > best)):
                    save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'scaler': scaler.state_dict(), 'config': config, 'epoch': epoch, 'global_step': global_step}
                    for (name, pred_file) in pred_name2file.items():
                        copyfile(pred_file, pred_file.replace('latest', 'best'))
                    save_json(eval_res, join(config.output_dir, 'eval_res_best.json'), save_pretty=True)
                    torch.save(save_obj, join(config.output_dir, 'ckpt_best.pth'))
                    best = eval_res[config.stop_key]['overall']
                    best_epoch = epoch
            if config.evaluate:
                # Evaluation-only run: snapshot predictions and accuracies.
                save_json(eval_res, join(config.output_dir, 'eval_res_best.json'), save_pretty=True)
                for (name, pred_file) in pred_name2file.items():
                    copyfile(pred_file, pred_file.replace('latest', 'eval'))
                    if has_gt:
                        save_path = join(config.output_dir, f'{name}_acc_eval.json')
                        save_json(eval_res[name], save_path, save_pretty=True)
        if (config.evaluate or config.debug):
            break
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logger.info(f'Training time {total_time_str}')
    logger.info(f'best epoch {best_epoch}')
    logger.info(f'Checkpoints and Logs saved at {config.output_dir}')
    if (is_main_process() and config.wandb.enable):
        run.finish()
def convert(pipeline_name, use_auth_token=None, local_model_path=None):
    """Download (or locate) a Stable Diffusion model and convert it with Nano.

    ``pipeline_name`` encodes "<model_version>/<optimization_methods>"; the
    methods substring selects precision, device, accelerator and memory
    flags. Returns a status string on success; on any failure returns the
    formatted traceback instead of raising (suitable for a UI).
    """
    # Treat an empty token string the same as "no token".
    if ((use_auth_token is None) or (len(use_auth_token) == 0)):
        use_auth_token = None
    try:
        (model_version, optimization_methods) = pipeline_name.split('/')
        precision = ('float16' if ('FP16' in optimization_methods) else 'float32')
        device = ('GPU' if ('iGPU' in optimization_methods) else 'CPU')
        ipex = False
        low_memory = False
        if ('OpenVINO' in optimization_methods):
            accelerator = 'openvino'
        else:
            accelerator = 'jit'
        if ('IPEX' in optimization_methods):
            ipex = True
        if ('Low-memory' in optimization_methods):
            low_memory = True
        if ((local_model_path is None) or (local_model_path == '')):
            # Pull a hub snapshot, skipping the original checkpoint formats.
            model_id = model_version_map[model_version]['model_id']
            cache_dir = snapshot_download(model_id, cache_dir='models', ignore_patterns=['*.ckpt', '*.safetensors'], token=use_auth_token)
        else:
            print(f'Trying to load local model...')
            cache_dir = local_model_path
        print(f'Loading model from {cache_dir}')
        pipe = StableDiffusionPipeline.from_pretrained(cache_dir)
        nano_pipe = NanoStableDiffusionPipeline(pipe)
        nano_pipe.convert_pipeline(accelerator=accelerator, device=device, precision=precision, ipex=ipex, cache=True, low_memory=low_memory, cache_dir=cache_dir)
    except Exception as e:
        # Surface the traceback to the caller rather than crashing the app.
        import traceback
        return traceback.format_exc()
    return 'Model optimization finished.'
def create_vocab(labels):
    """Build label<->id mappings from a list of label sequences.

    Ids follow the sorted order produced by np.unique over the flattened
    labels. Returns (label2id, id2label).
    """
    flattened = []
    for sequence in labels:
        flattened += sequence
    label2id = {}
    id2label = {}
    for idx, token in enumerate(np.unique(flattened)):
        label2id[token] = idx
        id2label[idx] = token
    return (label2id, id2label)
def test_to_ntuple():
    """The to_Ntuple helpers must expand a scalar into an N-length tuple."""
    value = 2
    # Fixed-arity helpers to_1tuple .. to_4tuple.
    for n in (1, 2, 3, 4):
        helper = getattr(mmcv.utils, 'to_{}tuple'.format(n))
        assert helper(value) == ((value,) * n)
    # Generic factory for arbitrary arity.
    for n in (5, 6):
        assert mmcv.utils.to_ntuple(n)(value) == ((value,) * n)
class MultiDiscrete(gym.Space):
    """A cartesian product of discrete ranges, one [low, high] pair per dimension."""

    def __init__(self, array_of_param_array):
        self.low = np.array([pair[0] for pair in array_of_param_array])
        self.high = np.array([pair[1] for pair in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]

    def sample(self):
        """Draw one uniform integer per dimension, inclusive of both bounds."""
        draws = prng.np_random.rand(self.num_discrete_space)
        scaled = np.floor(np.multiply((self.high - self.low) + 1.0, draws) + self.low)
        return [int(v) for v in scaled]

    def contains(self, x):
        """True when x has the right length and every entry is within bounds."""
        arr = np.array(x)
        return ((len(x) == self.num_discrete_space) and (arr >= self.low).all() and (arr <= self.high).all())

    def shape(self):
        # NOTE: returns the dimension count, not a tuple, matching callers.
        return self.num_discrete_space

    def __repr__(self):
        return 'MultiDiscrete' + str(self.num_discrete_space)

    def __eq__(self, other):
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
class FileOutput(LogOutput, metaclass=abc.ABCMeta):
    """Abstract log output that streams records to a file on disk."""

    def __init__(self, file_name, mode='w'):
        # Create the parent directory before opening the log file.
        mkdir_p(os.path.dirname(file_name))
        self._log_file = open(file_name, mode)

    def close(self):
        """Close the underlying file if it is still open."""
        if self._log_file and not self._log_file.closed:
            self._log_file.close()

    def dump(self, step=None):
        """Flush buffered records to disk."""
        self._log_file.flush()
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False):
    """Load weights from a TF 2.x checkpoint into a PyTorch model.

    Instantiates the TF twin of ``pt_model`` ("TF" + class name), builds its
    variables with a dummy forward pass, restores the checkpoint, then
    transfers the weights into the PyTorch model.
    """
    try:
        import tensorflow as tf
        import torch
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    import transformers
    from .modeling_tf_utils import load_tf_weights
    logger.info(f'Loading TensorFlow weights from {tf_checkpoint_path}')
    # The TF counterpart follows the "TF<PyTorchClass>" naming scheme.
    tf_model_cls = getattr(transformers, 'TF' + pt_model.__class__.__name__)
    tf_model = tf_model_cls(pt_model.config)
    if tf_inputs is None:
        tf_inputs = tf_model.dummy_inputs
    if tf_inputs is not None:
        # Run once so every variable exists before restoring the checkpoint.
        tf_model(tf_inputs, training=False)
    load_tf_weights(tf_model, tf_checkpoint_path)
    return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info)
def test_film_correctly_forwards_input():
    """FiLM must preserve input shape and let gradients reach the embedding."""
    batch, channels, length, emb_dim = 11, 13, 31, 7
    film = FiLM(emb_dim, channels)
    features = torch.testing.make_tensor(batch, channels, length, device='cpu', dtype=torch.float32)
    conditioning = torch.testing.make_tensor(batch, emb_dim, device='cpu', dtype=torch.float32, requires_grad=True)
    out = film(features, conditioning)
    assert out.shape == (batch, channels, length)
    # Everywhere-nonzero gradient shows the embedding influences the output.
    (grad,) = torch.autograd.grad(out.sum().square(), conditioning)
    assert (grad.abs() > 0.0).all()
def get_plugin(cuda_file, extra_nvcc_options=[]):
    """Compile (or fetch from cache) a CUDA op and load it as a TF plugin.

    The compiled binary is keyed by an MD5 over the (optionally
    preprocessed) source, the full nvcc command line, the TF version and a
    cache version tag, so any relevant change triggers a rebuild.
    NOTE(review): mutable default ``extra_nvcc_options=[]`` is never
    mutated here, but a None-default would be safer.
    """
    cuda_file_base = os.path.basename(cuda_file)
    (cuda_file_name, cuda_file_ext) = os.path.splitext(cuda_file_base)
    # Already built and loaded in this process?
    if (cuda_file in _plugin_cache):
        return _plugin_cache[cuda_file]
    if verbose:
        print(('Setting up TensorFlow plugin "%s": ' % cuda_file_base), end='', flush=True)
    try:
        # Hash the raw source first.
        md5 = hashlib.md5()
        with open(cuda_file, 'rb') as f:
            md5.update(f.read())
            md5.update(b'\n')
        if (not do_not_hash_included_headers):
            if verbose:
                print('Preprocessing... ', end='', flush=True)
            # Hash the preprocessed source too so header edits invalidate the cache.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_file = os.path.join(tmp_dir, ((cuda_file_name + '_tmp') + cuda_file_ext))
                _run_cmd(_prepare_nvcc_cli(('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))))
                with open(tmp_file, 'rb') as f:
                    # Normalize absolute paths emitted by the preprocessor so
                    # the hash is machine-independent.
                    bad_file_str = (('"' + cuda_file.replace('\\', '/')) + '"').encode('utf-8')
                    good_file_str = (('"' + cuda_file_base) + '"').encode('utf-8')
                    for ln in f:
                        # Skip preprocessor line markers; they carry paths.
                        if ((not ln.startswith(b'# ')) and (not ln.startswith(b'#line '))):
                            ln = ln.replace(bad_file_str, good_file_str)
                            md5.update(ln)
                    md5.update(b'\n')
        # Assemble the platform-specific nvcc options.
        compile_opts = ''
        if (os.name == 'nt'):
            compile_opts += ('"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib'))
            compile_opts += (' --library-path="%s"' % (os.path.dirname(__file__) + '\\..\\lib'))
        elif (os.name == 'posix'):
            compile_opts += ('"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so'))
            compile_opts += " --compiler-options '-fPIC -D_GLIBCXX_USE_CXX11_ABI=0'"
        else:
            assert False  # unsupported OS
        compile_opts += (' --gpu-architecture=%s' % _get_cuda_gpu_arch_string())
        compile_opts += ' --use_fast_math'
        for opt in extra_nvcc_options:
            compile_opts += (' ' + opt)
        nvcc_cmd = _prepare_nvcc_cli(compile_opts)
        # Fold the full build configuration into the cache key.
        md5.update((('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n'))
        md5.update((('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n'))
        md5.update((('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n'))
        bin_file_ext = ('.dll' if (os.name == 'nt') else '.so')
        cuda_cache_path = make_cache_dir_path()
        bin_file = os.path.join(make_cache_dir_path(), (((cuda_file_name + '_') + md5.hexdigest()) + bin_file_ext))
        if (not os.path.isfile(bin_file)):
            if verbose:
                print('Compiling... ', end='', flush=True)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_file = os.path.join(tmp_dir, ((cuda_file_name + '_tmp') + bin_file_ext))
                _run_cmd((nvcc_cmd + (' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))))
                os.makedirs(cuda_cache_path, exist_ok=True)
                # Copy to a unique name, then rename: rename is atomic, so
                # concurrent processes never load a half-written binary.
                intermediate_file = os.path.join(cuda_cache_path, ((((cuda_file_name + '_') + uuid.uuid4().hex) + '_tmp') + bin_file_ext))
                shutil.copyfile(tmp_file, intermediate_file)
                os.rename(intermediate_file, bin_file)
        if verbose:
            print('Loading... ', end='', flush=True)
        plugin = tf.load_op_library(bin_file)
        _plugin_cache[cuda_file] = plugin
        if verbose:
            print('Done.', flush=True)
        return plugin
    except:
        if verbose:
            print('Failed!', flush=True)
        raise
def distributed():
    """Return True when launched in a multi-process job (WORLD_SIZE > 1)."""
    world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    return world_size > 1
class ImageSetToSample(ImagePreprocessing):
    """Preprocessing step that converts an ImageSet entry into a Sample.

    Fix: the original used mutable default arguments (shared lists across
    all calls); defaults are now created per call.
    """

    def __init__(self, input_keys=None, target_keys=None, sample_key='sample', bigdl_type='float'):
        # Fresh lists each time — mutable defaults are shared between calls.
        if input_keys is None:
            input_keys = ['imageTensor']
        if target_keys is None:
            target_keys = ['label']
        super(ImageSetToSample, self).__init__(bigdl_type, input_keys, target_keys, sample_key)
def main(_):
    """Entry point: quantize with Neural Compressor when --tune, and/or benchmark.

    Relies on module-level FLAGS, calib_dataloader, evaluate and logger.
    """
    if FLAGS.tune:
        from neural_compressor.quantization import fit
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor import set_random_seed
        set_random_seed(9527)
        # Keep these pooling ops in fp32 (excluded from quantization).
        op_name_dict = {'average_pooling2d': {'activation': {'dtype': ['fp32']}}, 'max_pooling2d_2': {'activation': {'dtype': ['fp32']}}, 'max_pooling2d_3': {'activation': {'dtype': ['fp32']}}}
        config = PostTrainingQuantConfig(backend='itex', op_name_dict=op_name_dict, calibration_sampling_size=[20, 150])
        q_model = fit(model=FLAGS.input_model, conf=config, calib_dataloader=calib_dataloader, eval_func=evaluate)
        q_model.save(FLAGS.output_model)
    if FLAGS.benchmark:
        from neural_compressor.benchmark import fit
        from neural_compressor.config import BenchmarkConfig
        if (FLAGS.mode == 'performance'):
            # Throughput benchmark across multiple instances.
            conf = BenchmarkConfig(backend='itex', cores_per_instance=4, num_of_instance=7)
            fit(FLAGS.input_model, conf, b_func=evaluate)
        else:
            # Accuracy mode: evaluate the model directly.
            from neural_compressor.model.model import Model
            accuracy = evaluate(Model(FLAGS.input_model, backend='itex').model)
            logger.info(('Batch size = %d' % FLAGS.batch_size))
            logger.info(('Accuracy: %.5f' % accuracy))
class _SyncBatchNorm(_BatchNorm):
    """Cross-GPU synchronized batch norm: statistics are reduced over replicas.

    Replica 0 acts as master; slaves ship their partial sums through pipes
    and receive the global mean / inv-std back (SyncMaster protocol).
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True):
        super(_SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master/slave plumbing is wired lazily in __data_parallel_replicate__.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # Eval mode: plain BN with the frozen running statistics.
        if (not self.training):
            return batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps)
        input_shape = input.size()
        # Collapse spatial dims: (N, C, *) -> (N, C, -1).
        input = input.view(input_shape[0], self.num_features, (- 1))
        # Total element count per channel across batch and spatial dims.
        N = (input.size(0) * input.size(2))
        # Per-replica partial sums of x and x^2.
        (xsum, xsqsum) = sum_square(input)
        if (self._parallel_id == 0):
            (mean, inv_std) = self._sync_master.run_master(_ChildMessage(xsum, xsqsum, N))
        else:
            (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(xsum, xsqsum, N))
        return batchnormtrain(input, mean, (1.0 / inv_std), self.weight, self.bias).view(input_shape)

    def extra_repr(self):
        return '{}, eps={}, momentum={}, sync={}'.format(self.num_features, self.eps, self.momentum, True)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Called once per replica; replica 0 owns the master end of the pipes.
        self._parallel_id = copy_id
        if (self._parallel_id == 0):
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce partial sums from all replicas; broadcast mean/inv-std back."""
        # Sort by device id so ReduceAddCoalesced gets a stable ordering.
        intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device()))
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for (i, rec) in enumerate(intermediates):
            # Each replica receives its own (mean, inv_std) pair back.
            outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Global mean and inverse std; also updates the running statistics."""
        assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = (sum_ / size)
        # Var via E[x^2]*n - n*mean^2 decomposition on raw sums.
        sumvar = (ssum - (sum_ * mean))
        unbias_var = (sumvar / (size - 1))
        bias_var = (sumvar / size)
        # Running stats track the unbiased variance, as in stock BatchNorm.
        self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mean.data))
        self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * unbias_var.data))
        return (mean, ((bias_var + self.eps) ** (- 0.5)))
_module()  # NOTE(review): looks like a truncated registry decorator (e.g. @MODELS.register_module()) — confirm against upstream.
class SEResNeXt(SEResNet):
    """SE-ResNeXt backbone: SE-ResNet with grouped 3x3 convolutions."""

    # depth -> (block class, per-stage repetition counts)
    arch_settings = {50: (SEBottleneck, (3, 4, 6, 3)), 101: (SEBottleneck, (3, 4, 23, 3)), 152: (SEBottleneck, (3, 8, 36, 3))}

    def __init__(self, depth, groups=32, width_per_group=4, **kwargs):
        # Set group parameters before the parent constructor builds layers.
        self.groups = groups
        self.width_per_group = width_per_group
        super().__init__(depth, **kwargs)

    def make_res_layer(self, **kwargs):
        # Inject the grouped-conv parameters into every residual layer.
        return ResLayer(groups=self.groups, width_per_group=self.width_per_group, base_channels=self.base_channels, **kwargs)
def tsv_writer(values, tsv_file_name):
    """Write rows of `values` as tab-separated lines, publishing atomically.

    Writes to a '.tmp' sibling first and renames only after a complete,
    successful write, so readers never observe a partial file.
    """
    ensure_directory(os.path.dirname(tsv_file_name))
    tmp_name = tsv_file_name + '.tmp'
    with open(tmp_name, 'w') as fp:
        assert (values is not None)
        for row in values:
            assert row
            fp.write('{0}\n'.format('\t'.join(map(str, row))))
    os.rename(tmp_name, tsv_file_name)
class get_features(nn.Module):
    """Feature extractor: pretrained ResNet-18 with its final FC layer removed."""

    def __init__(self):
        super(get_features, self).__init__()
        # Load the backbone, then drop the classification head.
        backbone = models.resnet18(pretrained=True)
        self.resnet18_removed = list(backbone.children())[:-1]
        self.resnet18 = nn.Sequential(*self.resnet18_removed)

    def forward(self, inputs):
        return self.resnet18(inputs)
class dcganDataset(Dataset):
    """Image dataset keyed by "<folder>_<file>" sample names with one-hot labels.

    For root == 'train_new' the special 'gen_0000' folder is sourced from
    the pre-collected ``generated_images`` triple (GAN outputs); all other
    folders are scanned from disk. Depends on module-level globals ``opt``,
    ``n_classes``, ``generated_images``, ``data_transforms`` and
    ``default_loader``.
    """

    def __init__(self, root, transform=None, targte_transform=None):
        super(dcganDataset, self).__init__()
        self.image_dir = os.path.join(opt.data_dir, root)
        self.samples = []    # "<folder>_<filename>" keys
        self.img_label = []  # one-hot label vectors of length n_classes
        self.img_flag = []   # 1 for generated images, 0 for real ones
        self.transform = transform
        # NOTE(review): "targte" is a typo for "target", kept for compatibility.
        self.targte_transform = targte_transform
        self.train_val = root
        if (root == 'train_new'):
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                if (folder == 'gen_0000'):
                    # Generated images come pre-listed; extend all three lists.
                    (samples, img_labels, flags) = generated_images
                    self.samples = (self.samples + samples)
                    self.img_label = (self.img_label + img_labels)
                    self.img_flag = (self.img_flag + flags)
                else:
                    for files in os.listdir(fdir):
                        temp = ((folder + '_') + files)
                        # The folder name doubles as the integer class label.
                        lbl = int(folder)
                        label_vec = np.zeros(shape=n_classes)
                        label_vec[lbl] = 1
                        self.img_label.append(label_vec)
                        self.img_flag.append(0)
                        self.samples.append(temp)
        else:
            # Validation/test roots: every folder is read from disk.
            for folder in os.listdir(self.image_dir):
                fdir = ((self.image_dir + '/') + folder)
                for files in os.listdir(fdir):
                    temp = ((folder + '_') + files)
                    lbl = int(folder)
                    label_vec = np.zeros(shape=n_classes)
                    label_vec[lbl] = 1
                    self.img_label.append(label_vec)
                    self.img_flag.append(0)
                    self.samples.append(temp)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        temp = self.samples[idx]
        if (self.img_flag[idx] == 1):
            # Generated keys look like "gen_0000_<file>" (prefix length 9).
            foldername = 'gen_0000'
            filename = temp[9:]
        else:
            # Assumes real folder names are exactly 4 characters — TODO confirm.
            foldername = temp[:4]
            filename = temp[5:]
        img = default_loader(((((self.image_dir + '/') + foldername) + '/') + filename))
        # Train-time vs eval-time augmentation pipelines.
        if (self.train_val == 'train_new'):
            result = {'img': data_transforms['train'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        else:
            result = {'img': data_transforms['val'](img), 'label': self.img_label[idx], 'flag': self.img_flag[idx]}
        return result
class InceptionV3(nn.Module):
    """Inception-V3 backbone with a swappable classifier head (timm-style).

    ``inception_blocks`` allows overriding the six building-block classes
    (conv stem block plus Inception A..E); the defaults mirror
    torchvision's Inception-V3. Features are 2048-dimensional.
    """

    def __init__(self, inception_blocks=None, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'):
        super(InceptionV3, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        if (inception_blocks is None):
            inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE]
        assert (len(inception_blocks) >= 6)
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        # Stem convolutions.
        self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        # Inception stages; the numbers are input channel counts.
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.num_features = 2048
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.fc = nn.Linear(2048, num_classes)
        # Truncated-normal init; blocks may override via a `stddev` attribute.
        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)):
                stddev = (m.stddev if hasattr(m, 'stddev') else 0.1)
                trunc_normal_(m.weight, std=stddev)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        """Run the convolutional trunk; returns the 2048-channel feature map."""
        x = self.Conv2d_1a_3x3(x)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        return x

    def get_classifier(self):
        """Return the current classification head."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the head for a new class count (Identity when num_classes <= 0)."""
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        self.num_classes = num_classes
        if (self.num_classes > 0):
            self.fc = nn.Linear((self.num_features * self.global_pool.feat_mult()), num_classes)
        else:
            self.fc = nn.Identity()

    def forward(self, x):
        x = self.forward_features(x)
        x = self.global_pool(x).flatten(1)
        if (self.drop_rate > 0):
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.fc(x)
        return x
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    """Compute SSIM between two batched images of shape (N, C, H, W).

    Returns the mean SSIM (or per-image means when size_average=False);
    with full=True also returns the contrast-sensitivity term.
    """
    # Infer the dynamic range L from the data when not given explicitly.
    if (val_range is None):
        if (torch.max(img1) > 128):
            max_val = 255
        else:
            max_val = 1
        if (torch.min(img1) < (- 0.5)):
            min_val = (- 1)
        else:
            min_val = 0
        L = (max_val - min_val)
    else:
        L = val_range
    padd = 0
    (_, channel, height, width) = img1.size()
    if (window is None):
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)
    # NOTE(review): the replicate pad of 5 assumes an 11-tap window; when
    # real_size != 11 the pad no longer matches the kernel — confirm intended.
    mu1 = F.conv2d(F.pad(img1, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
    mu2 = F.conv2d(F.pad(img2, (5, 5, 5, 5), mode='replicate'), window, padding=padd, groups=channel)
    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = (mu1 * mu2)
    # Local (co)variances via E[x^2] - E[x]^2.
    sigma1_sq = (F.conv2d(F.pad((img1 * img1), (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_sq)
    sigma2_sq = (F.conv2d(F.pad((img2 * img2), (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu2_sq)
    sigma12 = (F.conv2d(F.pad((img1 * img2), (5, 5, 5, 5), 'replicate'), window, padding=padd, groups=channel) - mu1_mu2)
    # Standard SSIM stabilization constants.
    C1 = ((0.01 * L) ** 2)
    C2 = ((0.03 * L) ** 2)
    v1 = ((2.0 * sigma12) + C2)
    v2 = ((sigma1_sq + sigma2_sq) + C2)
    # Contrast-sensitivity term (used by MS-SSIM callers).
    cs = torch.mean((v1 / v2))
    ssim_map = ((((2 * mu1_mu2) + C1) * v1) / (((mu1_sq + mu2_sq) + C1) * v2))
    if size_average:
        ret = ssim_map.mean()
    else:
        # Mean over C, H, W leaves one score per image.
        ret = ssim_map.mean(1).mean(1).mean(1)
    if full:
        return (ret, cs)
    return ret
class ModelArguments():
    """Command-line arguments selecting and configuring the model.

    NOTE(review): the fields use ``dataclasses.field`` but no ``@dataclass``
    decorator is visible here -- it was presumably stripped from this copy
    (HfArgumentParser pattern); confirm the original applies ``@dataclass``.
    """
    # Required: local path or huggingface.co model identifier.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to directory to store the pretrained models downloaded from huggingface.co'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    # Quantization-calibration options.
    do_calib: bool = field(default=False, metadata={'help': 'Whether to run calibration of quantization ranges.'})
    num_calib_batch: int = field(default=4, metadata={'help': 'Number of batches for calibration. 0 will disable calibration '})
    save_onnx: bool = field(default=False, metadata={'help': 'Whether to save model to onnx.'})
def main():
    """Train a small autoencoder and display accuracy/loss curves plus
    reconstruction samples."""
    input_nodes = 784
    latent_dim = 36
    model = AE(input_nodes, latent_dim)
    history = model.fit(X_train, X_train, epochs=10, batch_size=256,
                        shuffle=True, validation_data=(X_test, X_test))
    for plot_fn, tag in ((plot_acc, '(a) '), (plot_loss, '(b) ')):
        plot_fn(history, tag)
        plt.show()
    show_ae(model)
    plt.show()
def weights_init(m):
    """Initialize a module's weights DCGAN-style.

    Linear/conv layers get weights ~ N(0, 0.02); batch-norm layers get
    weights ~ N(1, 0.02) and zero bias. Any other module type is reported
    and left untouched.
    """
    kind = m.__class__
    if kind in (nn.Linear, nn.Conv2d, nn.ConvTranspose2d):
        m.weight.data.normal_(0.0, 0.02)
    elif kind == nn.BatchNorm2d:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    else:
        print('%s is not initialized.' % kind)
def get_model_parameters_number(model):
    """Return the number of trainable scalar parameters in ``model``."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
class HypothesisHandler(ScorerHandler):
    """HTTP handler that forwards a hypothesis (token list) to the scorer."""

    def put(self):
        # The instance id arrives as a request argument; the tokens are the
        # whitespace-separated raw request body.
        instance_id = int(self.get_argument('instance_id'))
        list_of_tokens = self.request.body.decode('utf-8').strip().split()
        self.scorer.recv_hyp(instance_id, list_of_tokens)
class AsymBiChaFuse(nn.Module):
    """Asymmetric bidirectional channel-attention fusion of a high-level map
    ``xh`` and a low-level map ``xl`` with the same channel count."""

    def __init__(self, channels=64, r=4):
        super(AsymBiChaFuse, self).__init__()
        self.channels = channels
        self.bottleneck_channels = int(channels // r)
        mid = self.bottleneck_channels

        def gate_layers():
            # conv -> bn -> relu -> conv -> bn -> sigmoid channel gate.
            return [
                nn.Conv2d(in_channels=self.channels, out_channels=mid, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(mid, momentum=0.9),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels=mid, out_channels=self.channels, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(self.channels, momentum=0.9),
                nn.Sigmoid(),
            ]

        # Top-down path gates on globally pooled high-level features.
        self.topdown = nn.Sequential(nn.AdaptiveAvgPool2d(1), *gate_layers())
        # Bottom-up path gates on the low-level features directly.
        self.bottomup = nn.Sequential(*gate_layers())
        self.post = nn.Sequential(
            nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1, dilation=1),
            nn.BatchNorm2d(channels, momentum=0.9),
            nn.ReLU(inplace=True),
        )

    def forward(self, xh, xl):
        topdown_wei = self.topdown(xh)
        bottomup_wei = self.bottomup(xl)
        # Cross-gated sum: each stream is scaled by the other's attention.
        fused = (2 * xl * topdown_wei) + (2 * xh * bottomup_wei)
        return self.post(fused)
def show_models():
    """Print an indexed table of all available model names."""
    names = models.__all__
    table = {'No.': list(range(1, len(names) + 1)), 'Model Names': names}
    print(tabulate(table, headers='keys'))
class LlamaCache():
    """LRU cache of llama states keyed by token tuples.

    Lookup matches the stored key sharing the longest token prefix with the
    query, so a cached state can be reused for continuations of a prompt.
    """

    def __init__(self, capacity_bytes: int=(2 << 30)):
        # Insertion order doubles as recency order (move_to_end on access).
        self.cache_state: OrderedDict[(Tuple[(int, ...)], 'LlamaState')] = OrderedDict()
        self.capacity_bytes = capacity_bytes

    def cache_size(self):
        """Return the total size in bytes of all cached states."""
        return sum(state.llama_state_size for state in self.cache_state.values())

    def _find_longest_prefix_key(self, key: Tuple[(int, ...)]) -> Optional[Tuple[(int, ...)]]:
        """Return the stored key sharing the longest token prefix with ``key``,
        or None when no stored key shares any prefix."""
        best_len = 0
        best_key = None
        for k in self.cache_state.keys():
            prefix_len = Llama.longest_token_prefix(k, key)
            if prefix_len > best_len:
                best_len = prefix_len
                best_key = k
        return best_key

    def __getitem__(self, key: Sequence[int]) -> 'LlamaState':
        key = tuple(key)
        _key = self._find_longest_prefix_key(key)
        invalidInputError((_key is not None), 'Key not found.')
        value = self.cache_state[_key]
        self.cache_state.move_to_end(_key)  # mark as most recently used
        return value

    def __contains__(self, key: Sequence[int]) -> bool:
        return (self._find_longest_prefix_key(tuple(key)) is not None)

    def __setitem__(self, key: Sequence[int], value: 'LlamaState'):
        key = tuple(key)
        if (key in self.cache_state):
            del self.cache_state[key]
        self.cache_state[key] = value
        # BUG FIX: ``cache_size`` is a method -- the original compared the
        # bound-method object itself to an int, which raises TypeError on
        # Python 3. Call it to get the byte count.
        while self.cache_size() > self.capacity_bytes:
            self.cache_state.popitem(last=False)  # evict least recently used
class conv_2nV1(nn.Module):
    """Two-stream fusion block mixing a high-resolution map ``in_h`` with a
    low-resolution map ``in_l`` (assumed half the spatial size -- TODO confirm).

    ``main`` selects which stream the block outputs: 0 -> high-res path,
    1 -> low-res path. A 1x1 conv of the corresponding input is added as a
    residual identity branch.
    """

    def __init__(self, in_hc=64, in_lc=256, out_c=64, main=0):
        super(conv_2nV1, self).__init__()
        self.main = main
        mid_c = min(in_hc, in_lc)
        self.relu = nn.ReLU(True)
        # Resolution adapters: avg-pool to go high->low, nearest up to go low->high.
        self.h2l_pool = nn.AvgPool2d((2, 2), stride=2)
        self.l2h_up = nn.Upsample(scale_factor=2, mode='nearest')
        # Stage 0: project both streams to the shared mid channel count.
        self.h2h_0 = nn.Conv2d(in_hc, mid_c, 3, 1, 1)
        self.l2l_0 = nn.Conv2d(in_lc, mid_c, 3, 1, 1)
        self.bnh_0 = nn.BatchNorm2d(mid_c)
        self.bnl_0 = nn.BatchNorm2d(mid_c)
        # Stage 1: full cross-exchange (h->h, h->l, l->h, l->l).
        self.h2h_1 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.h2l_1 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2h_1 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.l2l_1 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
        self.bnl_1 = nn.BatchNorm2d(mid_c)
        self.bnh_1 = nn.BatchNorm2d(mid_c)
        if (self.main == 0):
            # Stages 2-3 on the high-res path, plus the residual projection.
            self.h2h_2 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.l2h_2 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.bnh_2 = nn.BatchNorm2d(mid_c)
            self.h2h_3 = nn.Conv2d(mid_c, out_c, 3, 1, 1)
            self.bnh_3 = nn.BatchNorm2d(out_c)
            self.identity = nn.Conv2d(in_hc, out_c, 1)
        elif (self.main == 1):
            # Stages 2-3 on the low-res path, plus the residual projection.
            self.h2l_2 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.l2l_2 = nn.Conv2d(mid_c, mid_c, 3, 1, 1)
            self.bnl_2 = nn.BatchNorm2d(mid_c)
            self.l2l_3 = nn.Conv2d(mid_c, out_c, 3, 1, 1)
            self.bnl_3 = nn.BatchNorm2d(out_c)
            self.identity = nn.Conv2d(in_lc, out_c, 1)
        else:
            raise NotImplementedError

    def forward(self, in_h, in_l):
        # Stage 0: conv-bn-relu on each stream independently.
        h = self.relu(self.bnh_0(self.h2h_0(in_h)))
        l = self.relu(self.bnl_0(self.l2l_0(in_l)))
        # Stage 1: exchange information across resolutions.
        h2h = self.h2h_1(h)
        h2l = self.h2l_1(self.h2l_pool(h))
        l2l = self.l2l_1(l)
        l2h = self.l2h_1(self.l2h_up(l))
        h = self.relu(self.bnh_1((h2h + l2h)))
        l = self.relu(self.bnl_1((l2l + h2l)))
        if (self.main == 0):
            # Fuse into the high-res stream and add the residual branch.
            h2h = self.h2h_2(h)
            l2h = self.l2h_2(self.l2h_up(l))
            h_fuse = self.relu(self.bnh_2((h2h + l2h)))
            out = self.relu((self.bnh_3(self.h2h_3(h_fuse)) + self.identity(in_h)))
        elif (self.main == 1):
            # Fuse into the low-res stream and add the residual branch.
            h2l = self.h2l_2(self.h2l_pool(h))
            l2l = self.l2l_2(l)
            l_fuse = self.relu(self.bnl_2((h2l + l2l)))
            out = self.relu((self.bnl_3(self.l2l_3(l_fuse)) + self.identity(in_l)))
        else:
            raise NotImplementedError
        return out
class Stats():
    """Bundle of character attributes for a game entity."""

    def __init__(self, constitution=1, strength=1, dexterity=1, intelligence=5, aggression=1.0, armour_class=1, speed=1):
        self.constitution = constitution
        self.strength = strength
        self.dexterity = dexterity
        self.intelligence = intelligence
        self.aggression = aggression
        self.armour_class = armour_class
        self.speed = speed

    def copy(self):
        """Return an independent deep copy of this stat block."""
        return copy.deepcopy(self)
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command and its CLI options."""
    parser = subparsers.add_parser('plot_curve', help='parser for plotting curves')
    parser.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
    parser.add_argument('--keys', type=str, nargs='+', default=['mAP_0.25'], help='the metric that you want to plot')
    parser.add_argument('--title', type=str, help='title of figure')
    parser.add_argument('--legend', type=str, nargs='+', default=None, help='legend of each plot')
    parser.add_argument('--backend', type=str, default=None, help='backend of plt')
    parser.add_argument('--style', type=str, default='dark', help='style of plt')
    parser.add_argument('--out', type=str, default=None)
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--interval', type=int, default=1)
def vgg_19(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_19', fc_conv_padding='VALID'):
    """VGG-19 (TF1.x / tf.slim): five conv stacks + fully-convolutional head.

    Args:
        inputs: NHWC image batch.
        num_classes: size of the final logits layer.
        is_training: enables the two dropout layers.
        dropout_keep_prob: keep probability for dropout6/dropout7.
        spatial_squeeze: squeeze the 1x1 spatial dims off the logits.
        scope: variable-scope name.
        fc_conv_padding: padding for the 7x7 "fc6" conv.

    Returns:
        (net, end_points): logits and a dict of every intermediate output.
    """
    with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
        end_points_collection = (sc.name + '_end_points')
        # Record every conv/fc/pool output into the end-points collection.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection):
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Classifier implemented as convolutions (fc6/fc7/fc8).
            net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
            net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[(sc.name + '/fc8')] = net
            return (net, end_points)
class Block(nn.Module):
    """Xception-style block: a stack of separable convs with an optional
    1x1-conv skip projection when channels or stride change.

    Args:
        inplanes/planes: input/output channel counts.
        num_reps: number of separable-conv repetitions.
        stride: stride of the final (downsampling) separable conv.
        dilation: dilation for the non-strided separable convs.
        norm_layer/norm_kwargs: normalization factory and its kwargs.
        start_with_relu: prepend a ReLU before the first conv.
        grow_first: widen to ``planes`` in the first conv (else in the last).
        is_last: append one extra act/conv/bn group when stride == 1.
    """

    def __init__(self, inplanes, planes, num_reps, stride=1, dilation=1, norm_layer=None, norm_kwargs=None, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        if (planes != inplanes) or (stride != 1):
            # Projection shortcut to match the main path's channels/stride.
            self.skip = nn.Sequential()
            # FIX: the original wrapped this call in a stray one-element tuple
            # `(...,)` -- harmless at runtime but clearly unintended.
            self.skip.add_module('conv1', nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False))
            self.skip.add_module('bn1', norm_layer(num_features=planes, **norm_kwargs))
        else:
            self.skip = None
        rep = OrderedDict()
        l = 1
        filters = inplanes
        if grow_first:
            # Widen to `planes` in the first separable conv.
            if start_with_relu:
                rep['act%d' % l] = nn.ReLU(inplace=False)
            rep['conv%d' % l] = SeparableConv2d(inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            filters = planes
            l += 1
        for _ in range(num_reps - 1):
            if grow_first or start_with_relu:
                rep['act%d' % l] = nn.ReLU(inplace=(grow_first or (not start_with_relu)))
            rep['conv%d' % l] = SeparableConv2d(filters, filters, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=filters, **norm_kwargs)
            l += 1
        if not grow_first:
            # Widen at the end of the stack instead of the start.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(inplanes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        if stride != 1:
            # The strided separable conv performs the downsampling.
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(planes, planes, 3, stride, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        elif is_last:
            rep['act%d' % l] = nn.ReLU(inplace=True)
            rep['conv%d' % l] = SeparableConv2d(planes, planes, 3, 1, dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
            rep['bn%d' % l] = norm_layer(num_features=planes, **norm_kwargs)
            l += 1
        self.rep = nn.Sequential(rep)

    def forward(self, x):
        skip = x
        if self.skip is not None:
            skip = self.skip(skip)
        return self.rep(x) + skip
class Mass(Logged):
    """Table of isotope masses loaded from the Audi 2003 data file.

    NOTE(review): ``A``/``Z``/``N``/``DM``/``BE`` are written as plain methods
    but are used as attributes (``self.A`` inside ``DM``/``BE``, ``del self.A``
    inside ``append``) -- the original presumably wrapped them in a cached-
    property style decorator that was stripped from this copy; confirm before
    relying on them.
    """
    default_data = 'Au03'

    def __init__(self, data=default_data, silent=False):
        """Load the mass table from $KEPLER_DATA (or a default local path)."""
        self.setup_logger(silent=silent)
        path = os.getenv('KEPLER_DATA')
        if not path:
            path = os.path.join(os.path.expanduser('~'), 'kepler', 'local_data')
            self.logger.warning('using default path ' + path)
        filename = os.path.join(path, 'masses_audi_2003.dat')
        self.comment = ()
        # FIX: ``np.object`` was deprecated and removed in NumPy >= 1.24;
        # the builtin ``object`` is the documented drop-in equivalent.
        self.iso = np.array([], dtype=object)
        self.mass = np.array([], dtype=np.float64)
        xre = re.compile('[-+a-zA-Z0-9.]+')
        with open(filename, 'r') as f:
            self.logger_file_info(f)
            for line in f:
                if not line.startswith((';', '#')):
                    xdata = xre.findall(line)
                    xnum = len(xdata)
                    if xnum == 0:
                        continue
                    if xnum == 2:
                        (xion, xabu) = tuple(xdata)
                    else:
                        print(line)
                        raise IOError('bad format')
                    self._append(isotope.ion(xion), np.double(xabu))
                else:
                    # Header/comment lines are kept without the two-char marker.
                    self.comment += (line[2:],)
        message = '{:3d} masses loaded in'.format(len(self.iso))
        self.close_logger(timing=message)

    def _append(self, iso, abu):
        self.iso = np.append(self.iso, iso)
        self.mass = np.append(self.mass, abu)

    def append(self, iso, abu):
        """Add an entry and invalidate the cached derived quantities."""
        self._append(iso, abu)
        # NOTE(review): only works if A/Z/N/DM/BE are cached attributes.
        del self.A
        del self.Z
        del self.N
        del self.DM
        del self.BE

    def __getitem__(self, ion):
        """Mass of ``ion``; falls back to the nominal mass number A."""
        try:
            (i,) = np.argwhere(self.iso == ion)
            return self.mass[i[0]]
        # FIX: narrowed the bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; a missing/non-unique entry still falls back.
        except Exception:
            pass
        return np.double(isotope.ion(ion).A)

    def __str__(self):
        return (('mass(' + ', '.join(['{:s}: {:f}'.format(iso.Name(), mass) for (iso, mass) in zip(self.iso, self.mass)])) + ')')
    __repr__ = __str__

    def A(self):
        """Mass numbers of all isotopes."""
        return isotope.ufunc_A(self.iso)

    def Z(self):
        """Proton numbers of all isotopes."""
        return isotope.ufunc_Z(self.iso)

    def N(self):
        """Neutron numbers of all isotopes."""
        return isotope.ufunc_N(self.iso)

    def DM(self):
        # NOTE(review): relies on self.A being a value, not a method (see class note).
        return (self.mass - self.A)

    def BE(self):
        # Placeholder nucleon masses; binding energy in the same mass units.
        mn = 1.
        mp = 1.
        return ((self.mass - (self.Z * mp)) - (self.N * mn))
class Outcome(Enum):
    """Result category of a run, serializable via the module-level maps."""
    ParseError = 0
    CompilationError = 1
    TestingError = 2
    Success = 3

    def to_json(self) -> Any:
        """Serialize this outcome using OUTCOME_MAP."""
        return OUTCOME_MAP[self]

    # FIX: restored the @classmethod decorator -- the method takes ``cls``
    # and is meant to be called as ``Outcome.from_json(d)``; without the
    # decorator the string argument would be bound to ``cls``.
    @classmethod
    def from_json(cls, d: str) -> 'Outcome':
        """Deserialize an outcome using OUTCOME_REV_MAP."""
        return OUTCOME_REV_MAP[d]
class MemorizedMaxPooling2D(MaxPooling2D):
    """Max-pooling layer that remembers the argmax indices of its most recent
    call in ``self.idx`` (e.g. for later unpooling)."""

    def __init__(self, *args, **kwargs):
        super(MemorizedMaxPooling2D, self).__init__(*args, **kwargs)
        self.idx = None  # indices from the most recent pooling call

    def _pooling_function(self, inputs, pool_size, strides, padding, data_format):
        pooled, indices = pool2d_argmax(inputs, pool_size, strides=strides, padding=padding, data_format=data_format)
        self.idx = indices
        return pooled
class VerticalFlip(object):
    """Flip every frame of a video clip upside-down.

    Accepts a list of ``numpy.ndarray`` frames or ``PIL.Image`` frames; the
    first frame's type decides which branch handles the whole clip.
    """

    def __call__(self, clip):
        first = clip[0]
        if isinstance(first, np.ndarray):
            return [np.flipud(frame) for frame in clip]
        if isinstance(first, PIL.Image.Image):
            return [frame.transpose(PIL.Image.FLIP_TOP_BOTTOM) for frame in clip]
        raise TypeError('Expected numpy.ndarray or PIL.Image' + ' but got list of {0}'.format(type(first)))
def array_equal_lists(list1, list2):
    """Return True when both lists have the same length and their elements are
    pairwise array-equal."""
    ia.do_assert(isinstance(list1, list))
    ia.do_assert(isinstance(list2, list))
    if len(list1) != len(list2):
        return False
    return all(np.array_equal(a, b) for a, b in zip(list1, list2))
class TFResNetEncoder(tf.keras.layers.Layer):
    """Stack of ResNet stages (TF/Keras port of the HF ResNet encoder)."""

    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        # First stage maps the embedding width to hidden_sizes[0]; it may
        # downsample depending on the config flag.
        self.stages = [TFResNetStage(config, config.embedding_size, config.hidden_sizes[0], stride=(2 if config.downsample_in_first_stage else 1), depth=config.depths[0], name='stages.0')]
        # Remaining stages chain consecutive hidden sizes.
        for (i, (in_channels, out_channels, depth)) in enumerate(zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:])):
            self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f'stages.{(i + 1)}'))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool=False, return_dict: bool=True, training: bool=False) -> TFBaseModelOutputWithNoAttention:
        """Run all stages; optionally collect each stage's input plus the
        final output as ``hidden_states``."""
        hidden_states = (() if output_hidden_states else None)
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = (hidden_states + (hidden_state,))
            hidden_state = stage_module(hidden_state, training=training)
        if output_hidden_states:
            hidden_states = (hidden_states + (hidden_state,))
        if (not return_dict):
            return tuple((v for v in [hidden_state, hidden_states] if (v is not None)))
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
def convert_img(params):
    """Downsample one image and save it under the same name in ``out_path``.

    Args:
        params: tuple ``(img_filepath, out_path, downsample)`` -- packed into a
            single argument, presumably for use with a worker-pool ``map``
            (TODO confirm against the caller).
    """
    (img_filepath, out_path, downsample) = params
    img_file = os.path.split(img_filepath)[1]
    out_filepath = os.path.join(out_path, img_file)
    # FIX: the original exists()/mkdir() pair had a check-then-act race (two
    # workers could both pass the check and one mkdir would raise).
    # makedirs(exist_ok=True) is safe under concurrency, creates missing
    # parents, and raises on genuine failure (replacing the assert).
    os.makedirs(out_path, exist_ok=True)
    img = load_image(img_filepath)
    img = Image.fromarray(img)
    (w, h) = (img.width, img.height)
    img = img.resize(((w // downsample), (h // downsample)))
    img.save(out_filepath)
def get_vocab_list(data_root_path, vocab_root_path, text_min_count):
    """Load a cached vocabulary; build it from the training text when the
    cache file is missing."""
    try:
        return get_vocab(vocab_root_path, text_min_count)
    except FileNotFoundError:
        corpus = get_content(data_root_path)
        return build_vocab(vocab_root_path, corpus, text_min_count)
def imagenet_convnext_small_in22ft1k_pretrained(output_dim):
    """Load a ConvNeXt-Small (22k-pretrained, 1k-finetuned) checkpoint and
    replace its classifier head with one producing ``output_dim`` outputs."""
    checkpoint = '/scratch/nvg7279/convnext_models/convnext_small_22k_1k_224.pth'
    model = load_small_convnext(checkpoint, depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], num_classes=output_dim)
    return _convnext_replace_fc(model, output_dim)
def mbv1():
    """Build a MobileNetV1-x1.0 recognizer on CPU from the test config and
    print its structure."""
    cfg.merge_from_file('tests/configs/mobilenet/mobilenet_v1_x1_0.yaml')
    model = build_recognizer(cfg, torch.device('cpu'))
    print(model)
def astroNNAgesPath(dr=None):
    """Return the path to the astroNN ages catalog for data release ``dr``.

    DR < 14 is unsupported; DR > 14 delegates to the generic astroNN path;
    DR 14 points at the fixed goodDR14 FITS file.
    """
    dr = _default_dr() if dr is None else dr
    release = int(dr)
    if release < 14:
        raise ValueError('astroNN ages catalog for DR < 14 not available')
    if release > 14:
        return astroNNPath(dr=dr)
    specReduxPath = apogeeSpectroReduxDirPath(dr=dr)
    return os.path.join(specReduxPath, 'r8', 'stars', 'l31c', _redux_dr(dr=dr), 'astroNNBayes_ages_goodDR14.fits')
def evaluate_regions(folder_predicted: str, folder_gt: str, regions: dict, processes=default_num_threads):
    """Evaluate predicted segmentations against ground truth, per region.

    Runs ``evaluate_case`` in a worker pool over every predicted .nii.gz file
    and writes per-case Dice scores plus mean/median summary rows (with and
    without NaN->1 substitution) to ``summary.csv`` in ``folder_predicted``.

    Args:
        folder_predicted: folder containing predicted segmentations (.nii.gz).
        folder_gt: folder containing reference segmentations (.nii.gz).
        regions: mapping region name -> label spec passed to evaluate_case.
        processes: number of worker processes.
    """
    region_names = list(regions.keys())
    files_in_pred = subfiles(folder_predicted, suffix='.nii.gz', join=False)
    files_in_gt = subfiles(folder_gt, suffix='.nii.gz', join=False)
    have_no_gt = [i for i in files_in_pred if i not in files_in_gt]
    # FIX: corrected the grammar of this error message ("have not" -> "have no").
    assert len(have_no_gt) == 0, 'Some files in folder_predicted have no ground truth in folder_gt'
    have_no_pred = [i for i in files_in_gt if i not in files_in_pred]
    if len(have_no_pred) > 0:
        print('WARNING! Some files in folder_gt were not predicted (not present in folder_predicted)!')
    files_in_gt.sort()
    files_in_pred.sort()
    # Only cases that were actually predicted are evaluated.
    full_filenames_gt = [join(folder_gt, i) for i in files_in_pred]
    full_filenames_pred = [join(folder_predicted, i) for i in files_in_pred]
    # FIX: use the pool as a context manager so the workers are reclaimed even
    # when evaluate_case raises (the original leaked the pool on error).
    with Pool(processes) as p:
        res = p.starmap(evaluate_case, zip(full_filenames_pred, full_filenames_gt, [list(regions.values())] * len(files_in_gt)))
    all_results = {r: [] for r in region_names}
    with open(join(folder_predicted, 'summary.csv'), 'w') as f:
        f.write('casename')
        for r in region_names:
            f.write(',%s' % r)
        f.write('\n')
        for i in range(len(files_in_pred)):
            f.write(files_in_pred[i][:-7])  # strip the '.nii.gz' suffix
            result_here = res[i]
            for k, r in enumerate(region_names):
                dc = result_here[k]
                f.write(',%02.4f' % dc)
                all_results[r].append(dc)
            f.write('\n')
        # Summary rows: plain stats, then stats with NaN treated as 1.
        for label, stat, nan_to_one in (
                ('mean', np.nanmean, False),
                ('median', np.nanmedian, False),
                ('mean (nan is 1)', np.mean, True),
                ('median (nan is 1)', np.median, True)):
            f.write(label)
            for r in region_names:
                values = np.array(all_results[r])
                if nan_to_one:
                    values[np.isnan(values)] = 1
                f.write(',%02.4f' % stat(values))
            f.write('\n')
def layer_flops_distribution(config, model):
    """Sample subnetworks near the target FLOPs and histogram, per layer, the
    sampled output-channel counts; the master rank saves one PDF per layer.

    NOTE(review): relies on ``model.module.direct_sampling`` and ``USModule``
    internals defined elsewhere; their semantics are inferred from names.
    """
    num_sample = config.arch.num_flops_stats_sample
    repo = {}
    for _ in range(num_sample):
        # Rejection-sample until the subnet lands within +/-5% of the target.
        cur_flops = (config.arch.target_flops * 10)
        while ((cur_flops > (config.arch.target_flops * 1.05)) or (cur_flops < (config.arch.target_flops * 0.95))):
            model.module.direct_sampling()
            cur_flops = calc_model_flops(model, config.dataset.input_size)
        # Record each US layer's currently sampled output-channel count.
        for (n, m) in model.named_modules():
            if isinstance(m, USModule):
                if (n not in repo.keys()):
                    repo[n] = []
                repo[n].append(m.cur_out_ch)
    if dist.is_master():
        root_dir = os.path.join(config.save_path, 'layer_flops_distribution')
        if (not os.path.exists(root_dir)):
            os.makedirs(root_dir)
        for n in repo.keys():
            # One normalized 50-bin histogram per layer, saved as a PDF.
            save_path = os.path.join(root_dir, (n + '.pdf'))
            plt.hist(repo[n], 50, density=True, facecolor='g', alpha=0.75)
            pp = PdfPages(save_path)
            plt.savefig(pp, format='pdf')
            pp.close()
            plt.gcf().clear()
def view_samples(images):
    """Display up to 64 images on an 8x8 grid with hidden axes.

    Accepts a torch tensor (converted to a CPU NumPy array first) or any
    sequence of images that ``imshow`` understands.
    """
    # FIX: ``torch.tensor`` is the factory *function*, not the class, so the
    # original ``type(images) == torch.tensor`` could never match and GPU
    # tensors reached matplotlib unconverted. isinstance against
    # ``torch.Tensor`` also covers subclasses such as nn.Parameter.
    if isinstance(images, torch.Tensor):
        images = images.cpu().numpy()
    (fig, axes) = plt.subplots(figsize=(30, 30), nrows=8, ncols=8, sharey=True, sharex=True)
    for (ax, img) in zip(axes.flatten(), images):
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.imshow(img)
def main():
    """Build a small TF1 BasicRNNCell graph with an L2 loss and its input
    gradient, then hand the requested tensors to ``run_model``.

    NOTE(review): reads module-level ``argv`` -- argv[2] is a comma-separated
    list of tensor names and argv[3] a 'True'/'False' flag; argv[1]'s meaning
    is opaque from here. Confirm against the calling script.
    """
    tf.set_random_seed(10)
    with tf.Session() as sess:
        rnn_cell = tf.nn.rnn_cell.BasicRNNCell(10)
        initial_state = rnn_cell.zero_state(4, dtype=tf.float32)
        # Batch of 4 sequences, length 2, feature size 100.
        inputs = tf.Variable(tf.random_uniform(shape=(4, 2, 100)), name='input')
        inputs = tf.identity(inputs, 'input_node')
        (outputs, state) = tf.nn.dynamic_rnn(rnn_cell, inputs, initial_state=initial_state, dtype=tf.float32)
        y1 = tf.identity(outputs, 'outputs')
        y2 = tf.identity(state, 'state')
        # Squared-error loss against all-ones targets for outputs and state.
        t1 = tf.ones([4, 2, 10])
        t2 = tf.ones([4, 10])
        loss = (tf.reduce_sum(((y1 - t1) * (y1 - t1))) + tf.reduce_sum(((y2 - t2) * (y2 - t2))))
        tf.identity(loss, name='rnn_loss')
        grad = tf.identity(tf.gradients(loss, inputs), name='gradOutput')
        net_outputs = map((lambda x: tf.get_default_graph().get_tensor_by_name(x)), argv[2].split(','))
        run_model(net_outputs, argv[1], None, (argv[3] == 'True'))
def combine_tricks(trick_1, trick_2):
    """Merge two trick records.

    Returns an untricked record when neither input tricked; otherwise the
    maximum batch size and summed group size over the non-falsy values.
    """
    if not (trick_1.tricked or trick_2.tricked):
        return _TrickInfo(False, None, None)
    batch_size = max(filter(None, [trick_1.batch_size, trick_2.batch_size]))
    group_size = sum(filter(None, [trick_1.group_size, trick_2.group_size]))
    return _TrickInfo(True, batch_size, group_size)
_module
class DoubleHeadRCNN(TwoStageDetector):
    """Two-stage detector whose bbox head consumes two RoI feature sets: one
    for classification and one for regression (the latter extracted with a
    scaled RoI, ``reg_roi_scale_factor``)."""

    def __init__(self, reg_roi_scale_factor, **kwargs):
        super().__init__(**kwargs)
        # RoI scale used only for the regression branch's feature extraction.
        self.reg_roi_scale_factor = reg_roi_scale_factor

    def forward_dummy(self, img):
        """Dummy forward for FLOPs computation: RPN outputs plus head outputs
        on 1000 random proposals."""
        outs = ()
        x = self.extract_feat(img)
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            outs = (outs + (rpn_outs,))
        proposals = torch.randn(1000, 4).cuda()
        rois = bbox2roi([proposals])
        # Two feature sets: plain RoIs for cls, rescaled RoIs for reg.
        bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
        bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            bbox_cls_feats = self.shared_head(bbox_cls_feats)
            bbox_reg_feats = self.shared_head(bbox_reg_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
        outs += (cls_score, bbox_pred)
        return outs

    def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None):
        """Standard two-stage training forward returning a dict of losses."""
        x = self.extract_feat(img)
        losses = dict()
        # RPN: losses plus proposal generation (unless proposals are given).
        if self.with_rpn:
            rpn_outs = self.rpn_head(x)
            rpn_loss_inputs = (rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn))
            rpn_losses = self.rpn_head.loss(*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
            losses.update(rpn_losses)
            proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn)
            proposal_inputs = (rpn_outs + (img_meta, proposal_cfg))
            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
        else:
            proposal_list = proposals
        # Assign and sample proposals per image for the RoI heads.
        if (self.with_bbox or self.with_mask):
            bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
            bbox_sampler = build_sampler(self.train_cfg.rcnn.sampler, context=self)
            num_imgs = img.size(0)
            if (gt_bboxes_ignore is None):
                gt_bboxes_ignore = [None for _ in range(num_imgs)]
            sampling_results = []
            for i in range(num_imgs):
                assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i])
                sampling_result = bbox_sampler.sample(assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x])
                sampling_results.append(sampling_result)
        if self.with_bbox:
            rois = bbox2roi([res.bboxes for res in sampling_results])
            # Double-head: separate cls and (rescaled-RoI) reg features.
            bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
            bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
            if self.with_shared_head:
                bbox_cls_feats = self.shared_head(bbox_cls_feats)
                bbox_reg_feats = self.shared_head(bbox_reg_feats)
            (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
            bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
            loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets)
            losses.update(loss_bbox)
        if self.with_mask:
            if (not self.share_roi_extractor):
                pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
                mask_feats = self.mask_roi_extractor(x[:self.mask_roi_extractor.num_inputs], pos_rois)
                if self.with_shared_head:
                    mask_feats = self.shared_head(mask_feats)
            else:
                # Shared extractor: select positive-sample features by mask.
                pos_inds = []
                device = bbox_cls_feats.device
                for res in sampling_results:
                    pos_inds.append(torch.ones(res.pos_bboxes.shape[0], device=device, dtype=torch.uint8))
                    pos_inds.append(torch.zeros(res.neg_bboxes.shape[0], device=device, dtype=torch.uint8))
                pos_inds = torch.cat(pos_inds)
                mask_feats = bbox_cls_feats[pos_inds]
            mask_pred = self.mask_head(mask_feats)
            mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn)
            pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
            loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels)
            losses.update(loss_mask)
        return losses

    def simple_test_bboxes(self, x, img_meta, proposals, rcnn_test_cfg, rescale=False):
        """Single-image (no TTA) bbox test using the double-head features."""
        rois = bbox2roi(proposals)
        bbox_cls_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois)
        bbox_reg_feats = self.bbox_roi_extractor(x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor)
        if self.with_shared_head:
            bbox_cls_feats = self.shared_head(bbox_cls_feats)
            bbox_reg_feats = self.shared_head(bbox_reg_feats)
        (cls_score, bbox_pred) = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
        img_shape = img_meta[0]['img_shape']
        scale_factor = img_meta[0]['scale_factor']
        (det_bboxes, det_labels) = self.bbox_head.get_det_bboxes(rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg)
        return (det_bboxes, det_labels)
def shape_mergeable(x, expected_shape):
    """Return whether ``x``'s shape is compatible with ``expected_shape``.

    Non-array inputs, and arrays whose rank differs from the expectation, are
    considered mergeable; otherwise every non-None expected dim must match.
    """
    if not (is_array_like(x) and is_array_like(expected_shape)):
        return True
    arr = np.array(x)
    if len(arr.shape) != len(expected_shape):
        return True
    return all(s_ex is None or s == s_ex for s, s_ex in zip(arr.shape, expected_shape))
def test_interpolation_potential_vcirc_outsidegrid():
    """Outside the interpolation grid, vcirc must match the exact value."""
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), logR=False, interpvcirc=True, zsym=False)
    for r in (0.005, 2.5):  # one radius below and one above the rgrid bounds
        exact = potential.vcirc(potential.MWPotential, r)
        vcdiff = numpy.fabs((rzpot.vcirc(r) - exact) / exact)
        assert vcdiff < 10.0 ** -10.0, f'RZPot interpolation w/ interpRZPotential fails outside the grid at R = {r:g} by {vcdiff:g}'
    return None
def arg_to_varname(st: str):
    """Convert a CLI argument like ``--some-flag=3`` into ``some_flag``."""
    cleaned = trim_preceding_hyphens(st)
    cleaned = cleaned.replace('-', '_')
    name, _, _ = cleaned.partition('=')
    return name
def _transpose(training_targets, num_loc_list):
for im_i in range(len(training_targets)):
training_targets[im_i] = torch.split(training_targets[im_i], num_loc_list, dim=0)
targets_level_first = []
for targets_per_level in zip(*training_targets):
targets_level_first.append(torch.cat(targets_per_level, dim=0))
return targets_level_first |
def test_digits_cosine_sample():
    """Sample-optimizer selection must reproduce the stored reference data."""
    selector = SumRedundancySelection(100, 'cosine', optimizer='sample', random_state=0)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_sample_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_sample_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def make_types(filename, no_optimization):
    """Collect extension-type cimport statements from all .pyx files and write
    them to ``filename`` as a dict literal, skipping the write when unchanged.

    Args:
        filename: output path for the ``{type_name: cimport_line}`` mapping.
        no_optimization: unused here; kept for interface compatibility.
    """
    import ast  # local import: only needed to parse the existing types file
    extension_types = {}
    for other_pyxfile in all_pyxfiles:
        module = other_pyxfile[:-4]  # strip the '.pyx' suffix
        with open(other_pyxfile, mode='r', encoding='utf-8') as pyxfile:
            code = pyxfile.read().split('\n')
        # Only the trailing '# __pyxinfo__' section is scanned.
        for i, line in enumerate(reversed(code)):
            if line == '# __pyxinfo__':
                code = code[-i:]
                break
        for i, line in enumerate(code):
            if line == '# Extension types implemented by this module:':
                line = code[i + 1].lstrip('#')
                for extension_type in line.split(','):
                    if ':' in extension_type:
                        # Explicit 'TypeName: import statement' override.
                        extension_type, import_str = extension_type.split(':')
                        extension_types[extension_type.strip()] = import_str.strip()
                    else:
                        extension_type = extension_type.strip()
                        extension_types[extension_type] = 'from {} cimport {}'.format(module, extension_type)
    if os.path.isfile(filename):
        with open(filename, mode='r', encoding='utf-8') as types_file:
            existing_extension_types_content = types_file.read()
        try:
            # FIX (security): use ast.literal_eval instead of eval -- the file
            # holds a plain dict literal, and eval would execute any code that
            # ended up in it.
            existing_extension_types = ast.literal_eval(existing_extension_types_content)
            if existing_extension_types == extension_types:
                return
        except Exception:
            # FIX: the warning previously printed the literal text "(unknown)"
            # instead of interpolating the offending file name.
            print(f'Warning: Could not interpret the content of "{filename}".', file=sys.stderr)
    with open(filename, mode='w', encoding='utf-8') as types_file:
        types_file.write(str(extension_types))
def indentation(logical_line, previous_logical, indent_char, indent_level, previous_indent_level):
    """pycodestyle-style check yielding (offset, message) pairs for
    indentation that is not a multiple of four (E111/E114), a missing indent
    after a ':' line (E112/E115), or an unexpected indent (E113/E116).

    Comment-only lines (empty ``logical_line``) use the +3 error codes.
    """
    comment_offset = 3 if not logical_line else 0
    template = 'E11%d %s' if logical_line else 'E11%d %s (comment)'
    if indent_level % 4:
        yield 0, template % (1 + comment_offset, 'indentation is not a multiple of four')
    expects_indent = previous_logical.endswith(':')
    if expects_indent and indent_level <= previous_indent_level:
        yield 0, template % (2 + comment_offset, 'expected an indented block')
    elif not expects_indent and indent_level > previous_indent_level:
        yield 0, template % (3 + comment_offset, 'unexpected indentation')
class HybridEmbed(nn.Module):
    """CNN-backbone feature map to patch-embedding adapter (ViT hybrid stem).

    Runs ``backbone`` on the input image, then projects its final feature map
    to ``embed_dim`` patches with a conv of kernel/stride ``patch_size``.
    """

    def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768):
        super().__init__()
        assert isinstance(backbone, nn.Module)
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.backbone = backbone
        if (feature_size is None):
            # Probe the backbone with a zero image to discover its output
            # spatial size and channel count; restore train/eval mode after.
            with torch.no_grad():
                training = backbone.training
                if training:
                    backbone.eval()
                o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))
                if isinstance(o, (list, tuple)):
                    o = o[(- 1)]
                feature_size = o.shape[(- 2):]
                feature_dim = o.shape[1]
                backbone.train(training)
        else:
            feature_size = to_2tuple(feature_size)
            # Known feature size: get the channel count from metadata instead.
            if hasattr(self.backbone, 'feature_info'):
                feature_dim = self.backbone.feature_info.channels()[(- 1)]
            else:
                feature_dim = self.backbone.num_features
        assert (((feature_size[0] % patch_size[0]) == 0) and ((feature_size[1] % patch_size[1]) == 0))
        self.num_patches = (((feature_size[0] // patch_size[0]) * feature_size[1]) // patch_size[1])
        self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[(- 1)]  # use the last feature map when multiple are returned
        # (B, C, H, W) -> (B, num_patches, embed_dim)
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class ContextGating(nn.Module):
    """Context Gating layer: output = GLU([x, BN?(Wx + b)]) = x * sigmoid(gates)."""

    def __init__(self, dimension, add_batch_norm=True):
        super(ContextGating, self).__init__()
        self.fc = nn.Linear(dimension, dimension)
        self.add_batch_norm = add_batch_norm
        # Created unconditionally (matching the original) so checkpoints have
        # the same parameter set in either configuration.
        self.batch_norm = nn.BatchNorm1d(dimension)

    def forward(self, x):
        gates = self.fc(x)
        if self.add_batch_norm:
            gates = self.batch_norm(gates)
        # GLU over the concatenated halves computes x * sigmoid(gates).
        return F.glu(torch.cat((x, gates), 1), 1)
# FIX: the registry call was a bare statement whose returned class decorator
# was discarded, so ``Max`` was never registered. Restored as a decorator.
@_registry(operator_type='Max')
class Max(Operator):
    """Operator implementation registered under type 'Max'."""

    def __init__(self):
        super().__init__()
def load_cifar10():
    """Load CIFAR-10, preprocess the images, one-hot the labels, and order the
    axes to match the active Keras ``image_data_format``."""
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    X_train, X_test = preprocess(X_train), preprocess(X_test)
    y_train, y_test = to_categorical(y_train), to_categorical(y_test)
    _, img_rows, img_cols, channel = X_train.shape
    if K.image_data_format() == 'channels_first':
        target_shape = lambda x: (x.shape[0], channel, img_rows, img_cols)
    else:
        target_shape = lambda x: (x.shape[0], img_rows, img_cols, channel)
    X_train = X_train.reshape(target_shape(X_train))
    X_test = X_test.reshape(target_shape(X_test))
    return (X_train, y_train), (X_test, y_test)
class GraphPropPredDataset(object):
    """OGB-style graph property prediction dataset.

    Resolves dataset metadata (from the bundled master.csv or a caller-supplied
    ``meta_dict``), downloads/extracts the raw data on first use, pre-processes
    it into ``self.graphs`` / ``self.labels``, and caches the result with
    ``torch.save`` for subsequent runs.

    Args:
        name: dataset name, e.g. 'ogbg-molhiv' (hyphens become '_' on disk).
        root: directory under which the dataset folder is created.
        meta_dict: optional metadata dict overriding the master.csv lookup;
            must contain at least 'dir_path' plus the keys read below.

    Raises:
        ValueError: if ``name`` is not listed in master.csv.
    """

    def __init__(self, name, root='dataset', meta_dict=None):
        self.name = name
        if meta_dict is None:
            self.dir_name = '_'.join(name.split('-'))
            self.original_root = root
            self.root = osp.join(root, self.dir_name)
            # master.csv ships next to this module; columns are dataset names.
            master = pd.read_csv(osp.join(osp.dirname(__file__), 'master.csv'), index_col=0)
            if self.name not in master:
                error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
                error_mssg += 'Available datasets are as follows:\n'
                error_mssg += '\n'.join(master.keys())
                raise ValueError(error_mssg)
            self.meta_info = master[self.name]
        else:
            self.dir_name = meta_dict['dir_path']
            self.original_root = ''
            self.root = meta_dict['dir_path']
            self.meta_info = meta_dict
        # A missing RELEASE_v<version>.txt marker means the on-disk copy is
        # stale; offer to wipe it so pre_process() re-downloads.
        release_marker = osp.join(self.root, 'RELEASE_v' + str(self.meta_info['version']) + '.txt')
        if osp.isdir(self.root) and not osp.exists(release_marker):
            print(self.name + ' has been updated.')
            if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
                shutil.rmtree(self.root)
        self.download_name = self.meta_info['download_name']
        self.num_tasks = int(self.meta_info['num tasks'])
        self.eval_metric = self.meta_info['eval metric']
        self.task_type = self.meta_info['task type']
        self.num_classes = self.meta_info['num classes']
        self.binary = (self.meta_info['binary'] == 'True')
        super(GraphPropPredDataset, self).__init__()
        self.pre_process()

    def pre_process(self):
        """Load graphs/labels from the processed cache, or build it from raw data.

        Populates ``self.graphs`` and ``self.labels``; downloads and extracts
        the raw archive first when it is not present on disk.
        """
        processed_dir = osp.join(self.root, 'processed')
        raw_dir = osp.join(self.root, 'raw')
        pre_processed_file_path = osp.join(processed_dir, 'data_processed')
        if osp.exists(pre_processed_file_path):
            # Fast path: reuse the cached tensors. (Fix: the original passed
            # 'rb' as torch.load's second positional arg, which is
            # map_location, not a file mode — torch.load opens the path itself.)
            loaded_dict = torch.load(pre_processed_file_path)
            self.graphs, self.labels = loaded_dict['graphs'], loaded_dict['labels']
        else:
            if self.binary:
                has_necessary_file = osp.exists(osp.join(self.root, 'raw', 'data.npz'))
            else:
                has_necessary_file = osp.exists(osp.join(self.root, 'raw', 'edge.csv.gz'))
            if not has_necessary_file:
                url = self.meta_info['url']
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # Best-effort removal of any stale dataset dir before the
                    # move; narrowed from a bare except to OSError.
                    try:
                        shutil.rmtree(self.root)
                    except OSError:
                        pass
                    shutil.move(osp.join(self.original_root, self.download_name), self.root)
                else:
                    print('Stop download.')
                    exit(-1)
            add_inverse_edge = (self.meta_info['add_inverse_edge'] == 'True')
            if self.meta_info['additional node files'] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info['additional node files'].split(',')
            if self.meta_info['additional edge files'] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info['additional edge files'].split(',')
            if self.binary:
                self.graphs = read_binary_graph_raw(raw_dir, add_inverse_edge=add_inverse_edge)
            else:
                self.graphs = read_csv_graph_raw(raw_dir, add_inverse_edge=add_inverse_edge,
                                                 additional_node_files=additional_node_files,
                                                 additional_edge_files=additional_edge_files)
            if self.task_type == 'subtoken prediction':
                # Labels are space-joined token sequences; split per graph.
                labels_joined = pd.read_csv(osp.join(raw_dir, 'graph-label.csv.gz'),
                                            compression='gzip', header=None).values
                self.labels = [str(labels_joined[i][0]).split(' ') for i in range(len(labels_joined))]
            elif self.binary:
                self.labels = np.load(osp.join(raw_dir, 'graph-label.npz'))['graph_label']
            else:
                self.labels = pd.read_csv(osp.join(raw_dir, 'graph-label.csv.gz'),
                                          compression='gzip', header=None).values
            print('Saving...')
            torch.save({'graphs': self.graphs, 'labels': self.labels},
                       pre_processed_file_path, pickle_protocol=4)

    def get_idx_split(self, split_type=None):
        """Return {'train', 'valid', 'test'} index arrays for ``split_type``.

        Defaults to the split named in the metadata; prefers a pre-built
        split_dict.pt when present, else reads the per-split csv.gz files.
        """
        if split_type is None:
            split_type = self.meta_info['split']
        path = osp.join(self.root, 'split', split_type)
        if osp.isfile(osp.join(path, 'split_dict.pt')):
            return torch.load(osp.join(path, 'split_dict.pt'))
        train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header=None).values.T[0]
        valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header=None).values.T[0]
        test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header=None).values.T[0]
        return {'train': train_idx, 'valid': valid_idx, 'test': test_idx}

    def __getitem__(self, idx):
        """Return the (graph, label) pair at integer index ``idx``."""
        if isinstance(idx, (int, np.integer)):
            return (self.graphs[idx], self.labels[idx])
        raise IndexError('Only integer is valid index (got {}).'.format(type(idx).__name__))

    def __len__(self):
        return len(self.graphs)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, len(self))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.