def create_video_files_from_folder(folder: str, output_folder: str, output_filename: str='train.csv'):
if (not _HAS_PD):
raise ImportError('pandas is required to use this function.')
folder = Path(folder)
output_file = (Path(output_folder) / output_filename)
classes = sorted((f.name for f in folder.iterdir() if f.is_dir()))
class_to_idx = {classes[i]: i for i in range(len(classes))}
data = [[f'{class_folder.name}/{video_file.name}', class_to_idx[class_folder.name]] for class_folder in sorted(folder.iterdir()) if class_folder.is_dir() for video_file in sorted(class_folder.iterdir()) if video_file.is_file()]
df = pandas.DataFrame(data, columns=['video', 'class'])
df.to_csv(output_file, sep=' ', header=None, index=None) |
def move_to_device(obj, device):
if (not has_tensor(obj)):
return obj
elif isinstance(obj, torch.Tensor):
return obj.to(device)
elif isinstance(obj, dict):
return {key: move_to_device(value, device) for (key, value) in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, device) for item in obj])
else:
return obj |
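# Usage sketch for move_to_device (relies on the has_tensor helper referenced
# above): nested dicts/lists/tuples are traversed and only tensors are moved.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch = {'ids': torch.tensor([1, 2, 3]), 'feats': [torch.zeros(2, 4)], 'name': 'sample'}
batch = move_to_device(batch, device) |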
def gen_feats():
(x, y, z) = (240, 240, 155)
feats = np.stack(np.meshgrid(np.arange(x), np.arange(y), np.arange(z), indexing='ij'), (- 1)).astype('float32')
shape = np.array([x, y, z])
feats -= (shape / 2.0)
feats /= shape
return feats |
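# Property check for gen_feats (numpy assumed as np): coordinates are centered
# per axis and scaled into [-0.5, 0.5).
feats = gen_feats()
assert feats.shape == (240, 240, 155, 3)
assert feats.min() >= -0.5 and feats.max() < 0.5 |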
def get_feat(rssm_state: RSSMState):
return torch.cat((rssm_state.stoch, rssm_state.deter), dim=(- 1)) |
class SwishJit(nn.Module):
def __init__(self, inplace: bool=False):
super(SwishJit, self).__init__()
def forward(self, x):
return swish_jit(x) |
def demo():
with open(config_file, 'r') as f:
config = yaml.safe_load(f)
data_set_test = prepare_test_data_set(**config['data'], **config['model'], verbose=True, test_mode=True)
myModel = build_model(config, data_set_test)
myModel.load_state_dict(torch.load(model_file)['state_dict'])
print('VQA Demo')
print('Say next to go to next image')
print('Say stop to stop demo')
im_file = get_image()
while True:
print('What question would you like to ask?')
question_str = input()
if (question_str.lower() == 'next'):
im_file = get_image()
continue
if (question_str.lower() == 'stop'):
print('Bye')
break
data_set_test.datasets[0].imdb = get_imdb(im_file, question_str)
data_reader_test = DataLoader(data_set_test, shuffle=False, batch_size=1)
ans_dic = data_set_test.answer_dict
(question_ids, soft_max_result) = run_model(myModel, data_reader_test, ans_dic.UNK_idx)
print_result(question_ids, soft_max_result, ans_dic) |
def train(optims, max_epoch, policy, bsize, env, num_clicks, recom_number, max_length, origin_reward, capacity):
outputdir = 'model_output'
policy_new = os.path.join(outputdir, 'model_free_simple.pickle')
(optim_fn, optim_params) = get_optimizer(optims)
optimizer = optim_fn(filter((lambda p: p.requires_grad), policy.parameters()), **optim_params)
n_epochs = max_epoch
max_reward = 0
epoch = 1
best_model = None
rewards = [origin_reward]
while (epoch <= n_epochs):
_ = train_gen_pg_each(policy, env, epoch, optimizer, num_clicks, recom_number, max_length, bsize, total_size=capacity)
print('saving policy at epoch {0}'.format(epoch))
if (not os.path.exists(outputdir)):
os.makedirs(outputdir)
torch.save(policy, policy_new)
(_, mean_reward) = Eval(policy_new)
rewards.append(mean_reward)
if (mean_reward >= max_reward):
best_model = policy
max_reward = mean_reward
epoch += 1
return (best_model, rewards, max_reward) |
class BaseEnvironment(ABC):
def __init__(self):
pass
def step(self, action: int):
pass
def reset(self):
pass
def render(self):
pass
def seed(self, seed):
pass
def close(self):
pass |
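# Hedged sketch of a concrete BaseEnvironment subclass (the dynamics here are
# illustrative placeholders, not part of the original interface):
class ConstantEnvironment(BaseEnvironment):
def step(self, action: int):
return (0.0, False)
def reset(self):
return 0 |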
def get_f1_over_list(prediction, groundtruth):
if isinstance(groundtruth, list):
if (len(groundtruth) == 0):
return 0
return np.max([qa_f1_score(prediction, gt) for gt in groundtruth])
return qa_f1_score(prediction, groundtruth) |
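# Usage sketch for get_f1_over_list (qa_f1_score is the token-level F1 defined
# elsewhere in this codebase): with a list of references, the best match wins.
best_f1 = get_f1_over_list('the cat sat', ['a cat sat', 'the dog ran']) |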
def createData():
loadData()
delex_data = {}
with open('data/multi-woz/data.json', 'r') as fin1:
data = json.load(fin1)
with open('data/multi-woz/dialogue_acts.json', 'r') as fin2:
data2 = json.load(fin2)
for (didx, dialogue_name) in enumerate(data):
dialogue = data[dialogue_name]
domains = []
for (dom_k, dom_v) in dialogue['goal'].items():
if (dom_v and (dom_k not in IGNORE_KEYS_IN_GOAL)):
domains.append(dom_k)
idx_acts = 1
(last_domain, last_slot_fill) = ('', [])
for (idx, turn) in enumerate(dialogue['log']):
origin_text = normalize(turn['text'], False)
dialogue['log'][idx]['text'] = origin_text
if ((idx % 2) == 1):
cur_domain = getDomain(idx, dialogue['log'], domains, last_domain)
last_domain = [cur_domain]
dialogue['log'][(idx - 1)]['domain'] = cur_domain
dialogue['log'][idx]['dialogue_acts'] = getDialogueAct(dialogue_name, dialogue, data2, idx, idx_acts)
idx_acts += 1
dialogue = fixDelex(dialogue_name, dialogue, data2, idx, idx_acts)
delex_data[dialogue_name] = dialogue
return delex_data |
def create_logger(distributed_rank=0, save_dir=None):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
filename = ('log_%s.txt' % datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
if (distributed_rank > 0):
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s [%(asctime)s]')
ch.setFormatter(formatter)
logger.addHandler(ch)
if (save_dir is not None):
fh = logging.FileHandler(os.path.join(save_dir, filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger |
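# Usage sketch for create_logger: only rank 0 attaches handlers, so non-zero
# ranks stay quiet; pass save_dir to also log to a timestamped file.
logger = create_logger(distributed_rank=0, save_dir=None)
logger.info('logger ready') |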
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
example = get_strided_contexts_and_ans(example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion)
return example |
class PascalVOCDataset(torch.utils.data.Dataset):
CLASSES = ('__background__ ', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
def __init__(self, data_dir, split, use_difficult=False, transforms=None):
self.root = data_dir
self.image_set = split
self.keep_difficult = use_difficult
self.transforms = transforms
self._annopath = os.path.join(self.root, 'Annotations', '%s.xml')
self._imgpath = os.path.join(self.root, 'JPEGImages', '%s.jpg')
self._imgsetpath = os.path.join(self.root, 'ImageSets', 'Main', '%s.txt')
with open((self._imgsetpath % self.image_set)) as f:
self.ids = f.readlines()
self.ids = [x.strip('\n') for x in self.ids]
self.id_to_img_map = {k: v for (k, v) in enumerate(self.ids)}
cls = PascalVOCDataset.CLASSES
self.class_to_ind = dict(zip(cls, range(len(cls))))
self.categories = dict(zip(range(len(cls)), cls))
def __getitem__(self, index):
img_id = self.ids[index]
img = Image.open((self._imgpath % img_id)).convert('RGB')
target = self.get_groundtruth(index)
target = target.clip_to_image(remove_empty=True)
if (self.transforms is not None):
(img, target) = self.transforms(img, target)
return (img, target, index)
def __len__(self):
return len(self.ids)
def get_groundtruth(self, index):
img_id = self.ids[index]
anno = ET.parse((self._annopath % img_id)).getroot()
anno = self._preprocess_annotation(anno)
(height, width) = anno['im_info']
target = BoxList(anno['boxes'], (width, height), mode='xyxy')
target.add_field('labels', anno['labels'])
target.add_field('difficult', anno['difficult'])
return target
def _preprocess_annotation(self, target):
boxes = []
gt_classes = []
difficult_boxes = []
TO_REMOVE = 1
for obj in target.iter('object'):
difficult = (int(obj.find('difficult').text) == 1)
if ((not self.keep_difficult) and difficult):
continue
name = obj.find('name').text.lower().strip()
bb = obj.find('bndbox')
box = [bb.find('xmin').text, bb.find('ymin').text, bb.find('xmax').text, bb.find('ymax').text]
bndbox = tuple(map((lambda x: (x - TO_REMOVE)), list(map(int, box))))
boxes.append(bndbox)
gt_classes.append(self.class_to_ind[name])
difficult_boxes.append(difficult)
size = target.find('size')
im_info = tuple(map(int, (size.find('height').text, size.find('width').text)))
res = {'boxes': torch.tensor(boxes, dtype=torch.float32), 'labels': torch.tensor(gt_classes), 'difficult': torch.tensor(difficult_boxes), 'im_info': im_info}
return res
def get_img_info(self, index):
img_id = self.ids[index]
anno = ET.parse((self._annopath % img_id)).getroot()
size = anno.find('size')
im_info = tuple(map(int, (size.find('height').text, size.find('width').text)))
return {'height': im_info[0], 'width': im_info[1]}
def map_class_id_to_class_name(self, class_id):
return PascalVOCDataset.CLASSES[class_id] |
class BaseLoader(ImageCollection):
def __init__(self, split, path, regex, load_func=None, lmdb_env=None):
if (lmdb_env is not None):
key_db = osp.basename(path)
with lmdb_env.begin() as txn:
_files_vec = txn.get(key_db.encode()).decode().split('|')
_files = [bytes(osp.join(path, f).encode()) for f in _files_vec]
super(BaseLoader, self).__init__(_files, load_func=load_func)
else:
super(BaseLoader, self).__init__(osp.join(path, regex), load_func=load_func)
self.name = osp.basename(path)
self.split = split
if (split == phase.TRAIN.value):
if (not (self.name in cfg.SEQUENCES_TRAIN)):
raise Exception("Sequence name '{}' not found.".format(self.name))
elif (split == phase.VAL.value):
if (not (self.name in cfg.SEQUENCES_VAL)):
raise Exception("Sequence name '{}' not found.".format(self.name))
elif (split == phase.TRAINVAL.value):
if (not (self.name in cfg.SEQUENCES_TRAINVAL)):
raise Exception("Sequence name '{}' not found.".format(self.name))
elif (not (self.name in cfg.SEQUENCES_TEST)):
raise Exception("Sequence name '{}' not found.".format(self.name))
def __str__(self):
return "< class: '{}' name: '{}', frames: {} >".format(type(self).__name__, self.name, len(self)) |
def meaningless_words():
stopwords_list = []
for word in stopwords.words('english'):
tokens = nltk.word_tokenize(word)
stopwords_list += tokens
stopwords_list = (list(set(stopwords_list)) + stopwords.words('english'))
return stopwords_list |
def report_memory(name):
mega_bytes = (1024.0 * 1024.0)
string = (name + ' memory (MB)')
string += ' | allocated: {:.1f}'.format((torch.cuda.memory_allocated() / mega_bytes))
string += ' | max allocated: {:.1f}'.format((torch.cuda.max_memory_allocated() / mega_bytes))
string += ' | reserved: {:.1f}'.format((torch.cuda.memory_reserved() / mega_bytes))
string += ' | max reserved: {:.1f}'.format((torch.cuda.max_memory_reserved() / mega_bytes))
if (torch.distributed.get_rank() == 0):
print('[Rank {}] {}'.format(torch.distributed.get_rank(), string), flush=True) |
def cal_fdp_power(selected, non_zero_index, r_index=False):
if (selected.size == 0):
return (0.0, 0.0)
if r_index:
selected = (selected - 1)
true_positive = [i for i in selected if (i in non_zero_index)]
false_positive = [i for i in selected if (i not in non_zero_index)]
fdp = (len(false_positive) / max(1, len(selected)))
power = (len(true_positive) / len(non_zero_index))
return (fdp, power) |
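# Worked example for cal_fdp_power (numpy assumed as np): 2 of the 3 selected
# indices are true signals out of 4, so fdp = 1/3 and power = 2/4.
selected = np.array([0, 2, 9])
(fdp, power) = cal_fdp_power(selected, non_zero_index=[0, 1, 2, 3])
assert abs(fdp - (1 / 3)) < 1e-09 and power == 0.5 |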
def _gen_efficientnet_condconv(variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs):
arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4']]
round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=(kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))), act_layer=resolve_act_layer(kwargs, 'swish'), **kwargs)
model = _create_effnet(variant, pretrained, **model_kwargs)
return model |
def inference_all(model, path):
print('Start inference')
imagenet_dataset = datasets.ImageFolder(path, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]))
dataloader = DataLoader(imagenet_dataset, batch_size=256, shuffle=False, num_workers=4)
num_correct = 0
num_total = 0
with torch.no_grad():
for (ii, sample) in enumerate(dataloader):
(image, label) = (sample[0].cuda(), sample[1].numpy())
logits = model(image)
pred = torch.max(logits, 1)[1].cpu().numpy()
num_correct += np.sum((pred == label))
num_total += image.shape[0]
print(num_correct, num_total, (num_correct / num_total))
acc = (num_correct / num_total)
return acc |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('input_folder', help='path to Kaldi folder. ')
parser.add_argument('output_folder', help='folder where to write the files')
return parser.parse_args() |
@register_model
def regnetx_008(pretrained=False, **kwargs):
return _regnet('regnetx_008', pretrained, **kwargs) |
def make_chem_data(logic_id):
path = Path(__file__).parent
basepath = (path / 'chem_data')
outpath = (path / 'chem_data')
try:
(X_train, X_test, Y_train, Y_test) = prepare_chem_dataset('{}/logic_{}_train.csv'.format(basepath, logic_id), '{}/logic_{}_test.csv'.format(basepath, logic_id), 'logic_{}'.format(logic_id))
np.save((outpath / f'logic_{logic_id}_X_train.npy'), X_train)
np.save((outpath / f'logic_{logic_id}_X_test.npy'), X_test)
np.save((outpath / f'logic_{logic_id}_Y_train.npy'), Y_train)
np.save((outpath / f'logic_{logic_id}_Y_test.npy'), Y_test)
except FileNotFoundError:
print('Data not found, please download it and place it in datasets/chem_data')
raise |
class GeneratorEBEN(nn.Module):
def __init__(self, m: int, n: int, p: int=1):
super().__init__()
self.p = p
self.pqmf = PseudoQMFBanks(decimation=m, kernel_size=n)
self.multiple = (((2 * 4) * 8) * m)
self.nl = nn.LeakyReLU(negative_slope=0.01)
self.first_conv = nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3, padding='same', bias=False, padding_mode='reflect')
self.encoder_blocks = nn.ModuleList([EncBlock(out_channels=64, stride=2, nl=self.nl), EncBlock(out_channels=128, stride=4, nl=self.nl), EncBlock(out_channels=256, stride=8, nl=self.nl)])
self.latent_conv = nn.Sequential(self.nl, normalized_conv1d(in_channels=256, out_channels=64, kernel_size=7, padding='same', bias=False, padding_mode='reflect'), self.nl, normalized_conv1d(in_channels=64, out_channels=256, kernel_size=7, padding='same', bias=False, padding_mode='reflect'), self.nl)
self.decoder_blocks = nn.ModuleList([DecBlock(out_channels=128, stride=8, nl=self.nl), DecBlock(out_channels=64, stride=4, nl=self.nl), DecBlock(out_channels=32, stride=2, nl=self.nl)])
self.last_conv = nn.Conv1d(in_channels=32, out_channels=4, kernel_size=3, padding='same', bias=False, padding_mode='reflect')
def forward(self, cut_audio):
first_bands = self.pqmf(cut_audio, 'analysis', bands=self.p)
x = self.first_conv(first_bands)
x1 = self.encoder_blocks[0](self.nl(x))
x2 = self.encoder_blocks[1](self.nl(x1))
x3 = self.encoder_blocks[2](self.nl(x2))
x = self.latent_conv(x3)
x = self.decoder_blocks[0](x, x3)
x = self.decoder_blocks[1](x, x2)
x = self.decoder_blocks[2](x, x1)
x = self.last_conv(x)
(b, c, t) = first_bands.shape
fill_up_tensor = torch.zeros((b, (self.pqmf.decimation - self.p), t), requires_grad=False).type_as(first_bands)
cat_tensor = torch.cat(tensors=(first_bands, fill_up_tensor), dim=1)
enhanced_speech_decomposed = torch.tanh((x + cat_tensor))
enhanced_speech = torch.sum(self.pqmf(enhanced_speech_decomposed, 'synthesis'), 1, keepdim=True)
return (enhanced_speech, enhanced_speech_decomposed)
def cut_tensor(self, tensor):
old_len = tensor.shape[2]
new_len = (old_len - ((old_len + self.pqmf.kernel_size) % self.multiple))
tensor = torch.narrow(tensor, 2, 0, new_len)
return tensor |
def ilp_file_verify(options_parser, options, master_logger):
if (options.ilp_file is not None):
if (not os.path.exists(options.ilp_file)):
raise Exception((('ILP file ' + options.ilp_file) + ' not found')) |
def test_2_lines_together():
marker_pattern = '\\s*(?P<mark>\\[\\s*(?P<marknum>\\d+)\\s*\\])'
refs = [u'[1] hello', u'hello2 [2] foo']
rebuilt_refs = rebuild_reference_lines(refs, marker_pattern)
assert (rebuilt_refs == [u'[1] hello hello2', u'[2] foo']) |
def get_loss(pred, label, end_points, reg_weight=0.001):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
transform = end_points['transform']
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.summary.scalar('mat loss', mat_diff_loss)
return (classify_loss + (mat_diff_loss * reg_weight)) |
@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
def __init__(self, has_grad_scale=0, onehot_label=0, grad_scale=1):
super(SegmentLossProp, self).__init__(need_top_grad=False)
self.has_grad_scale = (int(has_grad_scale) > 0)
self.onehot_label = (int(onehot_label) > 0)
self.grad_scale = float(grad_scale)
def list_arguments(self):
if self.has_grad_scale:
return ['data', 'label', 'scale']
else:
return ['data', 'label']
def infer_shape(self, in_shape):
return (in_shape, [in_shape[0]], [])
def create_operator(self, ctx, shapes, dtypes):
return SegmentLoss(self.has_grad_scale, self.onehot_label, self.grad_scale) |
def get_activations(images, sess, batch_size=16, verbose=False):
inception_layer = _get_inception_layer(sess)
d0 = len(images)
if (batch_size > d0):
print('warning: batch size is bigger than the data size. setting batch size to data size')
batch_size = d0
n_batches = (d0 // batch_size)
n_used_imgs = (n_batches * batch_size)
pred_arr = np.empty((n_used_imgs, 2048))
for i in tqdm(range(n_batches)):
if verbose:
print(('\rPropagating batch %d/%d' % ((i + 1), n_batches)), end='', flush=True)
start = (i * batch_size)
end = (start + batch_size)
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, (- 1))
if verbose:
print(' done')
return pred_arr |
def run_and_plot(cond_ind_test, fig_ax, aspect=20):
pcmci = PCMCI(dataframe=dataframe, cond_ind_test=cond_ind_test)
results = pcmci.run_pcmci(tau_max=2, pc_alpha=0.2, alpha_level=0.01)
tp.plot_graph(fig_ax=fig_ax, val_matrix=results['val_matrix'], graph=results['graph'], var_names=var_names, node_aspect=aspect, node_size=0.02)
plt.show() |
@contextlib.contextmanager
def enable_wrap(auto_wrap_policy: Optional[Callable]=None, **wrapper_kwargs: Any) -> Generator[None, None, None]:
with ConfigAutoWrap(auto_wrap_policy, **wrapper_kwargs):
(yield) |
class SqueezeBertForMultipleChoice():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def encode_schema(schema: Dict[(str, SchemaField)]) -> str:
copy_schema = schema.copy()
for (k, v) in copy_schema.items():
copy_schema[k] = v.to_dict()
return json.dumps(copy_schema, cls=EnumEncoder) |
def replace_control(beam_lst, lst_src, int_order, map_j):
map_j_rev = {v[0]: k for (k, v) in map_j.items()}
total_captured = 0
result = []
for num in range(len(lst_src)):
fields = get_e2e_poswrds(lst_src[num].split())
temp_dict = defaultdict(list)
for ((k, idx), wrd) in fields.items():
temp_dict[k].append((idx, wrd))
fields = [wrd for ((k, idx), wrd) in fields.items()]
result.append((fields, temp_dict))
result_2 = []
x_idx = 0
score_lst = []
precision_lst = []
recall_lst = []
coverage_lst = []
for ii in range(len(beam_lst)):
try:
(x, x_dict) = result[x_idx]
y = beam_lst[ii]
except IndexError:
print('x_idx is out of range for x:', x_idx, ii)
break
try:
(y, _, states) = y.split('|||')
except ValueError:
continue
states = ast.literal_eval(states)
y = y.split()
filled_y = ' '.join(y)
result_2.append(filled_y)
assert (len(states) == len(y))
states_span = get_span(states)
sent_score = []
labelseq = [c for (a, b, c) in states_span]
match_ = 0
num_coverage = 0
denom_coverage = 0
denom_precise = 0
denom_recall = 0
print(np.unique([map_j_rev[yy] for yy in x_dict.keys()]), np.unique(labelseq))
for (key1, val1) in x_dict.items():
local_total = 0
local_good = 0
temp_val = map_j_rev[key1]
if (temp_val in labelseq):
num_coverage += 1
denom_coverage += 1
find_idx = labelseq.index(temp_val)
temp1 = states_span[find_idx]
(aa, bb, cc) = temp1
ref_word = [n for (m, n) in val1]
pred = y[aa:bb]
for word in ref_word:
if (word in pred):
local_good += 1
match_ += 1
denom_precise += (bb - aa)
denom_recall += len(ref_word)
local_total += len(val1)
elif (temp_val == 7):
print('caught 7')
continue
else:
denom_coverage += 1
denom_recall += len(val1)
local_good += 0
local_total += len(val1)
span_score = (local_good / local_total)
sent_score.append(span_score)
sent_score = np.array(sent_score).mean()
score_lst.append(sent_score)
precision = (match_ / denom_precise)
recall = (match_ / denom_recall)
coverage = (num_coverage / denom_coverage)
precision_lst.append(precision)
recall_lst.append(recall)
coverage_lst.append(coverage)
x_idx += 1
print('all the captured things is ', total_captured)
full_score = np.array(score_lst).mean()
print('score is ', full_score)
print('summary:')
precision_lst = np.array(precision_lst).mean()
recall_lst = np.array(recall_lst).mean()
coverage_lst = np.array(coverage_lst).mean()
print('precision is {} \t recall is {} \t coverage is {}'.format(precision_lst, recall_lst, coverage_lst))
return result_2 |
def doc_start(implicit=False):
if implicit:
return {'emit': '', 'handle': 'OnDocumentStart(_)'}
else:
return {'emit': 'BeginDoc', 'handle': 'OnDocumentStart(_)'} |
class CogsDataset(OneShotDataset):
def __init__(self, **kwargs):
super().__init__(self.load_split('train'), self.load_split('dev'), self.load_split('test'), **kwargs)
def load_split(self, split):
data = []
with open(os.path.join(FLAGS.cogs_dir, (split + '.tsv'))) as reader:
for line in reader:
(inp, out, _) = line.strip().split('\t')
out = out.replace(' _ ', '_')
data.append((tuple(inp.split()), tuple(out.split())))
return data
def score(self, pred, ref_out, ref_inp):
return (1 if (pred == ref_out) else 0) |
def find(function, iterable):
for x in iterable:
if (function(x) is True):
return x |
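# Usage note for find: the predicate must return the bool True itself (an
# identity check), and None is returned when nothing matches.
assert find(lambda x: x % 2 == 0, [1, 3, 4, 5]) == 4
assert find(lambda x: x > 10, [1, 2]) is None |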
def rotate(v1, v2, v):
size_batch = tf.shape(v1)[0]
hidden_size = tf.shape(v1)[1]
U = rotation_components(v1, v2)
h = tf.reshape(v, [size_batch, hidden_size, 1])
proj0 = tf.matmul(U[0], tf.matmul(tf.transpose(U[0], [0, 2, 1]), h))
proj1 = tf.matmul(U[1], tf.matmul(tf.transpose(U[1], [0, 2, 1]), h))
rot = tf.matmul(tf.transpose(U[2], [0, 2, 1]), tf.matmul(U[3], tf.matmul(U[2], h)))
return (v + tf.reshape(((rot - proj0) - proj1), [size_batch, hidden_size])) |
class OS(TaskHandler):
def match(self, task_name) -> bool:
task_name = task_name.lower()
return (task_name.startswith('os') or task_name.startswith('operating'))
def get_main_metric(self, overall_result):
return overall_result['custom']['overall']['acc']
def get_order_priority(self):
return 1 |
def convert_json(obj):
if is_json_serializable(obj):
return obj
else:
if isinstance(obj, dict):
return {convert_json(k): convert_json(v) for (k, v) in obj.items()}
elif isinstance(obj, tuple):
return tuple(convert_json(x) for x in obj)
elif isinstance(obj, list):
return [convert_json(x) for x in obj]
elif (hasattr(obj, '__name__') and (not ('lambda' in obj.__name__))):
return convert_json(obj.__name__)
elif (hasattr(obj, '__dict__') and obj.__dict__):
obj_dict = {convert_json(k): convert_json(v) for (k, v) in obj.__dict__.items()}
return {str(obj): obj_dict}
return str(obj) |
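# Usage sketch for convert_json (relies on the is_json_serializable helper
# referenced above): non-serializable values degrade to names or repr strings.
cfg = {'seed': 1, 'shape': (3, 4), 'act': max}
print(convert_json(cfg))  # max -> 'max' via its __name__; already-serializable values pass through |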
def request_trial(func, *args, **kwargs):
for _ in range(MAX_REQUEST_TRIALS):
try:
response = func(*args, **kwargs)
except Exception:
continue
else:
return response
raise SystemError('request failed after {} trials'.format(MAX_REQUEST_TRIALS)) |
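# Hypothetical usage of request_trial with the requests library
# (MAX_REQUEST_TRIALS is assumed to be a module-level constant):
import requests
response = request_trial(requests.get, 'https://example.com', timeout=10) |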
def mobilenetv3_small_wd2(**kwargs):
return get_mobilenetv3(version='small', width_scale=0.5, model_name='mobilenetv3_small_wd2', **kwargs) |
class Swin2SRImageProcessingTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_rescale=True, rescale_factor=(1 / 255), do_pad=True, pad_size=8):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_pad = do_pad
self.pad_size = pad_size
def prepare_image_processor_dict(self):
return {'do_rescale': self.do_rescale, 'rescale_factor': self.rescale_factor, 'do_pad': self.do_pad, 'pad_size': self.pad_size}
def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
assert (not (numpify and torchify)), 'You cannot specify both numpy and PyTorch tensors at the same time'
if equal_resolution:
image_inputs = []
for i in range(self.batch_size):
image_inputs.append(np.random.randint(255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
else:
image_inputs = []
for i in range(self.batch_size):
(width, height) = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
if ((not numpify) and (not torchify)):
image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
if torchify:
image_inputs = [torch.from_numpy(x) for x in image_inputs]
return image_inputs |
@dataclass
class OptimizationArguments:
tune: bool = field(default=False, metadata={'help': 'Whether or not to apply quantization.'})
quantization_approach: Optional[str] = field(default='PostTrainingStatic', metadata={'help': 'Quantization approach. Supported approach are PostTrainingStatic, PostTrainingDynamic and QuantizationAwareTraining.'}) |
def PROFILE_NonZeroTile(M=3, K=3, N=3, nbits_a=1, nbits_x=1):
A = torch.ones((M, K)).cuda()
X = torch.ones((K, N)).cuda()
bit_a = QGTC.val2bit(A, nbits_a, False, False)
bit_x = QGTC.val2bit(X, nbits_x, True, False)
QGTC.bitMM2Bit_profile(bit_a, bit_x, M, K, N, nbits_a, nbits_x, nbits_x) |
def adjust_learning_rate_pyramid(optimizer, max_epoch):
def __adjust_learning_rate_pyramid(epoch):
base_lr = C.get()['lr']
lr = ((base_lr * (0.1 ** (epoch // (max_epoch * 0.5)))) * (0.1 ** (epoch // (max_epoch * 0.75))))
return lr
return torch.optim.lr_scheduler.LambdaLR(optimizer, __adjust_learning_rate_pyramid) |
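# Sanity sketch of the pyramid decay rule above, factored out for a quick check
# (base_lr = 0.1 and max_epoch = 100 are assumed values, not from the source):
def _pyramid_lr(epoch, base_lr=0.1, max_epoch=100):
return ((base_lr * (0.1 ** (epoch // (max_epoch * 0.5)))) * (0.1 ** (epoch // (max_epoch * 0.75))))
assert _pyramid_lr(0) == 0.1
assert abs(_pyramid_lr(60) - 0.01) < 1e-12
assert abs(_pyramid_lr(80) - 0.001) < 1e-12 |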
def _get_config_from_default_config(flag_values: flags.FlagValues, presets_path=None) -> ConfigDict:
base_config = train.default_config.get_default_config()
if (presets_path is not None):
presets = io.load_config_dict('', presets_path)
base_config.update(presets)
config_flags.DEFINE_config_dict('config', base_config, lock_config=False, flag_values=flag_values)
flag_values(sys.argv)
config = flag_values.config
config.model = train.default_config.choose_model_type_in_model_config(config.model)
return config |
class InteractionEnhancement(torch.nn.Module):
def __init__(self, extended=True):
super(InteractionEnhancement, self).__init__()
self.extended = extended
def forward(self, *args):
to_concat = []
to_concat.extend(args)
if self.extended:
a0 = args[0]
for a1 in args[1:]:
to_concat.append((a0 - a1))
to_concat.append((a0 * a1))
m_a = torch.cat(to_concat, dim=(- 1))
return m_a |
def drop_variable_from_dobldobl_polynomials(pols, svar):
from phcpy.phcpy2c3 import py2c_syscon_dobldobl_drop_variable_by_name
from phcpy.phcpy2c3 import py2c_syscon_remove_symbol_name
from phcpy.interface import store_dobldobl_system, load_dobldobl_system
store_dobldobl_system(pols)
py2c_syscon_dobldobl_drop_variable_by_name(len(svar), svar)
py2c_syscon_remove_symbol_name(len(svar), svar)
return load_dobldobl_system() |
def extract_process(opts, i, jobs_queue, output_queue):
global options
options = opts
createLogger(options.quiet, options.debug, options.log_file)
out = StringIO()
while True:
job = jobs_queue.get()
if job:
(id, revid, title, page, page_num) = job
try:
e = Extractor(*job[:4])
page = None
e.extract(out)
text = out.getvalue()
except Exception:
text = ''
logging.exception('Processing page: %s %s', id, title)
output_queue.put((page_num, text))
out.truncate(0)
out.seek(0)
else:
logging.debug('Quit extractor')
break
out.close() |
def encode_image_array_as_png_str(image):
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string |
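# Round-trip sketch for encode_image_array_as_png_str (numpy, PIL.Image, and
# six are assumed imported, as the function requires): output starts with the PNG magic.
png_str = encode_image_array_as_png_str(np.zeros((4, 4, 3)))
assert png_str[:4] == b'\x89PNG' |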
def smooth_temporal(x, kernel_size=5, pad_prev=0, pad_next=0):
orig_shape = x.shape
kernel = torch.ones(x.shape[1], 1, kernel_size, 1).to(x.device)
kernel.div_(kernel_size)
x = x.permute(1, 0, 2, 3)
x = x.view(1, x.shape[0], x.shape[1], (- 1))
if ((pad_prev > 0) or (pad_next > 0)):
x = F.pad(x, (0, 0, pad_prev, pad_next), 'reflect')
x = F.conv2d(x, kernel, groups=x.shape[1])
x = x.permute(0, 2, 1, 3)
x = x.view(((x.shape[1],) + orig_shape[1:]))
return x |
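# Shape check for smooth_temporal (assumes torch and torch.nn.functional as F
# imported, as the function body requires): with symmetric reflect padding of
# (kernel_size - 1) // 2 on each side, the temporal length is preserved.
x = torch.rand(10, 3, 8, 8)
y = smooth_temporal(x, kernel_size=5, pad_prev=2, pad_next=2)
assert y.shape == x.shape |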
@register_model
def resnet32ts(pretrained=False, **kwargs):
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) |
def read_cherrypicker_coref(filename, gold_text):
regex = '(<COREF [^>]*>)|(</COREF> *)|( *[^< ][^< ]* *)'
mentions = {}
clusters = defaultdict(list)
unmatched_mentions = []
text = [[]]
sentence = 0
word = 0
prev = ['', '']
mapping = {}
word_convert = {'learnt': 'learned', 'learned': 'learnt'}
with open(filename) as coref_file:
for line in coref_file:
for (coref_start, coref_end, token) in re.findall(regex, line.strip()):
if (token != ''):
token = token.strip()
allowed = (token == gold_text[sentence][word])
allowed = (allowed or (token in '{}[]()'))
allowed = (allowed or ((token in word_convert) and (word_convert[token] == gold_text[sentence][word])))
allowed = (allowed or ('/'.join(token.split('_')) == gold_text[sentence][word]))
if allowed:
prev = ['', '']
word += 1
text[(- 1)].append(token)
elif (len(prev[0]) == 0):
prev[0] = gold_text[sentence][word]
prev[1] = token
text[(- 1)].append(token)
elif (((prev[1] + token) == prev[0]) or ('/'.join((prev[1] + token).split('_')) == prev[0])):
if (len(text[(- 1)]) == 0):
text[(- 2)][(- 1)] = prev[0]
else:
text[(- 1)][(- 1)] = prev[0]
word += 1
prev = ['', '']
else:
prev[1] += token
if (word == len(gold_text[sentence])):
word = 0
sentence += 1
text.append([])
elif (coref_start != ''):
mention_id = int(coref_start.split('ID="')[1].split('"')[0])
if ('REF=' in coref_start):
cluster = mapping[int(coref_start.split('REF="')[1].split('"')[0])]
else:
cluster = mention_id
mapping[mention_id] = cluster
unmatched_mentions.append((cluster, sentence, word))
elif (coref_end != ''):
(cluster, msentence, start) = unmatched_mentions.pop()
end = word
if (msentence != sentence):
end = len(gold_text[msentence])
elif ((end == start) and (len(prev[0]) > 0)):
end += 1
mentions[(msentence, start, end)] = cluster
clusters[cluster].append((msentence, start, end))
if (len(text[(- 1)]) == 0):
text.pop()
return {'clusters': clusters, 'mentions': mentions, 'text': text} |
def parse_space_from_bayesmark(api_config) -> DesignSpace:
space = DesignSpace()
params = []
for param_name in api_config:
param_conf = api_config[param_name]
param_type = param_conf['type']
param_space = param_conf.get('space', None)
param_range = param_conf.get('range', None)
param_values = param_conf.get('values', None)
bo_param_conf = {'name': param_name}
if (param_type == 'int'):
bo_param_conf['type'] = 'int'
bo_param_conf['lb'] = param_range[0]
bo_param_conf['ub'] = param_range[1]
elif (param_type == 'bool'):
bo_param_conf['type'] = 'bool'
elif (param_type in ('cat', 'ordinal')):
bo_param_conf['type'] = 'cat'
bo_param_conf['categories'] = list(set(param_values))
elif (param_type == 'real'):
if (param_space in ('log', 'logit')):
bo_param_conf['type'] = 'pow'
bo_param_conf['base'] = 10
bo_param_conf['lb'] = param_range[0]
bo_param_conf['ub'] = param_range[1]
else:
bo_param_conf['type'] = 'num'
bo_param_conf['lb'] = param_range[0]
bo_param_conf['ub'] = param_range[1]
else:
assert False, ('type %s not handled in API' % param_type)
params.append(bo_param_conf)
space.parse(params)
return space |
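# Hypothetical bayesmark-style api_config exercising the branches above
# (DesignSpace is assumed to be HEBO's design space; names are illustrative):
api_config = {
'max_depth': {'type': 'int', 'space': 'linear', 'range': (1, 10)},
'learning_rate': {'type': 'real', 'space': 'log', 'range': (0.0001, 0.1)},
'booster': {'type': 'cat', 'values': ['gbtree', 'dart']},
}
space = parse_space_from_bayesmark(api_config) |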
class PassI_Bad_AP(DummyAP):
def run(self, dag):
super().run(dag)
cx_runs = dag.collect_runs(['cx'])
cx_runs_ids = set()
for run in cx_runs:
curr = []
for node in run:
curr.append(node._node_id)
cx_runs_ids.add(tuple(curr))
logging.getLogger(logger).info('cx_runs: %s', cx_runs_ids)
dag.remove_op_node(cx_runs.pop()[0])
logging.getLogger(logger).info('done removing') |
def get_model(name='AdaRNN'):
n_hiddens = [args.hidden_size for i in range(args.num_layers)]
return AdaRNN(use_bottleneck=True, bottleneck_width=64, n_input=args.d_feat, n_hiddens=n_hiddens, n_output=args.class_num, dropout=args.dropout, model_type=name, len_seq=args.len_seq, trans_loss=args.loss_type).cuda() |
def get_training_roidb(imdb):
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
if cfg.TRAIN.USE_ROTATE:
print('Appending rotate training examples...')
imdb.append_rotate_images()
print('done')
print('Preparing training data...')
rdl_roidb.prepare_roidb(imdb)
print('done')
return imdb.roidb |
def trial_greedy_compressed(inputs, output, size_dict, **kwargs):
opt = GreedyCompressed(**kwargs)
ssa_path = opt.get_ssa_path(inputs, output, size_dict)
tree = ContractionTree.from_path(inputs, output, size_dict, ssa_path=ssa_path)
tree.set_surface_order_from_path(ssa_path)
return tree |
class ShardingClient(object):
def __init__(self, dataset_name, batch_size, num_epochs, dataset_size, shuffle=False, task_type=elastic_training_pb2.TRAINING, num_minibatches_per_shard=_DEFAULT_MINI_BATCH_NUM_PER_SHARD, storage_type=''):
self._mc = MasterClient.singleton_instance()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._dataset_size = dataset_size
self._shuffle = shuffle
self._task_type = task_type
self._storage_type = storage_type
self._num_minibatches_per_shard = num_minibatches_per_shard
self._lock = threading.Lock()
self._reported_record_count = {}
self._current_task = None
self._pending_tasks = OrderedDict()
self._dataset_name = dataset_name
self._batch_count = 0
self._max_shard_count = sys.maxsize
self._shard_count = 0
self._report_sharding_params()
self._training_reporter = TFTrainingProcessReporter()
def _report_sharding_params(self):
if (self._num_epochs and self._dataset_size):
self._mc.report_dataset_shard_params(batch_size=self._batch_size, num_epochs=self._num_epochs, dataset_size=self._dataset_size, shuffle=self._shuffle, num_minibatches_per_shard=self._num_minibatches_per_shard, dataset_name=self._dataset_name, task_type=self._task_type, storage_type=self._storage_type)
def get_minibatch_count_per_epoch(self):
return (self._dataset_size // self._batch_size)
def reset_dataset(self):
self._mc.reset_dataset(self._dataset_name)
def get_current_task(self):
return self._current_task
def get_task(self):
self._training_reporter.set_start_time()
if (self._shard_count >= self._max_shard_count):
return None
success = False
for _ in range(5):
(success, task) = self._mc.get_task(self._dataset_name)
if success:
break
time.sleep(5)
if (not success):
return None
if ((task.shard.end - task.shard.start) > 0):
with self._lock:
self._pending_tasks[task.task_id] = task
if (len(self._pending_tasks) == 1):
self._current_task = task
self._shard_count += 1
return task
return None
def _report_task(self, task, err_msg=''):
self._mc.report_task_result(self._dataset_name, task.task_id, err_msg)
def report_all_task_error(self, err_msg):
while self._pending_tasks:
(_, task) = self._pending_tasks.popitem()
self._report_task(task, err_msg)
def report_batch_done(self, batch_size=None, err_msg='', task_ids=None):
reported = False
if (not self._pending_tasks):
return reported
record_count = (batch_size if batch_size else self._batch_size)
self._batch_count += 1
with self._lock:
if (not task_ids):
task_ids = list(self._pending_tasks.keys())
for task_id in task_ids:
if (record_count > 0):
task = self._pending_tasks[task_id]
task_record_count = (task.shard.end - task.shard.start)
self._reported_record_count.setdefault(task_id, 0)
cur_count = self._reported_record_count[task_id]
if ((cur_count + record_count) >= task_record_count):
self._report_task(task, err_msg)
reported = True
self._reported_record_count.pop(task_id)
self._pending_tasks.pop(task_id)
record_count = ((cur_count + record_count) - task_record_count)
self._report_training_local_step()
else:
self._reported_record_count[task_id] += record_count
record_count = 0
if self._pending_tasks:
self._current_task = next(iter(self._pending_tasks.values()))
return reported
def _report_training_local_step(self):
if (not self._training_reporter.called_in_tf_hook):
self._training_reporter.report_resource_with_step(self._batch_count)
def fetch_shard(self):
task = self.get_task()
if task:
return task.shard
return None
def get_shard_checkpoint(self):
shard_checkpoint = self._mc.get_shard_checkpoint(self._dataset_name)
return shard_checkpoint
def restore_shard_from_checkpoint(self, shard_checkpoint):
message = grpc.ShardCheckpoint(shard_checkpoint)
res = self._mc.report_shard_checkpoint(message)
return res.success
def get_total_sample_num(self):
return (self._dataset_size * self._num_epochs) |
class ResNet(nn.Module):
def __init__(self, num_classes, loss, block, layers, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, last_stride=2, fc_dims=None, dropout_p=None, efdmix_layers=[], efdmix_p=0.5, efdmix_alpha=0.1, **kwargs):
super(ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.loss = loss
self.feature_dim = (512 * block.expansion)
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride, dilate=replace_stride_with_dilation[2])
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(fc_dims, (512 * block.expansion), dropout_p)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.efdmix = None
if efdmix_layers:
self.efdmix = EFDMix(p=efdmix_p, alpha=efdmix_alpha, mix='random')
print('Insert EFDMix after the following layers: {}'.format(efdmix_layers))
self.efdmix_layers = efdmix_layers
self._init_params()
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if (fc_dims is None):
self.feature_dim = input_dim
return None
assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if (dropout_p is not None):
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[(- 1)]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if (m.bias is not None):
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if ('layer1' in self.efdmix_layers):
x = self.efdmix(x)
x = self.layer2(x)
if ('layer2' in self.efdmix_layers):
x = self.efdmix(x)
x = self.layer3(x)
if ('layer3' in self.efdmix_layers):
x = self.efdmix(x)
x = self.layer4(x)
if ('layer4' in self.efdmix_layers):
x = self.efdmix(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), (- 1))
if (self.fc is not None):
v = self.fc(v)
if (not self.training):
return v
y = self.classifier(v)
if (self.loss == 'softmax'):
return y
elif (self.loss == 'triplet'):
return (y, v)
else:
raise KeyError('Unsupported loss: {}'.format(self.loss)) |
class ImageDataset(object):
def __init__(self, dataset, task, root_dir, domain_name, domain_label=(- 1), labels=None, transform=None, target_transform=None, indices=None, test_envs=[], mode='Default'):
self.imgs = ImageFolder((root_dir + domain_name)).imgs
self.domain_num = 0
self.task = task
self.dataset = dataset
imgs = [item[0] for item in self.imgs]
labels = [item[1] for item in self.imgs]
self.labels = np.array(labels)
self.x = imgs
self.transform = transform
self.target_transform = target_transform
if (indices is None):
self.indices = np.arange(len(imgs))
else:
self.indices = indices
if (mode == 'Default'):
self.loader = default_loader
elif (mode == 'RGB'):
self.loader = rgb_loader
elif (mode == 'L'):
self.loader = l_loader
self.dlabels = (np.ones(self.labels.shape) * (domain_label - Nmax(test_envs, domain_label)))
def set_labels(self, tlabels=None, label_type='domain_label'):
assert (len(tlabels) == len(self.x))
if (label_type == 'domain_label'):
self.dlabels = tlabels
elif (label_type == 'class_label'):
self.labels = tlabels
def target_trans(self, y):
if (self.target_transform is not None):
return self.target_transform(y)
else:
return y
def input_trans(self, x):
if (self.transform is not None):
return self.transform(x)
else:
return x
def __getitem__(self, index):
index = self.indices[index]
img = self.input_trans(self.loader(self.x[index]))
ctarget = self.target_trans(self.labels[index])
dtarget = self.target_trans(self.dlabels[index])
return (img, ctarget, dtarget)
def __len__(self):
return len(self.indices) |
def train_data_creator(config, batch_size):
def get_training_set(upscale_factor):
root_dir = download_bsd300()
train_dir = join(root_dir, 'train')
crop_size = calculate_valid_crop_size(256, upscale_factor)
return DatasetFromFolder(train_dir, input_transform=input_transform(crop_size, upscale_factor), target_transform=target_transform(crop_size))
train_set = get_training_set(config.get('upscale_factor', 3))
training_data_loader = DataLoader(dataset=train_set, batch_size=batch_size, num_workers=0, shuffle=True)
return training_data_loader |
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model=288, nhead=8, dim_feedforward=2048, dropout=0.1, activation='relu', self_posembed=None):
super().__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.self_posembed = self_posembed
def with_pos_embed(self, tensor, pos=None):
return (tensor if (pos is None) else (tensor + pos))
def forward_post(self, src, pos=None):
if ((self.self_posembed is not None) and (pos is not None)):
pos_embed = self.self_posembed(pos).permute(2, 0, 1)
else:
pos_embed = None
src = src.permute(2, 0, 1)
q = k = v = self.with_pos_embed(src, pos_embed)
src2 = self.self_attn(q, k, value=v, attention_type='')[0]
src = (src + self.dropout1(src2))
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = (src + self.dropout2(src2))
src = self.norm2(src)
src = src.permute(1, 2, 0)
return src
def forward(self, src, pos=None):
return self.forward_post(src, pos) |
def exp_post(t, y, t_star, decay, scale, log_noise, asymptote):
fit = (asymptote + (scale * np.exp(((- decay) * np.array(t_star)))))
return (fit, np.zeros((fit.size, fit.size))) |
def get_linear_data(a=2, b=5, size=None):
if (size is None):
raise ValueError('size (number of samples) must be provided')
x = np.arange(0, 10, (10 / size), dtype=np.float32)
y = ((a * x) + b)
return (x, y) |
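# Usage sketch: 20 evenly spaced samples of y = 2x + 5 on [0, 10).
(x, y) = get_linear_data(size=20)
assert x.shape == (20,) and np.allclose(y, ((2 * x) + 5)) |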
class ConcatTable(Container):
def __init__(self, bigdl_type='float'):
super(ConcatTable, self).__init__(None, bigdl_type) |
def find_suffix(seq_a, seq_b):
(pointer_a, pointer_b) = ((len(seq_a) - 1), (len(seq_b) - 1))
while ((pointer_a >= 0) and (pointer_b >= 0)):
a = seq_a[pointer_a]
b = seq_b[pointer_b]
if (a != b):
return [pointer_a, pointer_b]
else:
pointer_a -= 1
pointer_b -= 1
return [pointer_a, pointer_b] |
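# Usage sketch for find_suffix: the returned pair marks the last positions at
# which the two sequences differ; fully matching sequences drive both to -1.
assert find_suffix([1, 2, 3, 4], [9, 8, 3, 4]) == [1, 1]
assert find_suffix(['a', 'b'], ['a', 'b']) == [-1, -1] |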
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, help='cfg file path', required=True)
parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')
parser.add_argument('--checkpoint', type=str, help='Checkpoint to load', default='')
args = parser.parse_args()
print(args, end='\n\n')
cfg = get_cfg_defaults()
if (args.cfg is not None):
cfg_file = args.cfg
cfg = update_cfg(cfg, args.cfg)
cfg.cfg_file = cfg_file
return (cfg, args) |
class GraphColoringViewer(Viewer):
def __init__(self, name: str='GraphColoring') -> None:
self._name = name
self._animation: Optional[animation.Animation] = None
def render(self, state: State, save_path: Optional[str]=None, ax: Optional[plt.Axes]=None) -> None:
num_nodes = state.adj_matrix.shape[0]
self.node_scale = self._calculate_node_scale(num_nodes)
self._color_mapping = self._create_color_mapping(num_nodes)
self._clear_display()
(fig, ax) = self._get_fig_ax(ax)
pos = self._spring_layout(state.adj_matrix, num_nodes)
self._render_nodes(ax, pos, state.colors)
self._render_edges(ax, pos, state.adj_matrix, num_nodes)
ax.set_xlim((- 0.5), 0.5)
ax.set_ylim((- 0.5), 0.5)
ax.set_aspect('equal')
ax.axis('off')
if save_path:
fig.savefig(save_path, bbox_inches='tight', pad_inches=0.2)
self._display_human(fig)
def animate(self, states: Sequence[State], interval: int=500, save_path: Optional[str]=None) -> animation.FuncAnimation:
num_nodes = states[0].adj_matrix.shape[0]
self.node_scale = self._calculate_node_scale(num_nodes)
self._color_mapping = self._create_color_mapping(num_nodes)
(fig, ax) = self._get_fig_ax(ax=None)
plt.title(f'{self._name}')
def make_frame(state_index: int) -> None:
state = states[state_index]
self.render(state, ax=ax)
_animation = animation.FuncAnimation(fig, make_frame, frames=len(states), interval=interval, blit=False)
if save_path:
_animation.save(save_path)
return _animation
def close(self) -> None:
plt.close(self._name)
def _display_human(self, fig: plt.Figure) -> None:
if plt.isinteractive():
fig.canvas.draw()
if jumanji.environments.is_colab():
plt.show()
else:
fig.canvas.draw_idle()
fig.canvas.flush_events()
def _clear_display(self) -> None:
if jumanji.environments.is_colab():
import IPython.display
IPython.display.clear_output(True)
def _compute_repulsive_forces(self, repulsive_forces: np.ndarray, pos: np.ndarray, k: float, num_nodes: int) -> np.ndarray:
for i in range(num_nodes):
for j in range((i + 1), num_nodes):
delta = (pos[i] - pos[j])
distance = np.linalg.norm(delta)
direction = (delta / (distance + 1e-06))
force = ((k * k) / (distance + 1e-06))
repulsive_forces[i] += (direction * force)
repulsive_forces[j] -= (direction * force)
return repulsive_forces
def _compute_attractive_forces(self, graph: chex.Array, attractive_forces: np.ndarray, pos: np.ndarray, k: float, num_nodes: int) -> np.ndarray:
for i in range(num_nodes):
for j in range(num_nodes):
if graph[(i, j)]:
delta = (pos[i] - pos[j])
distance = np.linalg.norm(delta)
direction = (delta / (distance + 1e-06))
force = ((distance * distance) / k)
attractive_forces[i] -= (direction * force)
attractive_forces[j] += (direction * force)
return attractive_forces
def _spring_layout(self, graph: chex.Array, num_nodes: int, seed: int=42) -> List[Tuple[(float, float)]]:
rng = np.random.default_rng(seed)
pos = ((rng.random((num_nodes, 2)) * 2) - 1)
iterations = 100
k = np.sqrt((5 / num_nodes))
temperature = 2.0
for _ in range(iterations):
repulsive_forces = self._compute_repulsive_forces(np.zeros((num_nodes, 2)), pos, k, num_nodes)
attractive_forces = self._compute_attractive_forces(graph, np.zeros((num_nodes, 2)), pos, k, num_nodes)
pos += ((repulsive_forces + attractive_forces) * temperature)
temperature *= 0.9
pos = np.clip(pos, (- 1), 1)
return [(float(p[0]), float(p[1])) for p in pos]
def _get_fig_ax(self, ax: Optional[plt.Axes]) -> Tuple[(plt.Figure, plt.Axes)]:
if (ax is None):
(fig, ax) = plt.subplots(figsize=(self.node_scale, self.node_scale))
plt.title(f'{self._name}')
else:
fig = ax.figure
ax.clear()
return (fig, ax)
def _render_nodes(self, ax: plt.Axes, pos: List[Tuple[(float, float)]], colors: chex.Array) -> None:
node_radius = ((0.05 * 5) / self.node_scale)
for (i, (x, y)) in enumerate(pos):
ax.add_artist(plt.Circle((x, y), node_radius, color=self._color_mapping[colors[i]], fill=(colors[i] != (- 1))))
ax.text(x, y, str(i), color='white', ha='center', va='center', weight='bold')
def _render_edges(self, ax: plt.Axes, pos: List[Tuple[(float, float)]], adj_matrix: chex.Array, num_nodes: int) -> None:
for i in range(num_nodes):
for j in range((i + 1), num_nodes):
if adj_matrix[(i, j)]:
ax.plot([pos[i][0], pos[j][0]], [pos[i][1], pos[j][1]], color=self._color_mapping[(- 1)], linewidth=0.5)
def _calculate_node_scale(self, num_nodes: int) -> int:
return (5 + int(np.sqrt(num_nodes)))
def _create_color_mapping(self, num_nodes: int) -> List[Tuple[(float, float, float, float)]]:
colormap_indices = np.arange(0, 1, (1 / num_nodes))
colormap = cm.get_cmap('hsv', (num_nodes + 1))
color_mapping = []
for colormap_idx in colormap_indices:
color_mapping.append(colormap(colormap_idx))
color_mapping.append((0.0, 0.0, 0.0, 1.0))
return color_mapping |
def isect_seg_seg_v2_point(v1, v2, v3, v4, bias=NUM_ZERO):
if (v1 > v2):
(v1, v2) = (v2, v1)
if (v3 > v4):
(v3, v4) = (v4, v3)
if ((v1, v2) > (v3, v4)):
(v1, v2, v3, v4) = (v3, v4, v1, v2)
div = (((v2[0] - v1[0]) * (v4[1] - v3[1])) - ((v2[1] - v1[1]) * (v4[0] - v3[0])))
if (div == NUM_ZERO):
return None
vi = (((((v3[0] - v4[0]) * ((v1[0] * v2[1]) - (v1[1] * v2[0]))) - ((v1[0] - v2[0]) * ((v3[0] * v4[1]) - (v3[1] * v4[0])))) / div), ((((v3[1] - v4[1]) * ((v1[0] * v2[1]) - (v1[1] * v2[0]))) - ((v1[1] - v2[1]) * ((v3[0] * v4[1]) - (v3[1] * v4[0])))) / div))
fac = line_point_factor_v2(vi, v1, v2, default=(- NUM_ONE))
if ((fac < (NUM_ZERO - bias)) or (fac > (NUM_ONE + bias))):
return None
fac = line_point_factor_v2(vi, v3, v4, default=(- NUM_ONE))
if ((fac < (NUM_ZERO - bias)) or (fac > (NUM_ONE + bias))):
return None
return vi |
class VOTVideo(Video):
def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, camera_motion, illum_change, motion_change, size_change, occlusion, load_img=False):
super(VOTVideo, self).__init__(name, root, video_dir, init_rect, img_names, gt_rect, None, load_img)
self.tags = {'all': ([1] * len(gt_rect))}
self.tags['camera_motion'] = camera_motion
self.tags['illum_change'] = illum_change
self.tags['motion_change'] = motion_change
self.tags['size_change'] = size_change
self.tags['occlusion'] = occlusion
all_tag = [v for (k, v) in self.tags.items() if (len(v) > 0)]
self.tags['empty'] = np.all((1 - np.array(all_tag)), axis=1).astype(np.int32).tolist()
self.tag_names = list(self.tags.keys())
if (not load_img):
img_name = self.img_names[0]
img = np.array(Image.open(img_name), np.uint8)
self.width = img.shape[1]
self.height = img.shape[0]
def select_tag(self, tag, start=0, end=0):
if (tag == 'empty'):
return self.tags[tag]
return self.tags[tag][start:end]
def load_tracker(self, path, tracker_names=None, store=True):
if (not tracker_names):
tracker_names = [x.split('/')[(- 1)] for x in glob(path) if os.path.isdir(x)]
if isinstance(tracker_names, str):
tracker_names = [tracker_names]
for name in tracker_names:
traj_files = glob(os.path.join(path, name, 'baseline', self.name, '*0*.txt'))
if (len(traj_files) != 15):
traj_files = traj_files[0:1]
pred_traj = []
for traj_file in traj_files:
with open(traj_file, 'r') as f:
traj = [list(map(float, x.strip().split(','))) for x in f.readlines()]
pred_traj.append(traj)
if store:
self.pred_trajs[name] = pred_traj
else:
return pred_traj |
def plant_seeds(random_seed=False):
if random_seed:
print('Randomized seed')
manualSeed = random.randint(1, 10000)
print('Random Seed: ', manualSeed)
else:
manualSeed = 1
random.seed(manualSeed)
torch.manual_seed(manualSeed)
np.random.seed(manualSeed) |
def extra_trees_regression(name, criterion='mse', **kwargs):
def _name(msg):
return ('%s.%s_%s' % (name, 'etr', msg))
hp_space = _trees_hp_space(_name, **kwargs)
hp_space['criterion'] = criterion
return scope.sklearn_ExtraTreesRegressor(**hp_space) |
class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def training_batch_2nd_item_task_fbne(fbne_data, batch_index, model, sess, train_data, is_training):
for index in batch_index:
(b_target_item, b_k_shot_user, b_second_order_items, b_third_order_users, b_oracle_item_ebd, b_mask_num_second_order_item, b_mask_num_third_order_user, b_intra_2nd_item, b_intra_3rd_item) = fbne_data.batch_gen_3rd_item_task(train_data, index)
feed_dict = {model.target_item: b_oracle_item_ebd, model.support_user_1st_pos: b_k_shot_user, model.training_phrase_user_task: is_training, model.support_item_2nd_pos: b_second_order_items, model.inter_support_2nd_item_pos: b_intra_2nd_item, model.training_phrase_item_task: is_training}
sess.run([model.loss_2nd_item_pos, model.optimizer_2nd_item_task_pos], feed_dict) |
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
lrs = []
for step in range(num_steps):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if (step == (num_steps // 2)):
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, 'schedule.bin')
torch.save(scheduler.state_dict(), file_name)
state_dict = torch.load(file_name)
scheduler.load_state_dict(state_dict)
return lrs |
_config
def cfg_docker():
cfg = {'task': 'keypoints3d', 'model_base_path': '/mnt/models/', 'store_representation': False, 'store_prediction': True, 'split_to_convert': 'splits.taskonomy_no_midlevel["fullplus"]', 'data_dir': '/mnt/data', 'save_dir': '/mnt/data', 'folders_to_convert': None, 'batch_size': 64, 'n_dataloader_workers': 8} |
class AbstractEnvRunner(ABC):
def __init__(self, *, env, model, nsteps):
self.env = env
self.model = model
self.nenv = nenv = (env.num_envs if hasattr(env, 'num_envs') else 1)
self.batch_ob_shape = (((nenv * nsteps),) + env.observation_space.shape)
self.obs = np.zeros(((nenv,) + env.observation_space.shape), dtype=env.observation_space.dtype.name)
self.obs[:] = env.reset()
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
def run(self):
raise NotImplementedError |
def load_data(file):
data = pd.read_csv((file + '.csv'), sep='\t')
data.sort_values(by=['SessionId', 'Time'], inplace=True)
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
return data |
class EventStorage():
def __init__(self, start_iter=0):
self._history = defaultdict(HistoryBuffer)
self._smoothing_hints = {}
self._latest_scalars = {}
self._iter = start_iter
self._current_prefix = ''
self._vis_data = []
self._histograms = []
def put_image(self, img_name, img_tensor):
self._vis_data.append((img_name, img_tensor, self._iter))
def put_scalar(self, name, value, smoothing_hint=True):
name = (self._current_prefix + name)
history = self._history[name]
value = float(value)
history.update(value, self._iter)
self._latest_scalars[name] = value
existing_hint = self._smoothing_hints.get(name)
if (existing_hint is not None):
assert (existing_hint == smoothing_hint), 'Scalar {} was put with a different smoothing_hint!'.format(name)
else:
self._smoothing_hints[name] = smoothing_hint
def put_scalars(self, *, smoothing_hint=True, **kwargs):
for (k, v) in kwargs.items():
self.put_scalar(k, v, smoothing_hint=smoothing_hint)
def put_histogram(self, hist_name, hist_tensor, bins=1000):
(ht_min, ht_max) = (hist_tensor.min().item(), hist_tensor.max().item())
hist_counts = torch.histc(hist_tensor, bins=bins)
hist_edges = torch.linspace(start=ht_min, end=ht_max, steps=(bins + 1), dtype=torch.float32)
hist_params = dict(tag=hist_name, min=ht_min, max=ht_max, num=len(hist_tensor), sum=float(hist_tensor.sum()), sum_squares=float(torch.sum((hist_tensor ** 2))), bucket_limits=hist_edges[1:].tolist(), bucket_counts=hist_counts.tolist(), global_step=self._iter)
self._histograms.append(hist_params)
def history(self, name):
ret = self._history.get(name, None)
if (ret is None):
raise KeyError('No history metric available for {}!'.format(name))
return ret
def histories(self):
return self._history
def latest(self):
return self._latest_scalars
def latest_with_smoothing_hint(self, window_size=20):
result = {}
for (k, v) in self._latest_scalars.items():
result[k] = (self._history[k].median(window_size) if self._smoothing_hints[k] else v)
return result
def smoothing_hints(self):
return self._smoothing_hints
def step(self):
self._iter += 1
self._latest_scalars = {}
@property
def iter(self):
return self._iter
@property
def iteration(self):
return self._iter
def __enter__(self):
_CURRENT_STORAGE_STACK.append(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert (_CURRENT_STORAGE_STACK[(- 1)] == self)
_CURRENT_STORAGE_STACK.pop()
@contextmanager
def name_scope(self, name):
old_prefix = self._current_prefix
self._current_prefix = (name.rstrip('/') + '/')
yield
self._current_prefix = old_prefix
def clear_images(self):
self._vis_data = []
def clear_histograms(self):
self._histograms = [] |
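# Usage sketch for EventStorage (relies on the HistoryBuffer helper and, for
# the context-manager form, the module-level _CURRENT_STORAGE_STACK list that
# __enter__/__exit__ reference):
storage = EventStorage(start_iter=0)
for it in range(5):
    storage.put_scalar('loss', 1.0 / (it + 1), smoothing_hint=True)
    storage.step()
print(storage.history('loss').median(5))  # smoothed value over the window |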
def _get_learningrate_scheduler(optim, decay):
if (decay is None):
return None
if (isinstance(decay, torch.optim.lr_scheduler._LRScheduler) or (decay.__class__.__name__ == 'ReduceLROnPlateau')):
return decay
if (decay[0] == 'step'):
return torch.optim.lr_scheduler.StepLR(optim, step_size=decay[1], gamma=decay[2])
raise NotImplementedError(f'{decay[0]} learning rate scheduler to be implemented for backend pytorch.') |
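# Sketch of the decay-spec convention implied above: a ('step', step_size, gamma)
# tuple builds a StepLR, while an already-constructed scheduler passes through.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.01)
sched = _get_learningrate_scheduler(opt, ('step', 30, 0.1))
assert isinstance(sched, torch.optim.lr_scheduler.StepLR)
assert _get_learningrate_scheduler(opt, sched) is sched |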
@registry.register_action_space_configuration(name='v0')
class HabitatSimV0ActionSpaceConfiguration(ActionSpaceConfiguration):
def get(self):
return {HabitatSimActions.STOP: habitat_sim.ActionSpec('stop'), HabitatSimActions.MOVE_FORWARD: habitat_sim.ActionSpec('move_forward', habitat_sim.ActuationSpec(amount=self.config.FORWARD_STEP_SIZE)), HabitatSimActions.TURN_LEFT: habitat_sim.ActionSpec('turn_left', habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE)), HabitatSimActions.TURN_RIGHT: habitat_sim.ActionSpec('turn_right', habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE))} |
class RNNDecoder(nn.Module):
def __init__(self, n_vocab, ans_n_vocab, d_word_vec, d_model, n_layer, rnn, d_k, feat_vocab, d_feat_vec, d_enc_model, n_enc_layer, input_feed, copy, answer, separate, coverage, layer_attn, maxout_pool_size, dropout, device=None, encoder_word_emb=None):
self.name = 'rnn'
super(RNNDecoder, self).__init__()
self.n_layer = n_layer
self.layer_attn = layer_attn
self.separate = separate
self.coverage = coverage
self.copy = copy
self.maxout_pool_size = maxout_pool_size
self.n_vocab_size = n_vocab
input_size = d_word_vec
self.input_feed = input_feed
if input_feed:
input_size += d_enc_model
self.ans_emb_weight = encoder_word_emb
self.answer = answer
tmp_in = (d_word_vec if answer else d_enc_model)
self.decInit = DecInit(d_enc=tmp_in, d_dec=d_model, n_enc_layer=n_enc_layer)
self.feature = bool(feat_vocab)
if self.feature:
self.feat_embs = nn.ModuleList([nn.Embedding(n_f_vocab, d_feat_vec, padding_idx=Constants.PAD) for n_f_vocab in feat_vocab])
feat_size = ((len(feat_vocab) * d_feat_vec) if self.feature else 0)
self.d_enc_model = d_enc_model
self.word_emb_type = (ans_n_vocab == n_vocab)
self.word_emb = nn.Embedding(n_vocab, d_word_vec, padding_idx=Constants.PAD)
self.rnn = StackedRNN(n_layer, input_size, d_model, dropout, rnn=rnn)
self.attn = ConcatAttention((d_enc_model + feat_size), d_model, d_k, coverage)
self.readout = nn.Linear(((d_word_vec + d_model) + self.d_enc_model), d_model)
self.maxout = MaxOut(maxout_pool_size)
if copy:
self.copy_switch = nn.Linear((d_enc_model + d_model), 1)
self.hidden_size = d_model
self.dropout = nn.Dropout(dropout)
self.device = device
@classmethod
def from_opt(cls, opt):
return cls(opt['n_vocab'], opt['ans_n_vocab'], opt['d_word_vec'], opt['d_model'], opt['n_layer'], opt['rnn'], opt['d_k'], opt['feat_vocab'], opt['d_feat_vec'], opt['d_enc_model'], opt['n_enc_layer'], opt['input_feed'], opt['copy'], opt['answer'], opt['separate'], opt['coverage'], opt['layer_attn'], opt['maxout_pool_size'], opt['dropout'], opt['device'], opt['encoder_word_emb'])
def attn_init(self, context):
if isinstance(context, list):
context = context[(- 1)]
if isinstance(context, tuple):
context = torch.cat(context, dim=(- 1))
batch_size = context.size(0)
hidden_sizes = (batch_size, self.d_enc_model)
return Variable(context.data.new(*hidden_sizes).zero_(), requires_grad=False)
def forward(self, inputs, max_length=300, rl_type='', generator=None):
(tgt_seq, src_seq, src_indexes) = (inputs['tgt_seq'], inputs['src_seq'], inputs['src_indexes'])
if self.answer:
ans_seq = inputs['ans_seq']
(enc_output, hidden, feat_seqs) = (inputs['enc_output'], inputs['hidden'], inputs['feat_seqs'])
src_pad_mask = Variable(src_seq.data.eq(50256).float(), requires_grad=False)
if self.layer_attn:
n_enc_layer = len(enc_output)
src_pad_mask = src_pad_mask.repeat(1, n_enc_layer)
enc_output = torch.cat(enc_output, dim=1)
feat_inputs = None
if self.feature:
feat_inputs = [feat_emb(feat_seq) for (feat_seq, feat_emb) in zip(feat_seqs, self.feat_embs)]
feat_inputs = torch.cat(feat_inputs, dim=2)
if self.layer_attn:
feat_inputs = feat_inputs.repeat(1, n_enc_layer, 1)
cur_context = self.attn_init(enc_output)
if self.answer:
ans_words = torch.sum(F.embedding(ans_seq, self.ans_emb_weight), dim=1)
hidden = self.decInit(ans_words).unsqueeze(0)
else:
hidden = self.decInit(hidden).unsqueeze(0)
self.attn.apply_mask(src_pad_mask)
if rl_type:
return self.rl_forward(rl_type, generator, tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes)
else:
return self.nll_forward(tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes, max_length)
def nll_forward(self, tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes, max_length=300):
(tmp_context, tmp_coverage) = (None, None)
(dec_outputs, coverage_output, copy_output, copy_gate_output) = ([], [], [], [])
dec_input = self.word_emb(tgt_seq)
dec_input = dec_input.transpose(0, 1)
for (seq_idx, dec_input_emb) in enumerate(dec_input.split(1)):
dec_input_emb = dec_input_emb.squeeze(0)
raw_dec_input_emb = dec_input_emb
if self.input_feed:
dec_input_emb = torch.cat((dec_input_emb, cur_context), dim=1)
(dec_output, hidden) = self.rnn(dec_input_emb, hidden)
if self.coverage:
if (tmp_coverage is None):
tmp_coverage = Variable(torch.zeros((enc_output.size(0), enc_output.size(1))))
if self.device:
tmp_coverage = tmp_coverage.to(self.device)
(cur_context, attn, tmp_context, next_coverage) = self.attn(dec_output, enc_output, precompute=tmp_context, coverage=tmp_coverage, feat_inputs=feat_inputs, feature=self.feature)
avg_tmp_coverage = (tmp_coverage / max(1, seq_idx))
coverage_loss = torch.sum(torch.min(attn, avg_tmp_coverage), dim=1)
tmp_coverage = next_coverage
coverage_output.append(coverage_loss)
else:
(cur_context, attn, tmp_context) = self.attn(dec_output, enc_output, precompute=tmp_context, feat_inputs=feat_inputs, feature=self.feature)
if self.copy:
copy_prob = self.copy_switch(torch.cat((dec_output, cur_context), dim=1))
copy_prob = torch.sigmoid(copy_prob)
if self.layer_attn:
attn = attn.view(attn.size(0), len(enc_output), (- 1))
attn = attn.sum(1)
if self.separate:
out = torch.zeros([len(attn), max_length], device=(self.device if self.device else None))
for i in range(len(attn)):
data_length = src_indexes[i]
out[i].narrow(0, 1, (data_length - 1)).copy_(attn[i][1:src_indexes[i]])
attn = out
norm_term = attn.sum(1, keepdim=True)
attn = (attn / norm_term)
copy_output.append(attn)
copy_gate_output.append(copy_prob)
readout = self.readout(torch.cat((raw_dec_input_emb, dec_output, cur_context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
dec_outputs.append(output)
dec_output = torch.stack(dec_outputs).transpose(0, 1)
rst = {}
(rst['pred'], rst['attn'], rst['context']) = (dec_output, attn, cur_context)
if self.copy:
copy_output = torch.stack(copy_output).transpose(0, 1)
copy_gate_output = torch.stack(copy_gate_output).transpose(0, 1)
(rst['copy_pred'], rst['copy_gate']) = (copy_output, copy_gate_output)
if self.coverage:
coverage_output = torch.stack(coverage_output).transpose(0, 1)
rst['coverage_pred'] = coverage_output
return rst
def rl_forward(self, rl_type, generator, tgt_seq, cur_context, hidden, enc_output, feat_inputs, src_indexes):
(tmp_context, tmp_coverage, seq_idx) = (None, None, 0)
(dec_outputs, coverage_output, copy_output, copy_gate_output) = ([], [], [], [])
(max_length, input_seq) = (tgt_seq.size((- 1)), tgt_seq.transpose(0, 1)[0])
rand_input_seq = input_seq.clone().detach()
(decoded_text, rand_decoded_text) = ([], [])
init_tokens = torch.zeros(input_seq.size(), device=input_seq.device).long()
rand_tokens = torch.zeros(input_seq.size(), device=input_seq.device).long()
rand_choice_list = ([0, 102] + [idd for idd in range(1001, self.n_vocab_size)])
for i in range(max_length):
decoded_text.append(input_seq.long())
rand_decoded_text.append(rand_input_seq.long())
dec_input_emb = self.word_emb(input_seq.long())
raw_dec_input_emb = dec_input_emb
if self.input_feed:
dec_input_emb = torch.cat((dec_input_emb, cur_context), dim=1)
(dec_output, hidden) = self.rnn(dec_input_emb, hidden)
if self.coverage:
if (tmp_coverage is None):
tmp_coverage = Variable(torch.zeros((enc_output.size(0), enc_output.size(1))))
if self.device:
tmp_coverage = tmp_coverage.to(self.device)
(cur_context, attn, tmp_context, next_coverage) = self.attn(dec_output, enc_output, precompute=tmp_context, coverage=tmp_coverage, feat_inputs=feat_inputs, feature=self.feature)
avg_tmp_coverage = (tmp_coverage / max(1, seq_idx))
coverage_loss = torch.sum(torch.min(attn, avg_tmp_coverage), dim=1)
tmp_coverage = next_coverage
coverage_output.append(coverage_loss)
else:
(cur_context, attn, tmp_context) = self.attn(dec_output, enc_output, precompute=tmp_context, feat_inputs=feat_inputs, feature=self.feature)
if self.copy:
copy_prob = self.copy_switch(torch.cat((dec_output, cur_context), dim=1))
copy_prob = torch.sigmoid(copy_prob)
if self.layer_attn:
attn = attn.view(attn.size(0), len(enc_output), (- 1))
attn = attn.sum(1)
if self.separate:
out = torch.zeros([len(attn), max_length], device=(self.device if self.device else None))
for j in range(len(attn)):
data_length = src_indexes[j]
out[j].narrow(0, 1, (data_length - 1)).copy_(attn[j][1:src_indexes[j]])
attn = out
norm_term = attn.sum(1, keepdim=True)
attn = (attn / norm_term)
copy_output.append(attn)
copy_gate_output.append(copy_prob)
readout = self.readout(torch.cat((raw_dec_input_emb, dec_output, cur_context), dim=1))
maxout = self.maxout(readout)
output = self.dropout(maxout)
dec_outputs.append(output)
paddings = (input_seq.eq(Constants.PAD).float() + input_seq.eq(102).float()).eq(0).float()
rand_paddings = (rand_input_seq.eq(Constants.PAD).float() + rand_input_seq.eq(102).float()).eq(0).float()
token_dict = F.softmax(generator(output), dim=(- 1))
for b in range(input_seq.size(0)):
selected_idx = token_dict[b].multinomial(1, replacement=False).view((- 1)).data[0]
init_tokens[b] = selected_idx.item()
rand_tokens[b] = random.choice(rand_choice_list)
input_seq = torch.where((paddings > 0), init_tokens, paddings.long())
rand_input_seq = torch.where((rand_paddings > 0), rand_tokens, rand_paddings.long())
seq_idx += 1
decoded_text.append(input_seq)
rand_decoded_text.append(rand_input_seq)
dec_output = torch.stack(dec_outputs).transpose(0, 1)
rst = {}
(rst['pred'], rst['attn'], rst['context']) = (dec_output, attn, cur_context)
rst['decoded_text'] = torch.stack(decoded_text).transpose(0, 1)
rst['rand_decoded_text'] = torch.stack(rand_decoded_text).transpose(0, 1)
if self.copy:
copy_output = torch.stack(copy_output).transpose(0, 1)
copy_gate_output = torch.stack(copy_gate_output).transpose(0, 1)
(rst['copy_pred'], rst['copy_gate']) = (copy_output, copy_gate_output)
if self.coverage:
coverage_output = torch.stack(coverage_output).transpose(0, 1)
rst['coverage_pred'] = coverage_output
return rst |
class GraphConverterWithoutCalib():
def __init__(self, model, data_loader=None, recover_config=None, new_api=False, performance_only=False, use_bf16=False):
self.model = model
self.output_tensor_names = self.model.output_tensor_names
self.input_tensor_names = self.model.input_tensor_names
self.op_wise_config = recover_config['op_wise_config']
self.advance_config = deep_get(recover_config, 'advance')
self.device = (recover_config['device'] if ('device' in recover_config) else 'cpu')
self.int8_sequences = recover_config['int8_sequences']
self.fp32_ops = recover_config['fp32_ops']
self.bf16_ops = recover_config['bf16_ops']
self.recipes = recover_config['recipes']
self.quantized_node_info = []
self._calibration_data = []
self._fp32_print_data = []
self.data_loader = data_loader
self.recover_config = recover_config
self._check_tf_version()
self._check_args()
self._gen_tmp_filenames()
self.new_api = new_api
self.performance_only = performance_only
self.use_bf16 = use_bf16
self._tmp_graph_def = copy.deepcopy(self.model.graph_def)
def _check_tf_version(self):
is_supported_version = False
is_sprbase_version = False
try:
from tensorflow import python
if (hasattr(python, 'pywrap_tensorflow') and hasattr(python.pywrap_tensorflow, 'IsMklEnabled')):
from tensorflow.python.pywrap_tensorflow import IsMklEnabled
elif hasattr(python.util, '_pywrap_util_port'):
from tensorflow.python.util._pywrap_util_port import IsMklEnabled
else:
from tensorflow.python._pywrap_util_port import IsMklEnabled
if (IsMklEnabled() and version1_lte_version2(TF_SUPPORTED_MIN_VERSION, tf.version.VERSION)):
is_supported_version = True
if (version1_gte_version2(tf.version.VERSION, '2.6.0') and (os.getenv('TF_ENABLE_ONEDNN_OPTS') == '1')):
is_supported_version = True
if version1_gte_version2(tf.version.VERSION, '2.9.0'):
is_supported_version = True
if (tf.version.VERSION == '1.15.0-up3'):
is_supported_version = True
if (tf.version.VERSION in TF_SPR_BASE_VERSIONS):
is_supported_version = True
is_sprbase_version = True
except Exception as e:
raise ValueError(e)
finally:
if (version1_gt_version2(tf.version.VERSION, TF_SUPPORTED_MAX_VERSION) and (not is_sprbase_version)):
logger.warning('Please note the {} version of TensorFlow is not fully verified! Consider using a version between {} and {} if you encounter problems.'.format(tf.version.VERSION, TF_SUPPORTED_MIN_VERSION, TF_SUPPORTED_MAX_VERSION))
if (version1_eq_version2(tf.version.VERSION, '2.5.0') and (os.getenv('TF_ENABLE_MKL_NATIVE_FORMAT') != '0')):
logger.fatal('Please set environment variable TF_ENABLE_MKL_NATIVE_FORMAT=0 when TensorFlow 2.5.0 installed.')
if (version1_gte_version2(tf.version.VERSION, '2.6.0') and version1_lt_version2(tf.version.VERSION, '2.9.0') and (os.getenv('TF_ENABLE_ONEDNN_OPTS') != '1')):
logger.fatal('Please set environment variable TF_ENABLE_ONEDNN_OPTS=1 when TensorFlow >= 2.6.0 and < 2.9.0 installed.')
if (not is_supported_version):
raise ValueError('Please install a TensorFlow version >={} and <={}.'.format(TF_SUPPORTED_MIN_VERSION, TF_SUPPORTED_MAX_VERSION))
def _check_args(self):
if (self.model.workspace_path and (not os.path.isdir(self.model.workspace_path)) and (not os.path.exists(os.path.dirname(self.model.workspace_path)))):
raise ValueError('"output_graph" directory does not exist.')
self._output_path = self.model.workspace_path
def _gen_tmp_filenames(self):
self._int8_dynamic_range_model_path = os.path.join(self._output_path, 'int8_dynamic_range_graph')
self._int8_logged_model_path = os.path.join(self._output_path, 'int8_logged_graph')
self._fp32_logged_model_path = os.path.join(self._output_path, 'fp32_logged_graph')
self._int8_frozen_range_model_path = os.path.join(self._output_path, 'int8_frozen_range_graph')
self._bf16_mixed_precision_model_path = os.path.join(self._output_path, 'int8_bf16_mixed_precision_graph')
self.output_graph = os.path.join(self._output_path, 'int8_final_fused_graph')
self._tmp_model = Model(self.model._model, **self.model.kwargs)
self._tmp_model.output_tensor_names = self.output_tensor_names
self._tmp_model.input_tensor_names = self.input_tensor_names
def convert_without_calib(self):
model = self._tmp_model
if (len(self.op_wise_config) > 0):
model = self.quantize_without_calib()
if (len(self.bf16_ops) > 0):
model = self.bf16_convert()
post_cse_graph_def = PostCseOptimizer(model.graph_def).do_transformation()
post_cse_graph_def.library.CopyFrom(self.model.graph_def.library)
model.graph_def = post_cse_graph_def
if debug:
model.save(self.output_graph)
return model
def _analysis_rnn_model(self):
g = GraphAnalyzer()
g.graph = self._tmp_graph_def
graph_info = g.parse_graph()
rnn_pattern = [['TensorArrayV3'], ['Enter'], ['TensorArrayReadV3'], ['MatMul'], ['BiasAdd']]
target_nodes = g.query_fusion_pattern_nodes(rnn_pattern)
res = {}
for i in target_nodes:
if ((i[(- 3)] not in self.bf16_ops) and (i[(- 3)] not in self.fp32_ops)):
res[(i[(- 3)], i[(- 2)])] = graph_info[i[1]].node.attr['frame_name'].s.decode()
return res
def quantize_without_calib(self):
try:
self._quantize_graph()
self._rnn_details = self._analysis_rnn_model()
self._freeze_requantization_ranges_without_calib()
self._fuse_requantize_with_fused_quantized_node()
except Exception as e:
import traceback
traceback.print_exc()
self._tmp_model = None
logger.error('Failed to quantize graph due to {}.'.format(str(e)))
finally:
if (not debug):
self._post_clean()
return self._tmp_model
def bf16_convert(self):
try:
self._tmp_model.graph_def = BF16Convert(self._tmp_model.graph_def, self.fp32_ops, self.bf16_ops).do_transformation()
except Exception as e:
self._tmp_model = None
logger.error('Failed to convert graph due to {}.'.format(str(e)))
finally:
if debug:
self._tmp_model.save(self._bf16_mixed_precision_model_path)
return self._tmp_model
def _quantize_graph(self):
non_pad_ops = list(set(self.fp32_ops).union(set(self.bf16_ops)))
self._tmp_graph_def = FusePadWithConv2DOptimizer(self._tmp_graph_def, non_pad_ops, self._tmp_model.input_node_names, self.op_wise_config, self.new_api).do_transformation()
self._tmp_graph_def = QuantizeGraphHelper().get_sorted_graph(self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names)
(self._tmp_graph_def, self.quantized_node_info, _) = QuantizeGraphForIntel(self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names, self.op_wise_config, self.int8_sequences, self.device, False, self.new_api, self.performance_only).do_transform()
self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library)
if debug:
self._tmp_model.graph_def = self._tmp_graph_def
self._tmp_model.save(self._int8_dynamic_range_model_path)
def _freeze_requantization_ranges_without_calib(self):
self._tmp_graph_def = FreezeValueWithoutCalibTransformer(self._tmp_graph_def, self.recover_config, postfix='__min').do_transformation_without_calib()
self._tmp_graph_def = FreezeValueWithoutCalibTransformer(self._tmp_graph_def, self.recover_config, postfix='__max').do_transformation_without_calib()
self._tmp_graph_def = FreezeValueWithoutCalibTransformer(self._tmp_graph_def, self.recover_config, postfix='__requant_min_max', device=self.device).do_transformation_without_calib()
self._tmp_graph_def = QuantizedRNNConverter(self._tmp_graph_def, self._calibration_data, self._rnn_details).do_transformation()
if (('scale_propagation_max_pooling' in self.recipes) and self.recipes['scale_propagation_max_pooling']):
self._tmp_graph_def = ScaleProPagationTransformer(self._tmp_graph_def).do_transformation()
if debug:
self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library)
self._tmp_model.graph_def = self._tmp_graph_def
self._tmp_model.save(self._int8_frozen_range_model_path)
def _fuse_requantize_with_fused_quantized_node(self):
self._tmp_graph_def = FuseConvRequantizeTransformer(self._tmp_graph_def, self.device, self.new_api).do_transformation()
self._tmp_graph_def = FuseMatMulRequantizeTransformer(self._tmp_graph_def).do_transformation()
self._tmp_graph_def = FuseMatMulRequantizeDequantizeTransformer(self._tmp_graph_def).do_transformation()
self._tmp_graph_def = StripUnusedNodesOptimizer(self._tmp_graph_def, self._tmp_model.input_node_names, self._tmp_model.output_node_names).do_transformation()
self._tmp_graph_def = RemoveTrainingNodesOptimizer(self._tmp_graph_def, protected_nodes=self._tmp_model.output_node_names).do_transformation()
self._tmp_graph_def = FoldBatchNormNodesOptimizer(self._tmp_graph_def).do_transformation()
if (('scale_propagation_concat' in self.recipes) and self.recipes['scale_propagation_concat']):
self._tmp_graph_def = RerangeQuantizedConcat(self._tmp_graph_def, self.device).do_transformation()
self._tmp_graph_def = MetaInfoChangingMemOpOptimizer(self._tmp_graph_def).do_transformation()
if ((self.advance_config is not None) and (deep_get(self.advance_config, 'bias_correction') is not None)):
self._tmp_graph_def = BiasCorrection(self._tmp_graph_def, self.model.graph_def).do_transformation()
self._tmp_graph_def.library.CopyFrom(self.model.graph_def.library)
self._tmp_model.graph_def = self._tmp_graph_def
def _post_clean(self):
if (os.path.exists(self._int8_logged_model_path) and os.path.isdir(self._int8_logged_model_path)):
import shutil
shutil.rmtree(self._int8_logged_model_path)
elif gfile.Exists((self._int8_logged_model_path + '.pb')):
os.remove((self._int8_logged_model_path + '.pb')) |
@pytest.mark.parametrize('model_name', ['wide', 'tabmlp'])
@pytest.mark.parametrize('return_samples', [True, False])
def test_regression(model_name, return_samples):
bsz = 32
n_samples = 5
if (model_name == 'wide'):
X_tab = X_wide
model = BayesianWide(np.unique(X_wide).shape[0], 1)
elif (model_name == 'tabmlp'):
X_tab = X_tabmlp
model = BayesianTabMlp(column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):], mlp_hidden_dims=[32, 16], pred_dim=1)
trainer = BayesianTrainer(model, objective='regression', verbose=0)
trainer.fit(X_tab=X_tab, target=target_regres, batch_size=16)
preds = trainer.predict(X_tab=X_tab, return_samples=return_samples, batch_size=16)
out = []
if return_samples:
out.append((preds.shape[0] == n_samples))
out.append((preds.shape[1] == bsz))
else:
out.append((preds.shape[0] == bsz))
assert all(out) |
def resnet152_mpncov_160(pretrained=False, progress=True, **kwargs):
return _resnet_mpncov_160('resnet152_mpncov_160', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs) |
class DatasetConfig(FairseqDataclass):
num_workers: int = field(default=1, metadata={'help': 'how many subprocesses to use for data loading'})
skip_invalid_size_inputs_valid_test: bool = field(default=False, metadata={'help': 'ignore too long or too short lines in valid and test set'})
max_tokens: Optional[int] = field(default=None, metadata={'help': 'maximum number of tokens in a batch'})
batch_size: Optional[int] = field(default=None, metadata={'help': 'number of examples in a batch', 'argparse_alias': '--max-sentences'})
required_batch_size_multiple: int = field(default=8, metadata={'help': 'batch size will be a multiplier of this value'})
required_seq_len_multiple: int = field(default=1, metadata={'help': 'maximum sequence length in batch will be a multiplier of this value'})
dataset_impl: Optional[DATASET_IMPL_CHOICES] = field(default=None, metadata={'help': 'output dataset implementation'})
data_buffer_size: int = field(default=10, metadata={'help': 'Number of batches to preload'})
train_subset: str = field(default='train', metadata={'help': 'data subset to use for training (e.g. train, valid, test)'})
valid_subset: str = field(default='valid', metadata={'help': 'comma separated list of data subsets to use for validation (e.g. train, valid, test)'})
combine_valid_subsets: Optional[bool] = field(default=None, metadata={'help': 'comma separated list of data subsets to use for validation (e.g. train, valid, test)', 'argparse_alias': '--combine-val'})
ignore_unused_valid_subsets: Optional[bool] = field(default=False, metadata={'help': 'do not raise error if valid subsets are ignored'})
validate_interval: int = field(default=1, metadata={'help': 'validate every N epochs'})
validate_interval_updates: int = field(default=0, metadata={'help': 'validate every N updates'})
validate_after_updates: int = field(default=0, metadata={'help': "don't validate until reaching this many updates"})
fixed_validation_seed: Optional[int] = field(default=None, metadata={'help': 'specified random seed for validation'})
disable_validation: bool = field(default=False, metadata={'help': 'disable validation'})
max_tokens_valid: Optional[int] = field(default=II('dataset.max_tokens'), metadata={'help': 'maximum number of tokens in a validation batch (defaults to --max-tokens)'})
batch_size_valid: Optional[int] = field(default=II('dataset.batch_size'), metadata={'help': 'batch size of the validation batch (defaults to --batch-size)', 'argparse_alias': '--max-sentences-valid'})
max_valid_steps: Optional[int] = field(default=None, metadata={'help': 'How many batches to evaluate', 'argparse_alias': '--nval'})
curriculum: int = field(default=0, metadata={'help': "don't shuffle batches for first N epochs"})
gen_subset: str = field(default='test', metadata={'help': 'data subset to generate (train, valid, test)'})
num_shards: int = field(default=1, metadata={'help': 'shard generation over N shards'})
shard_id: int = field(default=0, metadata={'help': 'id of the shard to generate (id < num_shards)'}) |
def download_file(url, local_filename):
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=None):
f.write(chunk)
return local_filename |
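# Hedged usage sketch (the URL is illustrative only): stream a download straight
# to disk; with chunk_size=None, requests yields data as it arrives instead of
# buffering the whole response in memory.
import requests

download_file('https://example.com/checkpoint.bin', 'checkpoint.bin') |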
class TransformersDecoderInvocationLayer(PromptModelInvocationLayer):
def __init__(self, model_name_or_path: str='mpt-7b-chat', max_length: Optional[int]=256, use_auth_token: Optional[Union[(str, bool)]]=None, use_gpu: Optional[bool]=True, devices: Optional[List[Union[(str, torch.device)]]]=None, **kwargs):
super().__init__(model_name_or_path, max_length)
self.use_auth_token = use_auth_token
(self.devices, _) = initialize_device_settings(devices=devices, use_cuda=use_gpu, multi_gpu=False)
self.use_gpu = use_gpu
if (len(self.devices) > 1):
logger.warning('Multiple devices are not supported in %s inference, using the first device %s.', self.__class__.__name__, self.devices[0])
model_input_kwargs = {key: kwargs[key] for key in ['model_kwargs', 'trust_remote_code', 'revision', 'feature_extractor', 'tokenizer', 'config', 'use_fast', 'torch_dtype', 'device_map'] if (key in kwargs)}
if ('model_kwargs' in model_input_kwargs):
mkwargs = model_input_kwargs.pop('model_kwargs')
model_input_kwargs.update(mkwargs)
torch_dtype = model_input_kwargs.get('torch_dtype')
if (torch_dtype is not None):
if isinstance(torch_dtype, str):
if ('torch.' not in torch_dtype):
raise ValueError(f"torch_dtype should be a torch.dtype or a string with 'torch.' prefix, got {torch_dtype}")
torch_dtype_resolved = getattr(torch, torch_dtype.split('torch.')[-1])
elif isinstance(torch_dtype, torch.dtype):
torch_dtype_resolved = torch_dtype
else:
raise ValueError(f'Invalid torch_dtype value {torch_dtype}')
model_input_kwargs['torch_dtype'] = torch_dtype_resolved
if (len(model_input_kwargs) > 0):
logger.info('Using model input kwargs %s in %s', model_input_kwargs, self.__class__.__name__)
self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True, use_fast=True)
self.tokenizer.pad_token = self.tokenizer.eos_token
self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
self.model = AutoModelForCausalLM.from_pretrained(model_name_or_path, low_cpu_mem_usage=True, return_dict=True, torch_dtype=torch.bfloat16, max_seq_len=8192, trust_remote_code=True)
self.model = ipex.optimize(self.model, dtype=torch.bfloat16)
if self.use_gpu:
self.model = self.model.to('cuda')
self.model.config.pad_token_id = self.model.config.eos_token_id
self.model = self.model.eval()
def invoke(self, *args, **kwargs):
generate_kwargs = dict(use_cache=True, bos_token_id=0, eos_token_id=1, pad_token_id=0)
generate_kwargs.update(dict(min_new_tokens=kwargs.pop('min_new_tokens', 1), max_new_tokens=kwargs.pop('max_new_tokens', self.max_length), temperature=kwargs.pop('temperature', 0.3), top_p=kwargs.pop('top_p', 0.9), top_k=kwargs.pop('top_k', 1), num_beams=kwargs.pop('beams', 1), return_dict_in_generate=True, early_stopping=kwargs.pop('early_stopping', True)))
decode_mode = kwargs.pop('decode_mode', 'Greedy')
if ('Greedy' in decode_mode):
generate_kwargs.update(dict(num_beams=1, do_sample=False))
elif ('Beam' in decode_mode):
generate_kwargs.update(dict(do_sample=True))
else:
pass
output: List[Dict[(str, str)]] = []
start_time = time.time()
if (kwargs and ('prompt' in kwargs)):
prompt = kwargs.pop('prompt')
if (detect_language(prompt) == 'Chinese'):
generate_kwargs['max_new_tokens'] = 512
input_tokens = self.tokenizer.batch_encode_plus([prompt], return_tensors='pt', padding=True)
input_token_len = input_tokens.input_ids.shape[(- 1)]
stop_token_ids = self.tokenizer.convert_tokens_to_ids(['<|im_end|>', '<|endoftext|>'])
stop_token_ids.append(self.model.generation_config.eos_token_id)
stop_token_ids.append(self.tokenizer('.', return_tensors='pt').input_ids)
stop_token_ids.append(self.tokenizer('!', return_tensors='pt').input_ids)
stop_token_ids.append(self.tokenizer('。', return_tensors='pt').input_ids)
stop_token_ids.append(self.tokenizer('！', return_tensors='pt').input_ids)
stop = StopOnTokensWithPeriod(min_length=108, start_length=input_token_len, stop_token_id=stop_token_ids)
generate_kwargs['stopping_criteria'] = StoppingCriteriaList([stop])
with torch.no_grad():
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16, cache_enabled=True):
output = self.model.generate(**input_tokens, **generate_kwargs)
generated_texts = self.tokenizer.decode(output.sequences[0], skip_special_tokens=True)
print('The inference time======', (time.time() - start_time))
if ('Response:' in generated_texts):
result = generated_texts.split('Response:')[1].strip()
return [result]
@classmethod
def supports(cls, model_name_or_path: str) -> bool:
try:
config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
except OSError:
return False
supported_models = list(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values())
supported_models.append('MPTForCausalLM')
return (config.architectures[0] in supported_models) |
class XLMOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
if (self.task == 'multiple-choice'):
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]) |
def seresnet50b(**kwargs):
return get_seresnet(blocks=50, conv1_stride=False, model_name='seresnet50b', **kwargs) |
def res_block(input, expansion_ratio, output_dim, stride, is_train, name, bias=False, shortcut=True):
with tf.name_scope(name), tf.variable_scope(name):
bottleneck_dim = round((expansion_ratio * input.get_shape().as_list()[(- 1)]))
net = conv_1x1(input, bottleneck_dim, name='pw', bias=bias)
net = batch_norm(net, train=is_train, name='pw_bn')
net = relu(net)
net = dwise_conv(net, strides=[1, stride, stride, 1], name='dw', bias=bias)
net = batch_norm(net, train=is_train, name='dw_bn')
net = relu(net)
net = conv_1x1(net, output_dim, name='pw_linear', bias=bias)
net = batch_norm(net, train=is_train, name='pw_linear_bn')
if (shortcut and (stride == 1)):
in_dim = int(input.get_shape().as_list()[(- 1)])
if (in_dim != output_dim):
ins = conv_1x1(input, output_dim, name='ex_dim')
net = (ins + net)
else:
net = (input + net)
return net |
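# Hypothetical call site for res_block (the conv_1x1/dwise_conv/batch_norm/relu
# helpers and TF1 graph mode are assumed): a MobileNetV2-style inverted residual
# that expands by expansion_ratio, depthwise-filters, then projects back down.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, [None, 56, 56, 24])
y = res_block(x, expansion_ratio=6, output_dim=24, stride=1, is_train=True, name='res2_1') |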
def train(cfg_file: str) -> None:
cfg = load_config(cfg_file)
model_dir = make_model_dir(cfg['training']['model_dir'], overwrite=cfg['training'].get('overwrite', False))
_ = make_logger(model_dir, mode='train')
set_seed(seed=cfg['training'].get('random_seed', 42))
(train_data, dev_data, test_data, src_vocab, trg_vocab) = load_data(data_cfg=cfg['data'])
model = build_model(cfg['model'], src_vocab=src_vocab, trg_vocab=trg_vocab)
trainer = TrainManager(model=model, config=cfg)
shutil.copy2(cfg_file, (model_dir + '/config.yaml'))
log_cfg(cfg)
log_data_info(train_data=train_data, valid_data=dev_data, test_data=test_data, src_vocab=src_vocab, trg_vocab=trg_vocab)
logger.info(str(model))
src_vocab_file = '{}/src_vocab.txt'.format(cfg['training']['model_dir'])
src_vocab.to_file(src_vocab_file)
trg_vocab_file = '{}/trg_vocab.txt'.format(cfg['training']['model_dir'])
trg_vocab.to_file(trg_vocab_file)
trainer.train_and_validate(train_data=train_data, valid_data=dev_data) |
class ClapConfig(PretrainedConfig):
model_type = 'clap'
is_composition = True
def __init__(self, text_config=None, audio_config=None, logit_scale_init_value=(1 / 0.07), projection_dim=512, projection_hidden_act='relu', initializer_factor=1.0, **kwargs):
super().__init__(**kwargs)
if (text_config is None):
text_config = {}
logger.info('text_config is None. Initializing the ClapTextConfig with default values.')
if (audio_config is None):
audio_config = {}
logger.info('audio_config is None. Initializing the ClapAudioConfig with default values.')
self.text_config = ClapTextConfig(**text_config)
self.audio_config = ClapAudioConfig(**audio_config)
self.text_config.projection_dim = projection_dim
self.audio_config.projection_dim = projection_dim
self.text_config.projection_hidden_act = projection_hidden_act
self.audio_config.projection_hidden_act = projection_hidden_act
self.projection_dim = projection_dim
self.projection_hidden_act = projection_hidden_act
self.hidden_size = self.text_config.hidden_size
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = initializer_factor
self.num_hidden_layers = (self.text_config.num_hidden_layers + len(self.audio_config.depths))
@classmethod
def from_text_audio_configs(cls, text_config: ClapTextConfig, audio_config: ClapAudioConfig, **kwargs):
return cls(text_config=text_config.to_dict(), audio_config=audio_config.to_dict(), **kwargs)
def to_dict(self):
output = copy.deepcopy(self.__dict__)
output['text_config'] = self.text_config.to_dict()
output['audio_config'] = self.audio_config.to_dict()
output['model_type'] = self.__class__.model_type
return output |
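# Brief usage sketch (relies on the ClapTextConfig/ClapAudioConfig classes the
# constructor references): both construction paths yield equivalent configs.
cfg_a = ClapConfig()  # sub-configs built from defaults, as logged above
cfg_b = ClapConfig.from_text_audio_configs(ClapTextConfig(), ClapAudioConfig())
assert cfg_a.to_dict()['model_type'] == cfg_b.to_dict()['model_type'] == 'clap' |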
def compare_match(funct, g, sent_id, pred_dictionary, easy, diff):
(hold_gold, tgt_gold, exp_gold, polarity_gold, intensity_gold, txt) = g
majority_vote = (len(pred_dictionary) / 2)
match_hte = 0
for team in pred_dictionary.keys():
try:
p_tpls = opinion_to_tuple(pred_dictionary[team][sent_id])
matching_pred = align_gold_pred(funct, g, p_tpls)
except KeyError:
majority_vote -= 1
continue
if (len(matching_pred) != 0):
(hold_pred, tgt_pred, exp_pred, polarity_pred, intensity_pred) = matching_pred
match_hte += 1
spars_value = label_sparsity([hold_gold, tgt_gold, exp_gold], txt)
if (match_hte > majority_vote):
match_hte = 1
if (funct == 'hte_sparsity'):
easy.append(spars_value)
else:
match_hte = 0
if (funct == 'hte_sparsity'):
diff.append(spars_value)
return (easy, diff, match_hte) |
def cut(graph, node):
if (not isinstance(node, Node)):
node = graph[node]
for e in graph.edges:
if (node in (e.node1, e.node2)):
for n in node.links:
if ((e.node1 == node) and (e.node2 != n)):
graph._add_edge_copy(e, node1=n, node2=e.node2)
if ((e.node2 == node) and (e.node1 != n)):
graph._add_edge_copy(e, node1=e.node1, node2=n)
unlink(graph, node) |
class CompositeCrossover(Crossover[(CompositeSolution, CompositeSolution)]):
__EPS = 1e-14
def __init__(self, crossover_operator_list: [Crossover]):
super(CompositeCrossover, self).__init__(probability=1.0)
Check.is_not_none(crossover_operator_list)
Check.collection_is_not_empty(crossover_operator_list)
self.crossover_operators_list = []
for operator in crossover_operator_list:
Check.that(issubclass(operator.__class__, Crossover), 'Object is not a subclass of Crossover')
self.crossover_operators_list.append(operator)
def execute(self, solutions: List[CompositeSolution]) -> List[CompositeSolution]:
Check.is_not_none(solutions)
Check.that((len(solutions) == 2), ('The number of parents is not two: ' + str(len(solutions))))
offspring1 = []
offspring2 = []
number_of_solutions_in_composite_solution = solutions[0].number_of_variables
for i in range(number_of_solutions_in_composite_solution):
parents = [solutions[0].variables[i], solutions[1].variables[i]]
children = self.crossover_operators_list[i].execute(parents)
offspring1.append(children[0])
offspring2.append(children[1])
return [CompositeSolution(offspring1), CompositeSolution(offspring2)]
def get_number_of_parents(self) -> int:
return 2
def get_number_of_children(self) -> int:
return 2
def get_name(self) -> str:
return 'Composite crossover' |
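# Hedged sketch (assumes jMetalPy's SBXCrossover; any Crossover subclass works):
# pair one sub-operator with each sub-solution of a CompositeSolution.
from jmetal.operator import SBXCrossover

op = CompositeCrossover([
    SBXCrossover(probability=0.9, distribution_index=20.0),
    SBXCrossover(probability=0.9, distribution_index=20.0),
])
print(op.get_name(), op.get_number_of_parents(), op.get_number_of_children()) |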
@mx.operator.register('mean_squared_error_with_ohem_for_one_class_detection')
class mean_squared_error_with_ohem_for_one_class_detection_Prop(mx.operator.CustomOpProp):
def __init__(self, ohem_ratio=0.25):
super(mean_squared_error_with_ohem_for_one_class_detection_Prop, self).__init__(need_top_grad=False)
self.ohem_ratio = ohem_ratio
def list_arguments(self):
return ['pred', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
pred_shape = in_shape[0]
label_shape = in_shape[0]
output_shape = in_shape[0]
return ([pred_shape, label_shape], [output_shape], [])
def create_operator(self, ctx, shapes, dtypes):
return mean_squared_error_with_ohem_for_one_class_detection(self.ohem_ratio) |
def running_of_queue(identity, queue):
def has_queue_tag(instance):
if ('Tags' not in instance):
return False
for tag in instance['Tags']:
if ((tag['Key'] == 'QueueName') and (tag['Value'] == queue)):
return True
return False
instances_json = json.loads(subprocess.check_output(['aws', 'ec2', 'describe-instances', '--filters', 'Name=instance-state-name,Values=pending,running']))
instances = [i for res in instances_json['Reservations'] for i in res['Instances'] if has_queue_tag(i)]
for instance in instances:
out = subprocess.check_output([os.path.join(_DIRNAME, 'connect_instance.py'), identity, instance['InstanceId'], '--com', os.path.join('${ITHEMAL_HOME}', 'aws', 'aws_utils', 'get_running_queue_command.sh')], stderr=open('/dev/null', 'w')).strip()
if out:
print('{} || {}'.format(instance['InstanceId'], out)) |
def save_checkpoint(state, args, is_best, filename):
model_dir = args.train_url
model_filename = (model_dir + filename)
best_filename = (model_dir + 'model_best.pth.tar')
print("=> saving checkpoint '{}'".format(model_filename))
torch.save(state, model_filename)
if is_best:
shutil.copyfile(model_filename, best_filename)
print("=> saved checkpoint '{}'".format(model_filename))
return |
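# Hypothetical call site for save_checkpoint. Because model_filename is built by
# plain string concatenation, args.train_url is assumed to end with a path
# separator (e.g. 'runs/exp1/').
state = {'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}
save_checkpoint(state, args, is_best=True, filename='checkpoint.pth.tar') |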