code stringlengths 101 5.91M |
|---|
class ProbMPS(nn.Module):
    """Probabilistic Matrix Product State (MPS) model over fixed-length sequences.

    Holds one core tensor per site plus two boundary vectors; `forward` returns
    per-sequence log unnormalized probabilities minus the model's log norm.

    Fix: `seq_len`/`input_dim`/`bond_dim`/`use_bias` were plain methods, but the
    rest of this class reads them as attributes (e.g. `self.use_bias` in
    `forward` and `log_norm`) — a bound method is always truthy, so the bias
    branch was taken even when no bias was configured. They are restored as
    properties, matching the attribute-style access used throughout.
    """

    def __init__(self, seq_len: int, input_dim: int, bond_dim: int, complex_params: bool=False, use_bias: bool=False, init_method: str='near_eye', embed_fun: Optional[Callable]=None, domain: Optional[DataDomain]=None) -> None:
        super().__init__()
        assert min(seq_len, input_dim, bond_dim) > 0
        assert init_method in ('near_eye', 'normal')
        init_fun = near_eye_init if init_method == 'near_eye' else normal_init
        core_tensors = init_fun((seq_len, input_dim, bond_dim, bond_dim), is_complex=complex_params)
        # Both boundary vectors start as copies of one random unit-scale vector.
        rand_vec = torch.randn(bond_dim) / sqrt(bond_dim)
        edge_vecs = torch.stack((rand_vec,) * 2)
        if complex_params:
            edge_vecs = phaseify(edge_vecs)
        self.core_tensors = nn.Parameter(core_tensors)
        self.edge_vecs = nn.Parameter(edge_vecs)
        if use_bias:
            # bias_mat only exists when bias is enabled; the `use_bias`
            # property below keys off its presence.
            bias_mat = torch.zeros(bond_dim, bond_dim)
            if complex_params:
                bias_mat = phaseify(bias_mat)
            self.bias_mat = nn.Parameter(bias_mat)
        self.complex_params = complex_params
        self.embedding = None
        if isinstance(embed_fun, (FixedEmbedding, TrainableEmbedding)):
            self.embedding = embed_fun
            if hasattr(embed_fun, 'emb_dim'):
                assert self.embedding.emb_dim == input_dim
        elif embed_fun is not None:
            # A raw callable needs an explicit domain to become a FixedEmbedding.
            assert domain is not None
            self.embedding = FixedEmbedding(embed_fun, domain)
            assert self.embedding.emb_dim == input_dim

    def forward(self, input_data: Tensor, slim_eval: bool=False, parallel_eval: bool=False) -> Tensor:
        """Return log probabilities (2*log|psi| - log_norm) for a batch of sequences."""
        if self.embedding is not None:
            input_data = self.embedding(input_data)
        if slim_eval:
            if self.use_bias:
                raise ValueError('Bias matrices not supported for slim_eval')
            psi_vals, log_scales = slim_eval_fun(input_data, self.core_tensors, self.edge_vecs)
        else:
            mat_slices = get_mat_slices(input_data, self.core_tensors)
            if self.use_bias:
                mat_slices = mat_slices + self.bias_mat[None, None]
            psi_vals, log_scales = contract_matseq(mat_slices, self.edge_vecs[0], self.edge_vecs[1], parallel_eval, log_format=True)
        log_norm = self.log_norm()
        assert log_norm.isfinite()
        assert torch.all(psi_vals.isfinite())
        # log_scales carries rescaling factored out during contraction.
        log_uprobs = torch.log(torch.abs(psi_vals)) + log_scales
        return (2 * log_uprobs) - log_norm

    def loss(self, input_data: Tensor, slim_eval: bool=False, parallel_eval: bool=False) -> Tensor:
        """Negative mean log likelihood of the batch."""
        return -torch.mean(self.forward(input_data, slim_eval=slim_eval, parallel_eval=parallel_eval))

    def log_norm(self) -> Tensor:
        """Log of the model's normalization constant (bias folded in if present)."""
        if self.use_bias:
            core_tensors = self.core_tensors + self.bias_mat[None, None]
        else:
            core_tensors = self.core_tensors
        lamb_mat = None if self.embedding is None else self.embedding.lamb_mat
        return get_log_norm(core_tensors, self.edge_vecs, lamb_mat=lamb_mat)

    @property
    def seq_len(self):
        # Core tensors are (seq_len, input_dim, bond_dim, bond_dim).
        return self.core_tensors.shape[0]

    @property
    def input_dim(self):
        return self.core_tensors.shape[1]

    @property
    def bond_dim(self):
        return self.core_tensors.shape[2]

    @property
    def use_bias(self):
        # Bias is enabled iff __init__ registered a bias matrix.
        return hasattr(self, 'bias_mat')
def compose_fieldmap(rf1, rf2):
    """Compose two receptive-field maps.

    Each map is an `(offset, size, step)` triple of equal-length tuples, one
    entry per spatial dimension. The result maps coordinates of the second
    field through the first (i.e. rf2 applied on top of rf1).

    Raises:
        ValueError: if `rf1` is None (the original code dropped into pdb here —
        debug leftover replaced with an explicit error).
    """
    if rf1 is None:
        raise ValueError('rf1 must be an (offset, size, step) triple, got None')
    offset1, size1, step1 = rf1
    offset2, size2, step2 = rf2
    # Extent of rf2's window expressed in rf1's input coordinates.
    size = tuple((size2c - 1) * step1c + size1c for size1c, step1c, size2c in zip(size1, step1, size2))
    # rf2's offset is measured in rf1 output units, so scale by rf1's stride.
    offset = tuple(offset2c * step1c + offset1c for offset2c, step1c, offset1c in zip(offset2, step1, offset1))
    # Strides multiply under composition.
    step = tuple(step2c * step1c for step1c, step2c in zip(step1, step2))
    return (offset, size, step)
def main(prior_name, name, max_samples, diversity_picker, oracle, w_min):
    """Sample molecules from a trained search model, optionally keep a diverse
    subset, and dump the samples/weights for downstream (docking) scoring.

    Args:
        prior_name: JSON config name for both the prior and search models.
        name: run name; outputs go under results/<name>/.
        max_samples: number of samples drawn from the model.
        diversity_picker: if in (0, max_samples), keep this many MaxMin-diverse samples.
        oracle: scoring oracle name (see NOTE below — currently ignored).
        w_min: minimum importance weight passed to get_samples.
    """
    prior_model = model_from_json(prior_name)
    search_model = model_from_json(prior_name)
    model_weights_path = os.path.join(script_dir, 'results', name, 'weights.pth')
    search_model.load(model_weights_path)
    samples, weights = get_samples(prior_model, search_model, max=max_samples, w_min=w_min)
    if 0 < diversity_picker < max_samples:
        # Diversity selection: MaxMin picking over Morgan-fingerprint Dice distance.
        mols = [Chem.MolFromSmiles(s) for s in samples]
        fps = [GetMorganFingerprint(x, 3) for x in mols]
        picker = MaxMinPicker()
        def distij(i, j, fps=fps):
            return 1 - DataStructs.DiceSimilarity(fps[i], fps[j])
        pick_indices = list(picker.LazyPick(distij, max_samples, diversity_picker))
        samples = [samples[i] for i in pick_indices]
        weights = [weights[i] for i in pick_indices]
    # NOTE(review): `or True` forces this branch for every oracle, making the
    # docking-cache path below dead code — confirm whether that is intended
    # (looks like a debugging override left in).
    if (oracle != 'docking') or True:
        dump_path = os.path.join(script_dir, 'results', name, 'docker_samples.p')
        with open(dump_path, 'wb') as fh:  # context managers: original leaked handles
            pickle.dump(samples, fh)
        dump_path = os.path.join(script_dir, 'results', name, 'samples.p')
        with open(dump_path, 'wb') as fh:
            pickle.dump((samples, weights), fh)
    else:
        # Split samples into already-docked (scores cached) vs. still-to-dock.
        whole_path = os.path.join(script_dir, '..', 'data', 'drd3_scores.pickle')
        with open(whole_path, 'rb') as fh:
            docking_whole_results = pickle.load(fh)
        filtered_smiles = []
        already_smiles = []
        already_scores = []
        for smile in samples:
            if smile in docking_whole_results:
                already_smiles.append(smile)
                already_scores.append(docking_whole_results[smile])
            else:
                filtered_smiles.append(smile)
        dump_path = os.path.join(script_dir, 'results', name, 'docking_small_results', 'simili.csv')
        df = pd.DataFrame.from_dict({'smile': already_smiles, 'score': already_scores})
        df.to_csv(dump_path)
        dump_path = os.path.join(script_dir, 'results', name, 'docker_samples.p')
        with open(dump_path, 'wb') as fh:
            pickle.dump(filtered_smiles, fh)
        dump_path = os.path.join(script_dir, 'results', name, 'samples.p')
        with open(dump_path, 'wb') as fh:
            pickle.dump((samples, weights), fh)
class JNU(object):
    """JNU bearing-fault dataset wrapper for (transfer-learning) experiments.

    Builds train/val splits for a source condition and a target condition; the
    two branches of the original `data_split` were copy-pastes differing only
    in which condition they loaded, so the shared logic lives in a helper now.
    """

    num_classes = 4   # fault categories in JNU
    inputchannel = 1  # single-channel vibration signal

    def __init__(self, data_dir, transfer_task, normlizetype='0-1'):
        """
        Args:
            data_dir: dataset root directory.
            transfer_task: pair [source_condition, target_condition].
            normlizetype: normalization scheme name passed to Normalize.
        """
        self.data_dir = data_dir
        self.source_N = transfer_task[0]
        self.target_N = transfer_task[1]
        self.normlizetype = normlizetype
        self.data_transforms = {'train': Compose([Reshape(), Normalize(self.normlizetype), Retype()]), 'val': Compose([Reshape(), Normalize(self.normlizetype), Retype()])}

    def _make_train_val(self, condition):
        """Load files for one working condition and return stratified (train, val) datasets."""
        list_data = get_files(self.data_dir, condition)
        data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
        train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        train_set = dataset(list_data=train_pd, transform=self.data_transforms['train'])
        val_set = dataset(list_data=val_pd, transform=self.data_transforms['val'])
        return train_set, val_set

    def data_split(self, transfer_learning=True):
        """Return (source_train, source_val, target_train, target_val) when
        transfer_learning, else (source_train, source_val, target_val) with the
        whole target condition used for validation."""
        source_train, source_val = self._make_train_val(self.source_N)
        if transfer_learning:
            target_train, target_val = self._make_train_val(self.target_N)
            return (source_train, source_val, target_train, target_val)
        else:
            # No target training split: all target data goes to validation.
            list_data = get_files(self.data_dir, self.target_N)
            data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
            target_val = dataset(list_data=data_pd, transform=self.data_transforms['val'])
            return (source_train, source_val, target_val)
class Scatter(object):
    """Scatters an input across target GPUs, using side streams for CPU->GPU copies."""

    @staticmethod
    def forward(target_gpus, input):
        """Scatter `input` to `target_gpus` and return a tuple of per-device chunks.

        Defined without `self` in the original, so the @staticmethod decorator
        was evidently stripped during extraction; restored here.
        """
        input_device = get_input_device(input)
        streams = None
        # Device id -1 means the input lives on CPU: copy to each GPU on a
        # dedicated stream so the copies can overlap.
        if input_device == -1:
            streams = [_get_stream(device) for device in target_gpus]
        outputs = scatter(input, target_gpus, streams)
        if streams is not None:
            # Make each GPU's main stream wait for its copy stream.
            synchronize_stream(outputs, target_gpus, streams)
        return tuple(outputs)
def in_distance_range_pose(ego_center: np.ndarray, pose: np.ndarray, d_min: float, d_max: float) -> bool:
    """Return True iff the Euclidean distance between the xyz components of
    `pose` and `ego_center` lies strictly within (d_min, d_max)."""
    displacement = pose[0:3] - ego_center[0:3]
    dist = float(np.linalg.norm(displacement))
    return d_min < dist < d_max
class AverageMeter():
    """Tracks per-key running sums and counts, exposing means on demand.

    `get` with a single key returns a scalar mean; with several keys it returns
    a tuple of means. `pop` resets a key (or all keys) after reading it.
    """

    def __init__(self, *keys):
        # Each entry maps key -> [running_sum, sample_count].
        self.__data = {}
        for key in keys:
            self.__data[key] = [0.0, 0]

    def add(self, dict):
        # Parameter intentionally named `dict` to preserve the original
        # keyword-call interface, despite shadowing the builtin.
        for key, value in dict.items():
            slot = self.__data.setdefault(key, [0.0, 0])
            slot[0] += value
            slot[1] += 1

    def get(self, *keys):
        means = tuple(self.__data[k][0] / self.__data[k][1] for k in keys)
        # Single key: unwrap to a scalar, matching the original contract.
        return means[0] if len(keys) == 1 else means

    def pop(self, key=None):
        if key is None:
            # Reset everything; nothing to return in this mode.
            for k in self.__data:
                self.__data[k] = [0.0, 0]
            return None
        value = self.get(key)
        self.__data[key] = [0.0, 0]
        return value
def adjust_learning_rate_resnet(optimizer):
    """Build the standard ResNet MultiStepLR schedule for the configured
    total-epoch budget (90, 270, or 300 epochs).

    Raises:
        ValueError: if the configured epoch count has no known milestone set.
    """
    epoch = C.get()['epoch']
    # Milestones keyed by total training epochs.
    milestone_table = {
        90: [30, 60, 80],
        270: [90, 180, 240],
        300: [75, 150, 225],
    }
    if epoch not in milestone_table:
        raise ValueError('invalid epoch=%d for resnet scheduler' % epoch)
    return torch.optim.lr_scheduler.MultiStepLR(optimizer, milestone_table[epoch])
class Model(nn.Module):
    """Tiny composite module: a project-defined Block followed by a 1x1 Conv2d.

    No forward() is defined in this view; presumably defined elsewhere or the
    submodules are used directly — confirm against callers.
    """
    def __init__(self):
        super().__init__()
        self.block = Block()  # project-defined submodule; definition not visible here
        self.conv = nn.Conv2d(3, 3, 1)  # 1x1 conv, 3 -> 3 channels
class InstructionParameter(ModelTypeValidator):
    """Schema field that validates and (de)serializes instruction parameters.

    Accepts scalars (complex, int, float, str, numpy scalars, sympy
    expressions) and possibly-nested collections of them. Complex values are
    serialized as [real, imag] pairs.
    """

    # Fix: `numpy.float` was only an alias of the builtin float and was removed
    # in NumPy 1.24, so referencing it raises AttributeError on modern NumPy.
    # `numpy.floating` matches actual numpy float scalars (the evident intent).
    valid_types = (complex, int, float, str, numpy.integer, numpy.floating, sympy.Basic, sympy.Symbol, list, numpy.ndarray)
    default_error_messages = {'invalid': '{input} cannot be parsed as a parameter.', 'format': '"{input}" cannot be formatted as a parameter.'}

    def _serialize(self, value, attr, obj):
        """Convert `value` into a JSON-compatible form, recursing into collections."""
        if is_collection(value):
            return [self._serialize(item, attr, obj) for item in value]
        if isinstance(value, complex):
            return [value.real, value.imag]
        if isinstance(value, numpy.integer):
            return int(value)
        if isinstance(value, numpy.floating):
            return float(value)
        if isinstance(value, (float, int, str)):
            return value
        if isinstance(value, sympy.Symbol):
            return str(value)
        if isinstance(value, sympy.Basic):
            # Imaginary sympy values serialize like complex: [real, imag].
            if value.is_imaginary:
                return [float(sympy.re(value)), float(sympy.im(value))]
            # evalf() collapses exact sympy numbers to machine precision.
            if value.is_Integer:
                return int(value.evalf())
            else:
                return float(value.evalf())
        if hasattr(value, 'as_dict'):
            return value.as_dict()
        return self.fail('format', input=value)

    def _deserialize(self, value, attr, data):
        """Inverse of _serialize for plain scalars/collections; anything else is rejected."""
        if is_collection(value):
            return [self._deserialize(item, attr, data) for item in value]
        if isinstance(value, (float, int, str)):
            return value
        return self.fail('invalid', input=value)

    def check_type(self, value, attr, data):
        """Type-check the root value and, for collections, each contained item."""
        root_value = super().check_type(value, attr, data)
        if is_collection(value):
            # Explicit super(InstructionParameter, self) is required: the
            # zero-argument super() form does not work inside a comprehension.
            _ = [super(InstructionParameter, self).check_type(item, attr, data) for item in value]
        return root_value
class Recorder():
    """Layered environment wrapper adding optional stats/video/episode recording.

    Each enabled layer wraps the previous one; attribute access on the Recorder
    is transparently forwarded to the innermost wrapped env.
    """

    def __init__(self, env, directory, save_stats=True, save_video=True, save_episode=True, video_size=(512, 512)):
        # Recording layers are only attached when a directory is given.
        if directory:
            if save_stats:
                env = StatsRecorder(env, directory)
            if save_video:
                env = VideoRecorder(env, directory, video_size)
            if save_episode:
                env = EpisodeRecorder(env, directory)
        self._env = env

    def __getattr__(self, name):
        # Refuse dunder lookups so protocol probes (copy/pickle/etc.) don't
        # silently resolve against the wrapped env.
        if name.startswith('__'):
            raise AttributeError(name)
        return getattr(self._env, name)
class Item():
    """One DIODE dataset sample, identified by mode/split/scene/scan/stem path parts.

    NOTE(review): the bare field annotations plus the `cls`-first methods
    strongly suggest this was a @dataclass with @classmethod decorators that
    were lost in extraction — `load_split` constructs `Item(mode=..., ...)`,
    which requires an initializer. An explicit __init__ and the @classmethod
    decorators are restored here.
    """
    mode: str
    split: str
    scene: str
    scan: str
    stem: str

    def __init__(self, mode: str, split: str, scene: str, scan: str, stem: str) -> None:
        self.mode = mode
        self.split = split
        self.scene = scene
        self.scan = scan
        self.stem = stem

    @classmethod
    def get_split_file(cls, mode: str, split: str) -> Path:
        """CSV listing all files for the given (mode, split) combination."""
        return (PATHS['diode'] / 'data_list') / f'{mode}_{split}.csv'

    @classmethod
    def load_split(cls, mode: str, split: str) -> list['Item']:
        """Parse the split CSV into Item instances (first column holds the file path)."""
        lines = io.readlines(cls.get_split_file(mode, split))
        lines = [Path(l.split(',')[0]) for l in lines]
        # Path layout: .../<mode>/<split>/<scene>/<scan>/<stem>.png
        items = [Item(mode=parts[-5], split=parts[-4], scene=parts[-3], scan=parts[-2], stem=f.stem) for f in lines if (parts := f.parts)]
        return items

    def get_img_file(self) -> Path:
        return PATHS['diode'] / self.mode / self.split / self.scene / self.scan / f'{self.stem}.png'

    def get_depth_file(self) -> Path:
        return PATHS['diode'] / self.mode / self.split / self.scene / self.scan / f'{self.stem}_depth.npy'

    def get_mask_file(self) -> Path:
        return PATHS['diode'] / self.mode / self.split / self.scene / self.scan / f'{self.stem}_depth_mask.npy'

    def load_img(self) -> Image:
        return Image.open(self.get_img_file())

    def load_depth(self) -> ty.A:
        # Depth maps ship as .npy; cast to float32 for downstream torch use.
        return np.load(self.get_depth_file()).astype(np.float32)

    def load_mask(self) -> ty.A:
        return np.load(self.get_mask_file()).astype(bool)
class TFDPRPretrainedQuestionEncoder():
    """Import-guard stub for the TensorFlow DPR question encoder.

    Any attempt to instantiate it immediately calls requires_tf, which
    presumably raises when TensorFlow is not installed — confirm against the
    requires_tf helper.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
def dict_of_list__to__list_of_dicts(dict, n_items):
    """Transpose a dict of parallel lists into a list of per-item dicts.

    Example: {'a': [1, 2], 'b': [3, 4]} with n_items=2 ->
    [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}].

    The parameter keeps its original name `dict` (shadowing the builtin) to
    preserve the keyword-call interface.
    """
    return [{key: values[i] for key, values in dict.items()} for i in range(n_items)]
class exkp(nn.Module):
    """Stacked-hourglass keypoint network ("exkp", CornerNet/ExtremeNet style).

    Builds `nstack` hourglass modules; each stack's feature map feeds one
    output head per entry in `heads`, dispatched by head-name substring
    ('hm' -> heatmap head, 'poly' -> polygon head, otherwise regression head).
    All `make_*` arguments are factory callables so variants can swap layers.
    """
    def __init__(self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256, make_tl_layer=None, make_br_layer=None, make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer, make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer, make_poly_layer=make_poly_layer, make_up_layer=make_layer, make_low_layer=make_layer, make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer, kp_layer=residual):
        super(exkp, self).__init__()
        self.nstack = nstack
        self.heads = heads
        curr_dim = dims[0]
        # Stem: 7x7 stride-2 conv + stride-2 residual (4x downsampling) unless a custom `pre` is given.
        self.pre = (nn.Sequential(convolution(7, 3, 128, stride=2), residual(3, 128, 256, stride=2)) if (pre is None) else pre)
        self.kps = nn.ModuleList([kp_module(n, dims, modules, layer=kp_layer, make_up_layer=make_up_layer, make_low_layer=make_low_layer, make_hg_layer=make_hg_layer, make_hg_layer_revr=make_hg_layer_revr, make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer, make_merge_layer=make_merge_layer) for _ in range(nstack)])
        self.cnvs = nn.ModuleList([make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)])
        # Inter-stack layers: there are nstack - 1 junctions between consecutive stacks.
        self.inters = nn.ModuleList([make_inter_layer(curr_dim) for _ in range((nstack - 1))])
        self.inters_ = nn.ModuleList([nn.Sequential(nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False), nn.BatchNorm2d(curr_dim)) for _ in range((nstack - 1))])
        self.cnvs_ = nn.ModuleList([nn.Sequential(nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False), nn.BatchNorm2d(curr_dim)) for _ in range((nstack - 1))])
        # One head module per stack, registered under the head's own name.
        for head in heads.keys():
            if ('hm' in head):
                module = nn.ModuleList([make_heat_layer(cnv_dim, curr_dim, heads[head]) for _ in range(nstack)])
                self.__setattr__(head, module)
                for heat in self.__getattr__(head):
                    # -2.19 bias init keeps initial heatmap activations low
                    # (presumably the usual focal-loss prior trick — confirm).
                    heat[(- 1)].bias.data.fill_((- 2.19))
            elif ('poly' in head):
                module = nn.ModuleList([make_poly_layer(cnv_dim, curr_dim, heads[head]) for _ in range(nstack)])
                self.__setattr__(head, module)
            else:
                module = nn.ModuleList([make_regr_layer(cnv_dim, curr_dim, heads[head]) for _ in range(nstack)])
                self.__setattr__(head, module)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, image):
        """Run every stack; returns one dict of head outputs per stack (for intermediate supervision)."""
        inter = self.pre(image)
        outs = []
        for ind in range(self.nstack):
            (kp_, cnv_) = (self.kps[ind], self.cnvs[ind])
            kp = kp_(inter)
            cnv = cnv_(kp)
            out = {}
            for head in self.heads:
                layer = self.__getattr__(head)[ind]
                y = layer(cnv)
                out[head] = y
            outs.append(out)
            # Between stacks: fuse previous features with current conv output, then refine.
            if (ind < (self.nstack - 1)):
                inter = (self.inters_[ind](inter) + self.cnvs_[ind](cnv))
                inter = self.relu(inter)
                inter = self.inters[ind](inter)
        return outs
# NOTE(review): the line "(before=[init], after=[post])" that preceded this
# function is the argument list of a decorator whose name was lost in
# extraction; it is not valid Python on its own, so it is preserved as this
# comment — restore the original decorator (e.g. @register(before=[init],
# after=[post])) if the source framework is known.
def con_train_wbcluster():
    """Configure and launch the wb:cluster posterior-regularized training grid search."""
    USR.set('dataset', 'data/wb_aligned/')
    USR.set('decoder', 'crf')
    USR.set('L', '8')
    USR.set('layers', '2')
    USR.set('min_epochs', '8')
    USR.set('weight_decay', '0.0')
    USR.set('posterior_reg', '1')
    # Base command is rendered from the merged settings map (ALL()); the grid
    # entries below fill the remaining {config} placeholders per run.
    command = ('%(S_python_itrptr)s %(S_python_dir)s/train.py --data %(U_dataset)s --save %(S_model)s/{config} --save_out %(S_output)s/{config} --epoch 55 --data_mode real --optim_algo 1 --L %(U_L)s --decoder %(U_decoder)s --cuda --one_rnn --sep_attn --option train' % ALL())
    command += ' --posterior_reg 1 --layers 2 --train_q_epoch 5 --weight_decay 0.0 --full_independence 3'
    # '|'-separated values expand into multiple grid points.
    search_list = [('pr_reg_style', 'wb:cluster'), ('bsz', '10'), ('pr_coef', '25|15|5'), ('hard_code', 'no'), ('decoder_constraint', 'no'), ('encoder_constraint', 'yes'), ('tagset_size', '70'), ('max_mbs_per_epoch', '25000|10000'), ('use_elmo', 'no'), ('elmo_style', '1'), ('seed', '0'), ('thresh', '1000'), ('hidden_dim', '500'), ('embedding_dim', '400'), ('lr_p', '0.0005'), ('lr_q', '0.001'), ('sample_size', '2'), ('dual_attn', 'yes'), ('trans_unif', 'yes')]
    grid_search((lambda map: basic_func(command, map)), search_list, seed=1)
    return
class Kadid10k(data.Dataset):
    """KADID-10k image-quality dataset: pairs each distorted image with its DMOS label.

    Args:
        root: dataset root with reference_images/, distorted_images/, dmos.csv.
        index: reference-image indices selecting which references (and all
            their distortions) belong to this split.
        transform: transform applied to each loaded PIL image.
        patch_num: number of times each (image, label) pair is repeated
            (patch sampling happens in `transform`).
    """

    def __init__(self, root, index, transform, patch_num):
        refpath = os.path.join(root, 'reference_images')
        refname = getTIDFileName(refpath, '.png.PNG')
        imgnames = []
        target = []
        refnames_all = []
        csv_file = os.path.join(root, 'dmos.csv')
        with open(csv_file) as f:
            reader = csv.DictReader(f)
            for row in reader:
                imgnames.append(row['dist_img'])
                # Characters 1..2 of the ref filename encode the reference id
                # (presumably names like 'I01...' — confirm against dmos.csv).
                refnames_all.append(row['ref_img'][1:3])
                target.append(np.array(float(row['dmos'])).astype(np.float32))
        labels = np.array(target).astype(np.float32)
        refnames_all = np.array(refnames_all)
        refname.sort()
        sample = []
        # Original code shadowed `item` across two nested loops and compared
        # `== True` inside np.where; cleaned up without behavior change.
        for ref_idx in index:
            # All distorted images derived from this reference image.
            dist_indices = np.where(refname[ref_idx] == refnames_all)[0].tolist()
            for dist_idx in dist_indices:
                for _ in range(patch_num):
                    sample.append((os.path.join(root, 'distorted_images', imgnames[dist_idx]), labels[dist_idx]))
        self.samples = sample
        self.transform = transform

    def __getitem__(self, index):
        """Return (transformed image, dmos label) for the given sample index."""
        path, target = self.samples[index]
        sample = pil_loader(path)
        sample = self.transform(sample)
        return (sample, target)

    def __len__(self):
        return len(self.samples)
class ChineseBertDataset(Dataset):
    """Base dataset for ChineseBERT-style models: wraps a word-piece tokenizer
    plus the model's pinyin lookup tables, and converts sentences into
    per-token pinyin feature ids aligned with the tokenizer output.

    Subclasses must implement get_lines().
    """
    def __init__(self, data_path, chinese_bert_path, max_length: int=512):
        """
        Args:
            data_path: path of the raw data file (consumed by subclasses).
            chinese_bert_path: ChineseBERT model directory containing
                vocab.txt and a config/ folder with the pinyin maps.
            max_length: maximum token sequence length (default 512).
        """
        super().__init__()
        self.vocab_file = os.path.join(chinese_bert_path, 'vocab.txt')
        self.config_path = os.path.join(chinese_bert_path, 'config')
        self.data_path = data_path
        self.max_length = max_length
        self.tokenizer = BertWordPieceTokenizer(self.vocab_file)
        # Pinyin tables shipped with the pretrained model:
        #   pinyin_map.json    -> per-character alphabet index ('char2idx')
        #   id2pinyin.json     -> id -> pinyin string mapping
        #   pinyin2tensor.json -> precomputed 8-slot encodings per pinyin string
        with open(os.path.join(self.config_path, 'pinyin_map.json'), encoding='utf8') as fin:
            self.pinyin_dict = json.load(fin)
        with open(os.path.join(self.config_path, 'id2pinyin.json'), encoding='utf8') as fin:
            self.id2pinyin = json.load(fin)
        with open(os.path.join(self.config_path, 'pinyin2tensor.json'), encoding='utf8') as fin:
            self.pinyin2tensor = json.load(fin)
    def get_lines(self):
        """Return the raw data lines; must be overridden by subclasses."""
        raise NotImplementedError
    def convert_sentence_to_pinyin_ids(self, sentence: str, tokenizer_output: tokenizers.Encoding) -> List[List[int]]:
        """Map each token of `tokenizer_output` to a fixed 8-slot pinyin id vector.

        Non-Chinese characters and tokens spanning more than one character get
        all-zero vectors.
        """
        # heteronym=True yields all candidate pronunciations; item[0] keeps the first.
        pinyin_list = pinyin(sentence, style=Style.TONE3, heteronym=True, errors=(lambda x: [['not chinese'] for _ in x]))
        pinyin_locs = {}
        for (index, item) in enumerate(pinyin_list):
            pinyin_string = item[0]
            if (pinyin_string == 'not chinese'):
                continue
            if (pinyin_string in self.pinyin2tensor):
                # Fast path: precomputed encoding for this pinyin string.
                pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
            else:
                ids = ([0] * 8)
                for (i, p) in enumerate(pinyin_string):
                    if (p not in self.pinyin_dict['char2idx']):
                        # One unknown character invalidates the whole encoding.
                        ids = ([0] * 8)
                        break
                    ids[i] = self.pinyin_dict['char2idx'][p]
                pinyin_locs[index] = ids
        pinyin_ids = []
        # Align per-character pinyin with tokenizer offsets: only tokens that
        # cover exactly one source character carry pinyin information.
        for (idx, (token, offset)) in enumerate(zip(tokenizer_output.tokens, tokenizer_output.offsets)):
            if ((offset[1] - offset[0]) != 1):
                pinyin_ids.append(([0] * 8))
                continue
            if (offset[0] in pinyin_locs):
                pinyin_ids.append(pinyin_locs[offset[0]])
            else:
                pinyin_ids.append(([0] * 8))
        return pinyin_ids
    def convert_sentence_to_shengmu_yunmu_shengdiao_ids(self, sentence: str, tokenizer_output: tokenizers.Encoding) -> List[List[int]]:
        """Map each token to an (initial, final, tone) label triple via pho_convertor.

        Same alignment rules as convert_sentence_to_pinyin_ids, but labels come
        from pho_convertor.get_sm_ym_sd_labels and the zero value is (0, 0, 0).
        """
        # neutral_tone_with_five=True encodes the neutral tone as tone 5.
        pinyin_list = pinyin(sentence, style=Style.TONE3, neutral_tone_with_five=True, heteronym=True, errors=(lambda x: [['not chinese'] for _ in x]))
        pinyin_locs = {}
        for (index, item) in enumerate(pinyin_list):
            pinyin_string = item[0]
            if (pinyin_string == 'not chinese'):
                continue
            pinyin_locs[index] = pho_convertor.get_sm_ym_sd_labels(pinyin_string)
        pinyin_labels = []
        for (idx, (token, offset)) in enumerate(zip(tokenizer_output.tokens, tokenizer_output.offsets)):
            if ((offset[1] - offset[0]) != 1):
                pinyin_labels.append((0, 0, 0))
                continue
            if (offset[0] in pinyin_locs):
                pinyin_labels.append(pinyin_locs[offset[0]])
            else:
                pinyin_labels.append((0, 0, 0))
        return pinyin_labels
# NOTE(review): the original had the bare call `_task('speech_to_text')` on the
# line before the class — almost certainly a registration decorator (fairseq's
# @register_task) whose '@' was lost in extraction; restored as a decorator.
@_task('speech_to_text')
class SpeechToTextTask(LegacyFairseqTask):
    """Fairseq task for speech-to-text training/inference (ASR, ST, MT, or the
    stacked curriculum variant selected via --s2t-task)."""

    @staticmethod
    def add_args(parser):
        """Register task-specific CLI arguments.

        Defined without self/cls in the original — evidently a @staticmethod
        whose decorator was stripped; restored.
        """
        parser.add_argument('data', help='manifest root path')
        parser.add_argument('--config-yaml', type=str, default='config.yaml', help='Configuration YAML filename (under manifest root)')
        parser.add_argument('--max-audio-positions', default=6000, type=int, metavar='N', help='max number of tokens in the audio sequence')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        parser.add_argument('--s2t-task', choices=['asr', 'st', 'mt', 'stack'], help='specific task (asr/st/mt/curriculum schedule sampling)')

    def __init__(self, args, tgt_dict):
        super().__init__(args)
        self.tgt_dict = tgt_dict
        self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Load the target dictionary, validate split names, and build the task.

        The `cls`-first signature shows this was a @classmethod; decorator restored.
        """
        data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
        dict_path = op.join(args.data, data_cfg.vocab_filename)
        if not op.isfile(dict_path):
            raise FileNotFoundError(f'Dict not found: {dict_path}')
        tgt_dict = Dictionary.load(dict_path)
        logger.info(f'dictionary size ({data_cfg.vocab_filename}): {len(tgt_dict):,}')
        if getattr(args, 'train_subset', None) is not None:
            if not all(s.startswith('train') for s in args.train_subset.split(',')):
                raise ValueError('Train splits should be named like "train*".')
        return cls(args, tgt_dict)

    def build_criterion(self, args):
        from fairseq import criterions
        # With a language tag prepended as BOS, the loss must skip that position.
        if self.data_cfg.prepend_tgt_lang_tag and (args.ignore_prefix_size != 1):
            raise ValueError('Please set "--ignore-prefix-size 1" since target language ID token is prepended as BOS.')
        return criterions.build_criterion(args, self)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a tsv-described split into self.datasets[split]."""
        is_train_split = split.startswith('train')
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(self.args.data, self.args.s2t_task, self.data_cfg, split, self.tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed)

    @property
    def target_dictionary(self):
        # NOTE(review): fairseq tasks conventionally expose the dictionaries as
        # properties (the stripped-decorator pattern elsewhere in this file
        # supports that reading) — confirm against callers.
        return self.tgt_dict

    @property
    def source_dictionary(self):
        # Speech input has no source dictionary.
        return None

    def max_positions(self):
        """(max source/audio positions, max target positions) for batching."""
        return ((self.args.max_source_positions if (self.args.s2t_task == 'mt') else self.args.max_audio_positions), self.args.max_target_positions)

    def build_model(self, args):
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        self.tokenizer = self.build_tokenizer(args)
        self.bpe = self.build_bpe(args)
        return super(SpeechToTextTask, self).build_model(args)

    def build_generator(self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None):
        if self.data_cfg.prepend_tgt_lang_tag and (args.prefix_size != 1):
            raise ValueError('Please set "--prefix-size 1" since target language ID token is prepended as BOS.')
        lang_token_ids = {i for (s, i) in self.tgt_dict.indices.items() if SpeechToTextDataset.is_lang_tag(s)}
        # NOTE(review): the passed-in seq_gen_cls/extra_gen_cls_kwargs are
        # deliberately overridden here (matches the original) — confirm intended.
        extra_gen_cls_kwargs = {'symbols_to_strip_from_output': lang_token_ids}
        return super().build_generator(models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs)

    def build_tokenizer(self, args):
        logger.info(f'pre-tokenizer: {self.data_cfg.pre_tokenizer}')
        return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))

    def build_bpe(self, args):
        logger.info(f'tokenizer: {self.data_cfg.bpe_tokenizer}')
        return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))

    def get_interactive_tokens_and_lengths(self, lines, encode_fn):
        """For interactive mode, lengths are frame counts of the audio inputs."""
        n_frames = [get_features_or_waveform(p).shape[0] for p in lines]
        return (lines, n_frames)

    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        return SpeechToTextDataset('interactive', False, self.data_cfg, src_tokens, src_lengths)
class BertModelTest(unittest.TestCase):
class BertModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = BertModelTest.ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = BertModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = BertModelTest.ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = BertConfig(vocab_size_or_config_json_file=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result['loss'].size()), [])
def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertModel(config=config)
model.eval()
(all_encoder_layers, pooled_output) = model(input_ids, token_type_ids, input_mask)
outputs = {'sequence_output': all_encoder_layers[(- 1)], 'pooled_output': pooled_output, 'all_encoder_layers': all_encoder_layers}
return outputs
def check_bert_model_output(self, result):
self.parent.assertListEqual([size for layer in result['all_encoder_layers'] for size in layer.size()], ([self.batch_size, self.seq_length, self.hidden_size] * self.num_hidden_layers))
self.parent.assertListEqual(list(result['sequence_output'].size()), [self.batch_size, self.seq_length, self.hidden_size])
self.parent.assertListEqual(list(result['pooled_output'].size()), [self.batch_size, self.hidden_size])
def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForMaskedLM(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels)
prediction_scores = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'prediction_scores': prediction_scores}
return outputs
def check_bert_for_masked_lm_output(self, result):
self.parent.assertListEqual(list(result['prediction_scores'].size()), [self.batch_size, self.seq_length, self.vocab_size])
def create_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForNextSentencePrediction(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
seq_relationship_score = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'seq_relationship_score': seq_relationship_score}
return outputs
def check_bert_for_next_sequence_prediction_output(self, result):
self.parent.assertListEqual(list(result['seq_relationship_score'].size()), [self.batch_size, 2])
def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForPreTraining(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels)
(prediction_scores, seq_relationship_score) = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'prediction_scores': prediction_scores, 'seq_relationship_score': seq_relationship_score}
return outputs
def check_bert_for_pretraining_output(self, result):
self.parent.assertListEqual(list(result['prediction_scores'].size()), [self.batch_size, self.seq_length, self.vocab_size])
self.parent.assertListEqual(list(result['seq_relationship_score'].size()), [self.batch_size, 2])
def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForQuestionAnswering(config=config)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels)
(start_logits, end_logits) = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'start_logits': start_logits, 'end_logits': end_logits}
return outputs
def check_bert_for_question_answering_output(self, result):
self.parent.assertListEqual(list(result['start_logits'].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result['end_logits'].size()), [self.batch_size, self.seq_length])
def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForSequenceClassification(config=config, num_labels=self.num_labels)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
logits = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'logits': logits}
return outputs
def check_bert_for_sequence_classification_output(self, result):
self.parent.assertListEqual(list(result['logits'].size()), [self.batch_size, self.num_labels])
def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
model = BertForTokenClassification(config=config, num_labels=self.num_labels)
model.eval()
loss = model(input_ids, token_type_ids, input_mask, token_labels)
logits = model(input_ids, token_type_ids, input_mask)
outputs = {'loss': loss, 'logits': logits}
return outputs
def check_bert_for_token_classification_output(self, result):
self.parent.assertListEqual(list(result['logits'].size()), [self.batch_size, self.seq_length, self.num_labels])
def test_default(self):
self.run_tester(BertModelTest.BertModelTester(self))
def test_config_to_json_string(self):
config = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
obj = json.loads(config.to_json_string())
self.assertEqual(obj['vocab_size'], 99)
self.assertEqual(obj['hidden_size'], 37)
def run_tester(self, tester):
config_and_inputs = tester.prepare_config_and_inputs()
output_result = tester.create_bert_model(*config_and_inputs)
tester.check_bert_model_output(output_result)
output_result = tester.create_bert_for_masked_lm(*config_and_inputs)
tester.check_bert_for_masked_lm_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_next_sequence_prediction(*config_and_inputs)
tester.check_bert_for_next_sequence_prediction_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_pretraining(*config_and_inputs)
tester.check_bert_for_pretraining_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_question_answering(*config_and_inputs)
tester.check_bert_for_question_answering_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_sequence_classification(*config_and_inputs)
tester.check_bert_for_sequence_classification_output(output_result)
tester.check_loss_output(output_result)
output_result = tester.create_bert_for_token_classification(*config_and_inputs)
tester.check_bert_for_token_classification_output(output_result)
tester.check_loss_output(output_result)
def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
    """Create a random torch.long tensor of *shape* with values in [0, vocab_size).

    `rng` defaults to a fresh `random.Random()`; `name` is accepted for API
    compatibility but unused.
    """
    if rng is None:
        rng = random.Random()
    total_elems = 1
    for extent in shape:
        total_elems *= extent
    values = [rng.randint(0, vocab_size - 1) for _ in range(total_elems)]
    return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()
def load_latest_parameters(folder):
    """Locate the newest parameter YAML under *folder*, log it, and load it."""
    latest_yaml = get_latest_parameter_file(folder)
    logging.info('using {}'.format(latest_yaml))
    return load_from_yaml_file(latest_yaml)
_torch
class ScheduleInitTest(unittest.TestCase):
    """Checks each LR schedule's learning-rate trace and that the trace
    survives a scheduler save/reload round trip."""

    m = (torch.nn.Linear(50, 50) if is_torch_available() else None)
    optimizer = (AdamW(m.parameters(), lr=10.0) if is_torch_available() else None)
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def _check_schedule(self, make_scheduler, expected, tol=None):
        """Compare a fresh scheduler's LR trace to *expected* (exactly, or
        within *tol*), then rebuild it and verify the save/reload trace
        matches the original trace."""
        lrs = unwrap_schedule(make_scheduler(), self.num_steps)
        self.assertEqual(len(lrs[0]), 1)
        observed = [l[0] for l in lrs]
        if tol is None:
            self.assertListEqual(observed, expected)
        else:
            self.assertListAlmostEqual(observed, expected, tol=tol)
        lrs_2 = unwrap_and_save_reload_schedule(make_scheduler(), self.num_steps)
        self.assertListEqual(observed, [l[0] for l in lrs_2])

    def test_constant_scheduler(self):
        self._check_schedule(
            lambda: get_constant_schedule(self.optimizer),
            [10.0] * self.num_steps)

    def test_warmup_constant_scheduler(self):
        self._check_schedule(
            lambda: get_constant_schedule_with_warmup(self.optimizer, num_warmup_steps=4),
            [2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0])

    def test_warmup_linear_scheduler(self):
        self._check_schedule(
            lambda: get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10),
            [5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25, 0.0])

    def test_warmup_cosine_scheduler(self):
        self._check_schedule(
            lambda: get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10),
            [5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38, 0.0],
            tol=0.01)

    def test_warmup_cosine_hard_restart_scheduler(self):
        self._check_schedule(
            lambda: get_cosine_with_hard_restarts_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_cycles=2, num_training_steps=10),
            [5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46, 0.0],
            tol=0.01)
def train(args):
    """Fit a Nanogpt model with PyTorch Lightning using args' data/optim settings."""
    global local_rank  # declared for module-level parity; not assigned here
    data = GPTDataModule(data_dir=args.data_dir, batch_size=args.batch_size, block_size=args.block_size)
    gpt = Nanogpt(args, ctx=None)
    # Keep only the best train-loss checkpoint (plus the last one), refreshed every 1000 steps.
    ckpt_cb = pl.callbacks.ModelCheckpoint(
        save_top_k=1, verbose=True, every_n_train_steps=1000,
        monitor='train_loss', mode='min', save_last=True)
    trainer = pl.Trainer(
        max_steps=args.max_iters,
        accumulate_grad_batches=args.gradient_accumulation_steps,
        callbacks=[UpdateDataStepCallback(), ckpt_cb])
    trainer.fit(gpt, data)
def voxelized_pointcloud(model, kdtree, res):
    """Bit-packed occupancy grid for a point cloud.

    `kdtree` indexes the res**3 grid cells; each input point marks its
    nearest cell occupied, and the 0/1 grid is packed 8 cells per byte.
    """
    grid_occupancy = np.zeros(res ** 3, dtype=np.int8)
    _, nearest_cells = kdtree.query(model)
    grid_occupancy[nearest_cells] = 1
    return np.packbits(grid_occupancy)
class RemoteNXDOManagerClient(NXDOManager):
    """gRPC client proxy for an NXDOManager served in another process.

    Every method forwards to the remote stub; payloads are JSON-encoded
    strategy specs and metadata.
    """

    def __init__(self, n_players, port=4545, remote_server_host='127.0.0.1'):
        # Raise both message-size caps so large strategy-spec payloads fit.
        self._stub = NXDOManagerStub(channel=grpc.insecure_channel(target=f'{remote_server_host}:{port}', options=[('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH), ('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH)]))
        self._n_players = n_players

    def n_players(self) -> int:
        """Number of players this client was configured with (not fetched remotely)."""
        return self._n_players

    def get_log_dir(self) -> str:
        """Ask the remote manager for its log directory path."""
        return self._stub.GetLogDir(Empty()).string

    def get_manager_metadata(self) -> dict:
        """Fetch and JSON-decode the remote manager's metadata blob."""
        response: NXDOMetadata = self._stub.GetManagerMetaData(Empty())
        return json.loads(response.json_metadata)

    def claim_new_active_policy_for_player(self, player) -> Union[(Tuple[(Dict[(int, StrategySpec)], Dict[(int, List[StrategySpec])], int)], Tuple[(None, None, None)])]:
        """Claim the next best-response policy slot for *player*.

        Returns (metanash_specs_for_players, delegate_specs_for_players,
        policy_num), where either dict may be None when the server sends an
        empty list. A policy_num of -1 from the server means no slot is
        available and (None, None, None) is returned.
        """
        request = NXDOPlayer(player=player)
        response: NXDONewBestResponseParams = self._stub.ClaimNewActivePolicyForPlayer(request)
        if (response.policy_num == (- 1)):
            return (None, None, None)
        # Server either sends one spec per player or an empty list.
        assert (len(response.metanash_specs_for_players.policy_spec_list) in [self.n_players(), 0])
        assert (len(response.delegate_specs_for_players) in [self.n_players(), 0])
        metanash_json_specs_for_other_players = [elem.policy_spec_json for elem in response.metanash_specs_for_players.policy_spec_list]
        metanash_specs_for_players = {player: StrategySpec.from_json(json_spec) for (player, json_spec) in enumerate(metanash_json_specs_for_other_players)}
        delegate_json_spec_lists_for_other_players = [[elem.policy_spec_json for elem in player_delegate_list.policy_spec_list] for player_delegate_list in response.delegate_specs_for_players]
        delegate_specs_for_players = {player: [StrategySpec.from_json(json_spec) for json_spec in player_delegate_json_list] for (player, player_delegate_json_list) in enumerate(delegate_json_spec_lists_for_other_players)}
        # Normalize "empty" to None for callers.
        if (len(metanash_specs_for_players) == 0):
            metanash_specs_for_players = None
        if (len(delegate_specs_for_players) == 0):
            delegate_specs_for_players = None
        return (metanash_specs_for_players, delegate_specs_for_players, response.policy_num)

    def submit_final_br_policy(self, player, policy_num, metadata_dict):
        """Upload the finished best-response policy's metadata.

        Raises ValueError when metadata_dict is not JSON serializable.
        """
        try:
            metadata_json = json.dumps(obj=metadata_dict)
        except (TypeError, OverflowError) as json_err:
            raise ValueError(f'''metadata_dict must be JSON serializable.When attempting to serialize, got this error:
{json_err}''')
        request = NXDOPolicyMetadataRequest(player=player, policy_num=policy_num, metadata_json=metadata_json)
        self._stub.SubmitFinalBRPolicy(request)

    def is_policy_fixed(self, player, policy_num):
        """Ask the server whether the given (player, policy_num) is frozen."""
        response: NXDOConfirmation = self._stub.IsPolicyFixed(NXDOPlayerAndPolicyNum(player=player, policy_num=policy_num))
        return response.result
def _make_model(args, device):
    """Build the optical-flow model selected by args.model_type, optionally
    loading pretrained weights and wrapping it for multi-GPU execution.

    Raises:
        ValueError: if args.model_type is not a supported model.
    """
    logger.info(f'Using {args.model_type} Model ......')
    logger.info(f'Use {args.smooth_loss} Smooth Loss')
    logger.info(f'Use {args.smooth_mask} Smooth Mask')
    if (args.model_type == 'mask_raft'):
        model = mask_RAFT(args.smooth_loss, args.smooth_mask, args.semantic_loss, args.seq_gamma).to(device)
        logger.info(f'seq_gamma : {args.seq_gamma}')
    else:
        # Previously an unknown model_type fell through and crashed later with
        # a NameError on `model`; fail fast with a clear message instead.
        raise ValueError(f'Unsupported model_type: {args.model_type}')
    # Count trainable parameters only.
    n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info('Model Setting ...')
    logger.info(f'Number of params: {n_params}')
    if (args.pretrained_model is not None):
        logger.info(f'Load Pretrained model from {args.pretrained_model}')
        model.load_state_dict(torch.load(args.pretrained_model)['modelstate'])
    if (args.sync_bn and (args.local_rank != (- 1))):
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        logger.info('Using SyncBatchNorm')
    if (not args.cpu):
        if ((args.local_rank == (- 1)) and (torch.cuda.device_count() > 1)):
            logger.warning('For multi gpus, you will use DataParallel. To spped up, you can try to use torch.distributed.launch for distribution')
            model = torch.nn.DataParallel(model)
        elif (args.local_rank != (- 1)):
            # Fix: DDP's output_device expects an int or torch.device, not a list.
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
    return model
def query_task_status(task_id, db_path):
    """Look up one task row in the SQLite task database.

    Returns a dict with 'status', 'optimized_result' (deserialized + stringified),
    and 'result_path'.

    Raises:
        FileNotFoundError: if db_path does not exist.
        LookupError: if no task with task_id is in the database.

    Previously both cases left `res` as None and crashed with an opaque
    TypeError on `res[0]`.
    """
    if not os.path.isfile(db_path):
        raise FileNotFoundError(f'task database not found: {db_path}')
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute('select status, result, q_model_path from task where id=?', (task_id,))
        res = cursor.fetchone()
        cursor.close()
    finally:
        # Close the connection even if the query raises.
        conn.close()
    if res is None:
        raise LookupError(f'no task with id {task_id} in {db_path}')
    return {'status': res[0], 'optimized_result': dict_to_str((deserialize(res[1]) if res[1] else res[1])), 'result_path': res[2]}
def _setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True |
def view_optimized_epoch_diff_of_models(acc_threshold):
    """Bar-plot, per model, the average standard-epoch count minus the average
    new-epoch count at the given accuracy threshold."""
    stats = [analyze_hp_grid_data(model=name, acc_threshold=acc_threshold) for name in model_names]
    diffs = [s['avg_std_epoch'] - s['avg_new_epoch'] for s in stats]
    plt.bar(model_names, diffs)
    plt.show()
def dynpEnsemble(cost, data, num_agg_func, true_cp=true_cp):
    """Run DynpEnsembling change-point detection on each dataset and score
    all predictions with the NAB metric.

    Each dataset is standardized, then a single breakpoint is predicted; if
    detection fails the all-zero prediction is kept as a fallback.
    """
    predicted_cp = []
    for dataset in data:
        stsc = StandardScaler()
        signal = stsc.fit_transform(dataset)
        algo = rpt.DynpEnsembling(custom_cost=cost, jump=1, ensembling=num_agg_func)
        single_predicted_cp = pd.Series(data=0, index=true_cp[0].index)
        try:
            algo.fit(signal)
            my_bkps = algo.predict(n_bkps=1)
            # my_bkps[-1] is the series end, not a change point — drop it.
            single_predicted_cp[single_predicted_cp.index[my_bkps[:(- 1)]]] = 1
            predicted_cp.append(single_predicted_cp)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Any detection failure falls back
            # to the all-zero prediction for this dataset.
            predicted_cp.append(single_predicted_cp)
    nab = evaluating_change_point(true_cp, predicted_cp, metric='nab', numenta_time='288 min')
    return nab
def GetCommandOutput(command):
    """Run *command* through the shell and return its stdout as stripped lines.

    NOTE(review): the command string goes to a shell via os.popen — never
    call this with untrusted input.
    """
    # `with` guarantees the pipe is closed even if reading raises
    # (the original leaked the pipe on a readlines failure).
    with os.popen(command, 'r') as f:
        return [line.strip() for line in f.readlines()]
class KGProcessor(DataProcessor):
    """Processor for knowledge-graph triple data.

    Builds entity/relation vocabularies (ids appended after the tokenizer
    vocab), creates positive and corrupted (negative) triple examples,
    tokenizes them into fixed-length features, and caches every intermediate
    artifact on disk so repeated runs (and non-master distributed workers)
    can just load.
    """

    def __init__(self, data_args, tokenizer, is_world_master, must_load=False):
        self.data_dir = data_args.data_dir
        self.data_split = data_args.data_split
        self.rank = data_args.rank
        self.num_split = data_args.num_split
        self.no_mid = data_args.no_mid
        self.tokenizer = tokenizer
        # Only the master process writes caches to disk.
        self.is_world_master = is_world_master
        self.only_corrupt_entity = data_args.only_corrupt_entity
        self.vocab_size = len(tokenizer)
        self.max_seq_length = data_args.max_seq_length
        self.no_text = data_args.no_text
        self.text_sep_token = data_args.text_sep_token
        if self.no_text:
            # Without text the input is only [CLS] + head/rel/tail/[SEP].
            self.max_seq_length = 5
        self.data_cache_dir = (data_args.data_cache_dir if (data_args.data_cache_dir is not None) else data_args.data_dir)
        os.makedirs(self.data_cache_dir, exist_ok=True)
        self.num_neg = data_args.num_neg
        self.must_load = must_load
        if must_load:
            # Fail early when the pre-generated caches are missing.
            self.check_all_data_saved()
        self.build_ent()
        self.build_rel()
        self.ent_size = len(self.ent_list)
        self.rel_size = len(self.rel_list)
        # Entity ids start right after the tokenizer vocab; relation ids follow entities.
        self.name2id = {e: (i + self.vocab_size) for (i, e) in enumerate(self.ent_list)}
        self.id2name = {(i + self.vocab_size): e for (i, e) in enumerate(self.ent_list)}
        self.name2id.update({r: ((i + self.vocab_size) + self.ent_size) for (i, r) in enumerate(self.rel_list)})
        self.id2name.update({((i + self.vocab_size) + self.ent_size): r for (i, r) in enumerate(self.rel_list)})
        assert (len(self.name2id) == len(self.id2name) == (self.ent_size + self.rel_size))
        if data_args.type_constrain:
            self.build_type_constrain()
        self.train_file = data_args.train_file

    def check_file_exists(self, file_path):
        """Assert that a pre-generated cache file is present on disk."""
        assert os.path.exists(file_path), f"expected to load data from {file_path} but it doesn't exist, please run generate_data.py (with the same args and --do_train, --do_eval, --do_predict) first"

    def check_all_data_saved(self):
        """Verify that vocab caches and train/dev/test dataset caches all exist."""
        ent_cache_file = os.path.join(self.data_cache_dir, 'entity.pt')
        rel_cache_file = os.path.join(self.data_cache_dir, 'relation.pt')
        self.check_file_exists(ent_cache_file)
        self.check_file_exists(rel_cache_file)
        # Cache names encode the settings that change the serialized features.
        train_data_file = f'train_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        dev_data_file = f'dev_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        test_data_file = f'test_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        train_data_file = os.path.join(self.data_cache_dir, train_data_file)
        dev_data_file = os.path.join(self.data_cache_dir, dev_data_file)
        test_data_file = os.path.join(self.data_cache_dir, test_data_file)
        self.check_file_exists(train_data_file)
        self.check_file_exists(dev_data_file)
        self.check_file_exists(test_data_file)

    def get_train_examples(self, epoch, train_file):
        """Create (or load cached) training examples/features for one epoch."""
        data_dir = self.data_dir
        cached_example_path = os.path.join(self.data_cache_dir, f'cached_train_examples_neg{self.num_neg}_epoch{epoch}')
        os.makedirs(cached_example_path, exist_ok=True)
        (examples, features) = self._create_examples_and_features(os.path.join(data_dir, train_file), cached_example_path, self.num_neg)
        return (examples, features)

    def get_dev_examples(self):
        """Create (or load cached) dev examples/features from dev.tsv."""
        data_dir = self.data_dir
        cached_example_path = os.path.join(self.data_cache_dir, f'cached_dev_examples_{self.num_neg}')
        os.makedirs(cached_example_path, exist_ok=True)
        (examples, features) = self._create_examples_and_features(os.path.join(data_dir, 'dev.tsv'), cached_example_path, self.num_neg)
        return (examples, features)

    def get_test_examples(self):
        """Create (or load cached) test examples/features; when data_split is
        set, each rank reads only its own shard. No negatives are generated."""
        data_dir = self.data_dir
        cached_example_path = os.path.join(self.data_cache_dir, 'cached_test_examples')
        os.makedirs(cached_example_path, exist_ok=True)
        if self.data_split:
            (examples, features) = self._create_examples_and_features(os.path.join(data_dir, f'test-p{(self.rank + 1)}-of-{self.num_split}.tsv'), cached_example_path)
        else:
            (examples, features) = self._create_examples_and_features(os.path.join(data_dir, 'test.tsv'), cached_example_path)
        return (examples, features)

    def build_ent(self):
        """Load or build entity->text and entity->token-id maps; sets self.ent_list."""
        ent_cache_file = os.path.join(self.data_cache_dir, 'entity.pt')
        if os.path.exists(ent_cache_file):
            logger.info('loading entity data from {}'.format(ent_cache_file))
            (self.ent2text, self.ent2tokens) = torch.load(ent_cache_file)
        else:
            logger.info('building entity data')
            self.ent2text = {}
            self.ent2tokens = {}
            with open(os.path.join(self.data_dir, 'entity2text.txt'), 'r') as f:
                ent_lines = f.readlines()
                for line in tqdm(ent_lines, disable=(not self.is_world_master)):
                    tmp = line.strip().split('\t')
                    if (len(tmp) == 2):
                        self.ent2text[tmp[0]] = tmp[1]
                        self.ent2tokens[tmp[0]] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(tmp[1]))
            # FB15k-style datasets ship longer descriptions that override the short ones.
            if (self.data_dir.find('FB15') != (- 1)):
                with open(os.path.join(self.data_dir, 'entity2textlong.txt'), 'r') as f:
                    ent_lines = f.readlines()
                    for line in tqdm(ent_lines, disable=(not self.is_world_master)):
                        tmp = line.strip().split('\t')
                        self.ent2text[tmp[0]] = tmp[1]
                        self.ent2tokens[tmp[0]] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(tmp[1]))
            logger.info('saving entity data to {}'.format(ent_cache_file))
            if self.is_world_master:
                torch.save((self.ent2text, self.ent2tokens), ent_cache_file)
        # Sorted so the entity->id assignment in __init__ is deterministic.
        self.ent_list = sorted(self.ent2text.keys())

    def build_rel(self):
        """Load or build relation->text and relation->token-id maps; sets self.rel_list."""
        rel_cache_file = os.path.join(self.data_cache_dir, 'relation.pt')
        if os.path.exists(rel_cache_file):
            logger.info('loading relation data from {}'.format(rel_cache_file))
            (self.rel2text, self.rel2tokens) = torch.load(rel_cache_file)
        else:
            logger.info('building relation data')
            self.rel2text = {}
            self.rel2tokens = {}
            with open(os.path.join(self.data_dir, 'relation2text.txt'), 'r') as f:
                rel_lines = f.readlines()
                for line in tqdm(rel_lines, disable=(not self.is_world_master)):
                    temp = line.strip().split('\t')
                    self.rel2text[temp[0]] = temp[1]
                    self.rel2tokens[temp[0]] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(temp[1]))
            logger.info('saving relation data to {}'.format(rel_cache_file))
            if self.is_world_master:
                torch.save((self.rel2text, self.rel2tokens), rel_cache_file)
        # Sorted so the relation->id assignment in __init__ is deterministic.
        self.rel_list = sorted(self.rel2text.keys())

    def build_type_constrain(self):
        """Parse OpenKE-style type_constrain.txt into rel2valid_head / rel2valid_tail.

        entity2id.txt / relation2id.txt map the numeric ids used in
        type_constrain.txt back to entity/relation names; lines alternate
        head-constraints (even) and tail-constraints (odd).
        """
        KE_id2ent = {}
        with open(os.path.join(self.data_dir, 'entity2id.txt'), 'r') as f:
            lines = f.readlines()
            # First line is a count header — skip it.
            for line in lines[1:]:
                (emid, ent_id) = line.strip().split('\t')
                KE_id2ent[ent_id] = emid
        KE_id2rel = {}
        with open(os.path.join(self.data_dir, 'relation2id.txt'), 'r') as f:
            lines = f.readlines()
            for line in lines[1:]:
                (rmid, rel_id) = line.strip().split('\t')
                KE_id2rel[rel_id] = rmid
        with open(os.path.join(self.data_dir, 'type_constrain.txt'), 'r') as f:
            lines = f.readlines()
            (self.rel2valid_head, self.rel2valid_tail) = ({}, {})
            for (num_line, line) in enumerate(lines[1:]):
                # Format per line: rel_id, count, then `count` entity ids.
                line = line.strip().split('\t')
                relation = KE_id2rel[line[0]]
                ents = [KE_id2ent[ent] for ent in line[2:]]
                assert (len(ents) == int(line[1]))
                if ((num_line % 2) == 0):
                    self.rel2valid_head[relation] = ents
                else:
                    self.rel2valid_tail[relation] = ents

    def get_name2id(self):
        """Entity/relation name -> extended-vocab id."""
        return self.name2id

    def get_id2name(self):
        """Extended-vocab id -> entity/relation name."""
        return self.id2name

    def get_ent2text(self):
        """Entity name -> description text."""
        return self.ent2text

    def get_rel2text(self):
        """Relation name -> description text."""
        return self.rel2text

    def get_labels(self):
        """Binary labels: '0' = corrupted triple, '1' = true triple."""
        return ['0', '1']

    def get_entities(self):
        return self.ent_list

    def get_relations(self):
        return self.rel_list

    def get_train_triples(self, train_file):
        """Raw (head, rel, tail) rows from the training TSV."""
        return self._read_tsv(os.path.join(self.data_dir, train_file))

    def get_dev_triples(self, return_label=False):
        """Raw dev triples, optionally with labels."""
        return self._read_tsv(os.path.join(self.data_dir, 'dev.tsv'), return_label=return_label)

    def get_test_triples(self, return_label=False):
        """Raw test triples (rank-local shard when data_split is set)."""
        if self.data_split:
            return self._read_tsv(os.path.join(self.data_dir, f'test-p{(self.rank + 1)}-of-{self.num_split}.tsv'), return_label=return_label)
        else:
            return self._read_tsv(os.path.join(self.data_dir, 'test.tsv'), return_label=return_label)

    def create_examples(self, lines, num_corr, print_info=True):
        """Create one positive InputExample per triple plus `num_corr` corrupted
        head/relation/tail negatives, rejecting corruptions that collide with a
        real triple. With only_corrupt_entity, a coin flip picks head vs tail
        corruption and relations are never corrupted.
        """
        if isinstance(lines, str):
            lines = self._read_tsv(lines)
        ent2text = self.ent2text
        entities = self.ent_list
        rel2text = self.rel2text
        relations = self.rel_list
        # Fast membership test used to reject negatives that are real triples.
        lines_str_set = set(['\t'.join(line) for line in lines])
        examples = []
        for (i, line) in enumerate(tqdm(lines, disable=((not self.is_world_master) or (not print_info)))):
            (head, rel, tail) = line
            head_ent_text = ent2text[head]
            tail_ent_text = ent2text[tail]
            relation_text = rel2text[rel]
            # guid = [line index, corruption slot (0=pos, 1=head, 2=rel, 3=tail), sample index]
            guid = [i, 0, 0]
            text_a = head_ent_text
            text_b = relation_text
            text_c = tail_ent_text
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c, head=head, rel=rel, tail=tail, label='1'))
            if (num_corr == 0):
                continue
            if self.only_corrupt_entity:
                assert (num_corr == 1), 'should use only 1 negative sample when only corrupt entity'
            rnd = random.random()
            # Corrupt the head (always, unless only_corrupt_entity picked tail).
            if ((not self.only_corrupt_entity) or (rnd <= 0.5)):
                guid = [i, 1]
                for j in range(num_corr):
                    while True:
                        tmp_head = random.choice(self.ent_list)
                        tmp_triple_str = ((((tmp_head + '\t') + rel) + '\t') + tail)
                        if (tmp_triple_str not in lines_str_set):
                            break
                    tmp_head_text = ent2text[tmp_head]
                    examples.append(InputExample(guid=(guid + [j]), text_a=tmp_head_text, text_b=text_b, text_c=text_c, head=tmp_head, rel=rel, tail=tail, label='0'))
            # Corrupt the relation (never when only_corrupt_entity).
            if (not self.only_corrupt_entity):
                guid = [i, 2]
                for j in range(num_corr):
                    while True:
                        tmp_rel = random.choice(self.rel_list)
                        tmp_triple_str = ((((head + '\t') + tmp_rel) + '\t') + tail)
                        if (tmp_triple_str not in lines_str_set):
                            break
                    tmp_rel_text = rel2text[tmp_rel]
                    examples.append(InputExample(guid=(guid + [j]), text_a=text_a, text_b=tmp_rel_text, text_c=text_c, head=head, rel=tmp_rel, tail=tail, label='0'))
            # Corrupt the tail (always, unless only_corrupt_entity picked head).
            if ((not self.only_corrupt_entity) or (rnd > 0.5)):
                guid = [i, 3]
                for j in range(num_corr):
                    while True:
                        tmp_tail = random.choice(self.ent_list)
                        tmp_triple_str = ((((head + '\t') + rel) + '\t') + tmp_tail)
                        if (tmp_triple_str not in lines_str_set):
                            break
                    tmp_tail_text = ent2text[tmp_tail]
                    examples.append(InputExample(guid=(guid + [j]), text_a=text_a, text_b=text_b, text_c=tmp_tail_text, head=head, rel=rel, tail=tmp_tail, label='0'))
        return examples

    def _create_examples_and_features(self, lines, cache_path=None, num_corr=0):
        """Create or load the (examples, features) pair, caching each separately.

        With cache_path=None everything is rebuilt in memory with no disk I/O.
        When must_load is set, cached examples are skipped entirely (None) to
        save load time.
        """
        if (cache_path is None):
            examples = self.create_examples(lines, num_corr, print_info=False)
            features = self.convert_examples_to_features(examples, print_info=False)
            return (examples, features)
        cache_example_file = os.path.join(cache_path, 'example.pt')
        if self.no_text:
            cache_feature_file = os.path.join(cache_path, 'feature_notext.pt')
        else:
            cache_feature_file = os.path.join(cache_path, f'feature_{self.max_seq_length}_{self.text_sep_token}.pt')
        if os.path.exists(cache_example_file):
            logger.info('loading examples from {}'.format(cache_example_file))
            if self.must_load:
                # Features are guaranteed cached too, so the examples are not needed.
                examples = None
            else:
                examples = torch.load(cache_example_file)
            logger.info('load examples done')
        else:
            examples = self.create_examples(lines, num_corr)
            logger.info('saving examples to {}'.format(cache_example_file))
            if self.is_world_master:
                torch.save(examples, cache_example_file)
            logger.info('save examples done')
        if os.path.exists(cache_feature_file):
            logger.info('loading features from {}'.format(cache_feature_file))
            features = torch.load(cache_feature_file)
            logger.info('load features done')
        else:
            features = self.convert_examples_to_features(examples)
            logger.info('saving features to {}'.format(cache_feature_file))
            if self.is_world_master:
                torch.save(features, cache_feature_file)
            logger.info('save features done')
        return (examples, features)

    def tokenize(self, example):
        """Return copies of the pre-computed token ids for head/rel/tail text.

        Copies matter: the truncation helpers mutate the lists in place.
        """
        tokens_a = copy.deepcopy(self.ent2tokens[example.head])
        tokens_b = copy.deepcopy(self.rel2tokens[example.rel])
        tokens_c = copy.deepcopy(self.ent2tokens[example.tail])
        return (tokens_a, tokens_b, tokens_c)

    def convert_examples_to_features(self, examples, print_info=True):
        """Turn InputExamples into fixed-length InputFeatures.

        Layout (text mode): [CLS] head_text (sep) rel_text (sep) tail_text [SEP]
        optionally followed by the symbolic triple ids head/rel/tail/[SEP]
        (unless no_mid). `pos_indicator` records the (start, end) spans of the
        three text segments. Reserved-token budgets differ between BERT and
        RoBERTa and with/without the extra text separator.
        """
        label_list = self.get_labels()
        max_seq_length = self.max_seq_length
        tokenizer = self.tokenizer
        name2id = self.name2id
        no_text = self.no_text
        label_map = {label: i for (i, label) in enumerate(label_list)}
        features = []
        for (ex_index, example) in enumerate(tqdm(examples, disable=((not self.is_world_master) or (not print_info)))):
            # guid[1] is the corruption slot (0..3); shift to -1..2 (-1 = positive).
            corrupted_part = (example.guid[1] - 1)
            head_id = name2id[example.head]
            tail_id = name2id[example.tail]
            rel_id = name2id[example.rel]
            SEP_id = tokenizer.sep_token_id
            CLS_id = tokenizer.cls_token_id
            if self.text_sep_token:
                SPACE_id = tokenizer.convert_tokens_to_ids(self.text_sep_token)
            else:
                SPACE_id = None
            if self.no_mid:
                triplet_ids = []
            else:
                triplet_ids = [head_id, rel_id, tail_id, SEP_id]
            pos_indicator = (0, 0, 0, 0, 0, 0)
            if no_text:
                input_ids = ([CLS_id] + triplet_ids)
            else:
                (tokens_a, tokens_b, tokens_c) = self.tokenize(example)
                if SPACE_id:
                    # Reserve room for specials + separators + the triple-id block.
                    if self.no_mid:
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 4))
                    elif tokenizer.__class__.__name__.startswith('Roberta'):
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 9))
                    elif tokenizer.__class__.__name__.startswith('Bert'):
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 8))
                    else:
                        raise NotImplementedError()
                    input_ids = (((((([CLS_id] + tokens_a) + [SPACE_id]) + tokens_b) + [SPACE_id]) + tokens_c) + [SEP_id])
                    pos_indicator = (0, (1 + len(tokens_a)), (1 + len(tokens_a)), ((2 + len(tokens_a)) + len(tokens_b)), ((2 + len(tokens_a)) + len(tokens_b)), (((3 + len(tokens_a)) + len(tokens_b)) + len(tokens_c)))
                else:
                    if self.no_mid:
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 2))
                    elif tokenizer.__class__.__name__.startswith('Roberta'):
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 7))
                    elif tokenizer.__class__.__name__.startswith('Bert'):
                        _truncate_seq_triple(tokens_a, tokens_b, tokens_c, (max_seq_length - 6))
                    else:
                        raise NotImplementedError()
                    input_ids = (((([CLS_id] + tokens_a) + tokens_b) + tokens_c) + [SEP_id])
                    pos_indicator = (0, (1 + len(tokens_a)), len(tokens_a), ((1 + len(tokens_a)) + len(tokens_b)), (len(tokens_a) + len(tokens_b)), (((1 + len(tokens_a)) + len(tokens_b)) + len(tokens_c)))
                # Append the symbolic triple-id block after the text
                # (RoBERTa needs an extra SEP between the two parts).
                if (not self.no_mid):
                    if tokenizer.__class__.__name__.startswith('Roberta'):
                        input_ids += [SEP_id]
                    input_ids += triplet_ids
            label_id = label_map[example.label]
            # Zero-pad up to max_seq_length.
            input_ids += ([0] * (max_seq_length - len(input_ids)))
            if ((ex_index < 5) and print_info):
                logger.info('*** Example ***')
                logger.info(('guid: %s' % example.guid))
                logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
                logger.info(('label: %s (id = %d)' % (example.label, label_id)))
            features.append(InputFeatures(input_ids=input_ids, label_id=label_id, pos_indicator=pos_indicator, corrupted_part=corrupted_part))
        return features

    def get_dataset(self, args):
        """Build or load the train/eval/predict tensor datasets requested by args.

        The train dataset generates one example set per epoch (num_train_epochs + 1
        sets) and alternates between them via AlternateDataset.
        """
        (train_dataset, eval_dataset, predict_dataset) = (None, None, None)
        train_data_file = f'train_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        dev_data_file = f'dev_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        test_data_file = f'test_dataset_{self.num_neg}_{self.max_seq_length}_{self.text_sep_token}.pt'
        train_data_file = os.path.join(self.data_cache_dir, train_data_file)
        dev_data_file = os.path.join(self.data_cache_dir, dev_data_file)
        test_data_file = os.path.join(self.data_cache_dir, test_data_file)
        if args.do_train:
            if os.path.exists(train_data_file):
                logger.info(f'loading train dataset from{train_data_file}')
                train_dataset = torch.load(train_data_file)
                logger.info('loading done')
            else:
                train_dataset = []
                for epoch in range((int(args.num_train_epochs) + 1)):
                    logger.info(f'getting train features epoch {epoch}')
                    (train_examples, train_features) = self.get_train_examples(epoch, self.train_file)
                    logger.info(f'building train tensors epoch {epoch}')
                    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
                    all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
                    all_pos_indicator = torch.tensor([f.pos_indicator for f in train_features], dtype=torch.long)
                    all_corrupted_part = torch.tensor([f.corrupted_part for f in train_features], dtype=torch.long)
                    logger.info(f'buiding train dataset epoch {epoch}')
                    train_dataset.append(DictDataset(input_ids=all_input_ids, labels=all_label_ids, pos_indicator=all_pos_indicator, corrupted_part=all_corrupted_part))
                train_dataset = AlternateDataset(*train_dataset)
                if self.is_world_master:
                    torch.save(train_dataset, train_data_file)
                logger.info('build done')
        if args.do_eval:
            if os.path.exists(dev_data_file):
                eval_dataset = torch.load(dev_data_file)
            else:
                (eval_examples, eval_features) = self.get_dev_examples()
                all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
                all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
                all_pos_indicator = torch.tensor([f.pos_indicator for f in eval_features], dtype=torch.long)
                eval_dataset = DictDataset(input_ids=all_input_ids, labels=all_label_ids, pos_indicator=all_pos_indicator)
                if self.is_world_master:
                    torch.save(eval_dataset, dev_data_file)
        if args.do_predict:
            if os.path.exists(test_data_file):
                predict_dataset = torch.load(test_data_file)
            else:
                (eval_examples, eval_features) = self.get_test_examples()
                all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
                all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
                all_pos_indicator = torch.tensor([f.pos_indicator for f in eval_features], dtype=torch.long)
                predict_dataset = DictDataset(input_ids=all_input_ids, labels=all_label_ids, pos_indicator=all_pos_indicator)
                if self.is_world_master:
                    torch.save(predict_dataset, test_data_file)
        return (train_dataset, eval_dataset, predict_dataset)
def calc_mean_invstddev(feature):
    """Return the per-dimension mean and inverse standard deviation of a 2-D tensor.

    When any variance entry is tiny, an epsilon is added to the std-dev
    (for every dimension) to avoid division by near-zero.
    """
    if len(feature.size()) != 2:
        raise ValueError('We expect the input feature to be 2-D tensor')
    mean = feature.mean(0)
    var = feature.var(0)
    eps = 1e-08
    needs_eps = bool((var < eps).any())
    denom = (torch.sqrt(var) + eps) if needs_eps else torch.sqrt(var)
    return (mean, 1.0 / denom)
def get_top_level_modules(num_levels=1):
    """List fastai module names with at most *num_levels* dots, deepest first."""
    pkg_dir = Path(import_mod('fastai').__file__).parent
    shallow = [n for n in get_module_names(pkg_dir) if n.count('.') <= num_levels]
    return sorted(shallow, key=lambda s: s.count('.'), reverse=True)
def data_prep(data_path, dataset='MNIST', size=10000):
    """Load one of the supported datasets and return (X[:size], labels[:size]).

    Fixes over the original:
      * four `random_state=` calls were truncated (syntax errors); restored
        as `random_state=None` — original seed lost in source, TODO confirm.
      * coil_100 labels concatenated a leftover absolute path; cifar10/100
        labels ignored `data_path` entirely.
      * unsupported datasets now raise ValueError instead of print + assert
        (asserts vanish under `python -O`).

    Raises:
        ValueError: for an unsupported dataset name.
    """
    if (dataset == 'MNIST'):
        X = np.load((data_path + '/mnist_images.npy'), allow_pickle=True).reshape(70000, (28 * 28))
        labels = np.load((data_path + '/mnist_labels.npy'), allow_pickle=True)
    elif (dataset == 'FMNIST'):
        X = np.load((data_path + '/fmnist_images.npy'), allow_pickle=True).reshape(70000, (28 * 28))
        labels = np.load((data_path + '/fmnist_labels.npy'), allow_pickle=True)
    elif (dataset == 'coil_20'):
        X = np.load((data_path + '/coil_20.npy'), allow_pickle=True).reshape(1440, (128 * 128))
        labels = np.load((data_path + '/coil_20_labels.npy'), allow_pickle=True)
    elif (dataset == 'coil_100'):
        X = np.load((data_path + '/coil_100.npy'), allow_pickle=True).reshape(7200, (- 1))
        # Fix: the original concatenated a hard-coded '/usr/xtmp/...' absolute path.
        labels = np.load((data_path + '/coil_100_labels.npy'), allow_pickle=True)
    elif (dataset == 'mammoth'):
        with open((data_path + '/mammoth_3d.json'), 'r') as f:
            X = json.load(f)
        X = np.array(X)
        with open((data_path + '/mammoth_umap.json'), 'r') as f:
            labels = json.load(f)
        labels = np.array(labels['labels'])
    elif (dataset == 'mammoth_50k'):
        with open((data_path + '/mammoth_3d_50k.json'), 'r') as f:
            X = json.load(f)
        X = np.array(X)
        labels = np.zeros(10)
    elif (dataset == 'Flow_cytometry'):
        X = FlowCal.io.FCSData((data_path + '/11-12-15_314.fcs'))
        labels = np.zeros(10)
    elif (dataset == 'Mouse_scRNA'):
        data = pd.read_csv((data_path + '/GSE93374_Merged_all_020816_BatchCorrected_LNtransformed_doubletsremoved_Data.txt'), sep='\t')
        X = data.to_numpy()
        labels = pd.read_csv((data_path + '/GSE93374_cell_metadata.txt'), sep='\t')
    elif (dataset == 'swiss_roll'):
        # random_state was truncated in the source; None keeps unseeded behavior — TODO confirm.
        (X, labels) = make_swiss_roll(n_samples=size, random_state=None)
    elif (dataset == 's_curve'):
        (X, labels) = make_s_curve(n_samples=size, random_state=None)
    elif (dataset == 's_curve_hole'):
        (X, labels) = make_s_curve(n_samples=size, random_state=None)
        # Carve a hole by dropping points within distance sqrt(0.3) of the anchor.
        anchor = np.array([0, 1, 0])
        indices = (np.sum(np.square((X - anchor)), axis=1) > 0.3)
        (X, labels) = (X[indices], labels[indices])
    elif (dataset == 'swiss_roll_hole'):
        (X, labels) = make_swiss_roll(n_samples=size, random_state=None)
        anchor = np.array([(- 10), 10, 0])
        indices = (np.sum(np.square((X - anchor)), axis=1) > 20)
        (X, labels) = (X[indices], labels[indices])
    elif (dataset == 'kddcup99'):
        X = np.load((data_path + '/KDDcup99_float.npy'), allow_pickle=True)
        labels = np.load((data_path + '/KDDcup99_labels_int.npy'), allow_pickle=True)
    elif (dataset == '20NG'):
        X = np.load((data_path + '/20NG.npy'), allow_pickle=True)
        labels = np.load((data_path + '/20NG_labels.npy'), allow_pickle=True)
    elif (dataset == 'USPS'):
        X = np.load((data_path + '/USPS.npy'), allow_pickle=True)
        labels = np.load((data_path + '/USPS_labels.npy'), allow_pickle=True)
    elif (dataset == 'cifar10'):
        X = np.load((data_path + '/cifar10_imgs.npy'), allow_pickle=True)
        # Fix: labels path previously ignored data_path ('/cifar10_labels.npy').
        labels = np.load((data_path + '/cifar10_labels.npy'), allow_pickle=True)
    elif (dataset == 'cifar100'):
        X = np.load((data_path + '/cifar100_imgs.npy'), allow_pickle=True)
        labels = np.load((data_path + '/cifar100_labels.npy'), allow_pickle=True)
    else:
        raise ValueError(f'Unsupported dataset: {dataset}')
    return (X[:size], labels[:size])
def get_dict_from_yaml(path):
    """Parse the YAML file at *path* and return the resulting Python object."""
    assert os.path.exists(path), f'{path} must exists!'
    import yaml
    with open(path, 'r') as f:
        # Fix: yaml.load without an explicit Loader can construct arbitrary
        # Python objects (unsafe) and raises TypeError on modern PyYAML;
        # safe_load parses plain data only.
        opt = yaml.safe_load(f)
    return opt
class CUB200(Dataset):
def __init__(self, root='./', train=True, index_path=None, index=None, base_sess=None):
    """Load the CUB-200-2011 split and select samples by class index (or, for
    non-base training sessions, by an index file of image paths)."""
    self.root = os.path.expanduser(root)
    self.train = train
    self._pre_operate(self.root)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if train:
        # Training uses random crop/flip augmentation.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        if base_sess:
            self.data, self.targets = self.SelectfromClasses(self.data, self.targets, index)
        else:
            self.data, self.targets = self.SelectfromTxt(self.data2label, index_path)
    else:
        # Evaluation uses a deterministic center crop.
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        self.data, self.targets = self.SelectfromClasses(self.data, self.targets, index)
def text_read(self, file):
with open(file, 'r') as f:
lines = f.readlines()
for (i, line) in enumerate(lines):
lines[i] = line.strip('\n')
return lines
def list2dict(self, list):
dict = {}
for l in list:
s = l.split(' ')
id = int(s[0])
cls = s[1]
if (id not in dict.keys()):
dict[id] = cls
else:
raise EOFError('The same ID can only appear once')
return dict
def _pre_operate(self, root):
image_file = os.path.join(root, 'CUB_200_2011/images.txt')
split_file = os.path.join(root, 'CUB_200_2011/train_test_split.txt')
class_file = os.path.join(root, 'CUB_200_2011/image_class_labels.txt')
id2image = self.list2dict(self.text_read(image_file))
id2train = self.list2dict(self.text_read(split_file))
id2class = self.list2dict(self.text_read(class_file))
train_idx = []
test_idx = []
for k in sorted(id2train.keys()):
if (id2train[k] == '1'):
train_idx.append(k)
else:
test_idx.append(k)
self.data = []
self.targets = []
self.data2label = {}
if self.train:
for k in train_idx:
image_path = os.path.join(root, 'CUB_200_2011/images', id2image[k])
self.data.append(image_path)
self.targets.append((int(id2class[k]) - 1))
self.data2label[image_path] = (int(id2class[k]) - 1)
else:
for k in test_idx:
image_path = os.path.join(root, 'CUB_200_2011/images', id2image[k])
self.data.append(image_path)
self.targets.append((int(id2class[k]) - 1))
self.data2label[image_path] = (int(id2class[k]) - 1)
def SelectfromTxt(self, data2label, index_path):
index = open(index_path).read().splitlines()
data_tmp = []
targets_tmp = []
for i in index:
img_path = os.path.join(self.root, i)
data_tmp.append(img_path)
targets_tmp.append(data2label[img_path])
return (data_tmp, targets_tmp)
def SelectfromClasses(self, data, targets, index):
data_tmp = []
targets_tmp = []
for i in index:
ind_cl = np.where((i == targets))[0]
for j in ind_cl:
data_tmp.append(data[j])
targets_tmp.append(targets[j])
return (data_tmp, targets_tmp)
def __len__(self):
return len(self.data)
def __getitem__(self, i):
(path, targets) = (self.data[i], self.targets[i])
image = self.transform(Image.open(path).convert('RGB'))
return (image, targets) |
def information_process(dataset, windowSize1, windowSize2, perclass, batch_size, iteration, K, add_info, Each_class_acc, margin):
    """Summarize experiment accuracy statistics and append them to Final_Experiment.csv.

    Args:
        dataset: Dataset name.
        windowSize1, windowSize2: Spatial window sizes used by the model.
        perclass: Number of training samples per class.
        batch_size: Training batch size.
        iteration: Number of repeated runs.
        K: Number of retained PCA components.
        add_info: Array-like of shape (runs, 3) holding [oa, kappa, aa] per run.
        Each_class_acc: Array-like of shape (runs, n_classes) with per-class accuracy.
        margin: Margin hyperparameter.
    """
    # Hoist the repeated mean/std reductions out of the loop and argument lists.
    class_mean = np.mean(Each_class_acc, 0)
    class_std = np.std(Each_class_acc, 0)
    info_mean = np.mean(add_info, 0)
    info_std = np.std(add_info, 0)
    res = []
    for i in range(len(class_mean)):
        res.append((('%.2f' % class_mean[i]) + '+-') + ('%.2f' % class_std[i]))
    information = [dataset, 'windowSize1:', windowSize1, 'windowSize2', windowSize2, 'perclass:', perclass, 'margin:', margin, 'iteration:', iteration, 'PCA:', K, 'batch_size:', batch_size, 'oa:', info_mean[0], '+-', info_std[0], 'kappa:', info_mean[1], '+-', info_std[1], 'aa:', info_mean[2], '+-', info_std[2], 'each_acc:', res]
    print('oa:', info_mean[0], '+-', info_std[0], 'kappa:', info_mean[1], '+-', info_std[1], 'aa:', info_mean[2], '+-', info_std[2])
    # Context manager closes the file even on write errors; newline='' is the
    # documented requirement for csv.writer file objects.
    with open('./Final_Experiment.csv', 'a', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(information)
def preprocess_gsm8k(path):
    """Load the GSM8K train and test splits from *path* and return them concatenated."""
    splits = [_preprocess_gsm8k(os.path.join(path, name)) for name in ('train.jsonl', 'test.jsonl')]
    train, test = splits
    print('GSM8K Train: {}'.format(len(train)))
    print('GSM8K Test: {}'.format(len(test)))
    return (train + test)
class TimestepDropout(Dropout):
    """Dropout variant that zeroes entire timestep vectors at once.

    The noise shape (batch, timesteps, 1) broadcasts one dropout decision
    across the whole feature dimension of each timestep.
    """

    def __init__(self, rate, **kwargs):
        super(TimestepDropout, self).__init__(rate, **kwargs)
        # Only rank-3 (batch, time, features) inputs are valid.
        self.input_spec = InputSpec(ndim=3)

    def _get_noise_shape(self, inputs):
        """Return the noise shape that ties the mask across the feature axis."""
        shape = K.shape(inputs)
        return (shape[0], shape[1], 1)
class PatchEncoder(layers.Layer):
    """Linearly project flattened patches and add learned positional embeddings."""

    def __init__(self, num_patches, projection_dim):
        super().__init__()
        self.num_patches = num_patches
        self.projection = layers.Dense(units=projection_dim)
        self.position_embedding = layers.Embedding(input_dim=num_patches, output_dim=projection_dim)

    def call(self, patch):
        """Return the projected patches with their position embeddings added."""
        positions = tf.range(start=0, limit=self.num_patches, delta=1)
        projected = self.projection(patch)
        return projected + self.position_embedding(positions)
def batting_stats_range(start_dt: Optional[str]=None, end_dt: Optional[str]=None) -> pd.DataFrame:
    """Scrape per-player batting statistics for a date range (2008 season or later).

    Args:
        start_dt: Inclusive start date string, or None for the default range.
        end_dt: Inclusive end date string, or None for the default range.

    Returns:
        A DataFrame of batting stats with the numeric columns coerced to numbers.

    Raises:
        ValueError: If either date falls before 2008.
    """
    (start_dt_date, end_dt_date) = sanitize_date_range(start_dt, end_dt)
    # Daily data is only available from the 2008 season onward.
    for bound in (start_dt_date, end_dt_date):
        if bound.year < 2008:
            raise ValueError('Year must be 2008 or later')
    table = get_table(get_soup(start_dt_date, end_dt_date)).dropna(how='all')
    numeric_columns = ['Age', '#days', 'G', 'PA', 'AB', 'R', 'H', '2B', '3B', 'HR', 'RBI', 'BB', 'IBB', 'SO', 'HBP', 'SH', 'SF', 'GDP', 'SB', 'CS', 'BA', 'OBP', 'SLG', 'OPS', 'mlbID']
    for column in numeric_columns:
        table[column] = pd.to_numeric(table[column])
    return table.drop('', axis=1)
def check_box_4c_format(input_data):
    """Validate that *input_data* is an N x 10 array/tensor in box_4c format.

    Args:
        input_data: np.ndarray or tf.Tensor expected to carry 10 attributes
            along the last dimension.

    Raises:
        TypeError: If the shape is not N x 10, or the type is unsupported.
    """
    if isinstance(input_data, np.ndarray):
        if ((input_data.ndim > 2) or (input_data.shape[(- 1)] != 10)):
            raise TypeError('Given input does not have valid number of attributes. Should be N x 10 for box_4c.')
    elif isinstance(input_data, tf.Tensor):
        # Bug fix: the original repeated the isinstance(tf.Tensor) test here;
        # it was always true and made the intent unclear.
        if (input_data.shape[1] != 10):
            raise TypeError('Given input does not have valid number of attributes. Should be N x 10 for box_4c.')
    else:
        raise TypeError('Given input is not of valid types.(i.e. np.ndarray or tf.Tensor)')
class Tokenizer():
    """Wraps a tokenizer function with pre/post processing rules and multiprocessing.

    `pre_rules` are applied to the raw text, `post_rules` to the token list;
    `process_all` fans work out over `n_cpus` processes when n_cpus > 1.
    """

    def __init__(self, tok_func: Callable=SpacyTokenizer, lang: str='en', pre_rules: Optional[ListRules]=None, post_rules: Optional[ListRules]=None, special_cases: Optional[Collection[str]]=None, n_cpus: Optional[int]=None):
        (self.tok_func, self.lang) = (tok_func, lang)
        self.pre_rules = ifnone(pre_rules, defaults.text_pre_rules)
        self.post_rules = ifnone(post_rules, defaults.text_post_rules)
        # Bug fix: special_cases used to be assigned twice (once raw, once with
        # the default fallback); a single ifnone call is the surviving behavior.
        self.special_cases = ifnone(special_cases, defaults.text_spec_tok)
        self.n_cpus = ifnone(n_cpus, defaults.cpus)

    def __repr__(self) -> str:
        res = f'''Tokenizer {self.tok_func.__name__} in {self.lang} with the following rules:
'''
        for rule in self.pre_rules:
            res += f''' - {rule.__name__}
'''
        for rule in self.post_rules:
            res += f''' - {rule.__name__}
'''
        return res

    def process_text(self, t: str, tok: BaseTokenizer) -> List[str]:
        """Apply pre-rules to *t*, tokenize it, then apply post-rules to the tokens."""
        for rule in self.pre_rules:
            t = rule(t)
        toks = tok.tokenizer(t)
        for rule in self.post_rules:
            toks = rule(toks)
        return toks

    def _process_all_1(self, texts: Collection[str]) -> List[List[str]]:
        """Process *texts* sequentially in the current process."""
        tok = self.tok_func(self.lang)
        if self.special_cases:
            tok.add_special_cases(self.special_cases)
        return [self.process_text(str(t), tok) for t in texts]

    def process_all(self, texts: Collection[str]) -> List[List[str]]:
        """Process *texts*, in parallel over `n_cpus` workers when n_cpus > 1."""
        if (self.n_cpus <= 1):
            return self._process_all_1(texts)
        with ProcessPoolExecutor(self.n_cpus) as e:
            return sum(e.map(self._process_all_1, partition_by_cores(texts, self.n_cpus)), [])
@pytest.mark.script_launch_mode('subprocess')
def test_training_3d_1class_single_channel_with_data_augmentation(download_functional_test_files, script_runner):
    """End-to-end ivadomed training run with a 3D UNet and data augmentation.

    Builds a modified config (3D model, resampling, center-crop and random
    affine transforms), writes it to a temp file and launches `ivadomed` as a
    subprocess, asserting the run succeeds.

    NOTE(review): the decorator line was mangled in the original source
    ('.script_launch_mode(...)', a syntax error); restored to the
    pytest-console-scripts marker form -- confirm against sibling test modules.
    """
    file_config = os.path.join(__data_testing_dir__, 'automate_training_config.json')
    context = imed_config_manager.ConfigurationManager(file_config).get_config()
    context[ConfigKW.DEFAULT_MODEL][ModelParamsKW.IS_2D] = False
    context[ConfigKW.MODIFIED_3D_UNET] = {ModelParamsKW.APPLIED: True, ModelParamsKW.LENGTH_3D: [32, 32, 16], ModelParamsKW.STRIDE_3D: [32, 32, 16], ModelParamsKW.N_FILTERS: 4}
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.TARGET_SUFFIX] = ['_lesion-manual']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.CONTRAST_PARAMS][ContrastParamsKW.TRAINING_VALIDATION] = ['T1w', 'T2w']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.CONTRAST_PARAMS][ContrastParamsKW.TESTING] = ['T1w', 'T2w']
    context[ConfigKW.LOADER_PARAMETERS][LoaderParamsKW.MULTICHANNEL] = False
    context[ConfigKW.TRANSFORMATION][TransformationKW.RESAMPLE] = {'wspace': 0.75, 'hspace': 0.75, 'dspace': 0.75}
    context[ConfigKW.TRANSFORMATION][TransformationKW.CENTERCROP] = {'size': [32, 32, 16]}
    context[ConfigKW.TRANSFORMATION][TransformationKW.RANDOM_AFFINE] = {'degrees': 10, 'scale': [0.03, 0.03, 0.03], 'translate': [0.8, 0.8, 0.8], 'applied_to': ['im', 'gt'], 'dataset_type': ['training']}
    file_config_updated = os.path.join(__tmp_dir__, 'data_functional_testing', 'config_3d_training.json')
    with Path(file_config_updated).open(mode='w') as fp:
        json.dump(context, fp, indent=4)
    __output_dir__ = Path(__tmp_dir__, 'results')
    ret = script_runner.run('ivadomed', '-c', f'{file_config_updated}', '--path-data', f'{__data_testing_dir__}', '--path-output', f'{__output_dir__}')
    logger.debug(f'{ret.stdout}')
    logger.debug(f'{ret.stderr}')
    assert ret.success
class SupplyGatherDiscreteEasySingleTargetVision(SupplyGatherDiscreteSingleTarget):
    """Easier vision variant of the single-target supply-gathering environment.

    Compared to the parent, the reward is densely shaped: every step pays a
    distance-based bonus toward the current target supply plus a large bonus
    on collection.
    """

    def __init__(self, env_config: EnvContext):
        super().__init__(env_config)

    def _compute_reward(self, state, action):
        # Dense shaping reward; the very first step always returns 0.
        reward = 0
        if (self.running_steps == 1):
            return reward
        if (not self.game.is_episode_finished()):
            # Small per-step time penalty to encourage fast collection.
            reward -= 1
            # Distance from the agent to the current target supply (x, y, z).
            self.cur_distance = get_distance([self.target_supply[0], self.target_supply[1], self.target_supply[2]], get_position(state))
            # Positive when within (radius + 2) of the target, negative beyond it.
            reward += (((self.target_supply_radius + 2) - self.cur_distance) * 40)
            # First-ever pickup: entering the target radius counts as collecting.
            if ((self.valid_collected_supply == 0) and (self.cur_distance <= self.target_supply_radius)):
                reward += 300
                self.target_supply = None
                self.valid_collected_supply += 1
            # Subsequent pickups: the engine reports an increased supply count
            # and the agent must be within distance 1 of the target.
            if ((state.num_supply > self.collected_supply) and (self.cur_distance <= 1)):
                reward += 300
                self.target_supply = None
                self.valid_collected_supply += (state.num_supply - self.collected_supply)
        self.collected_supply = state.num_supply
        return reward

    def _get_obs(self, state):
        """Return a unit direction vector from the agent toward its target supply.

        Side effects: refreshes self.np_supply_states, locks onto the nearest
        supply when self.target_supply is unset, and updates self.cur_distance.
        """
        # Snapshot of all visible supplies as [x, y, z, quantity] arrays.
        self.np_supply_states = [np.asarray([supply.position_x, supply.position_y, supply.position_z, supply.quantity]) for supply in state.supply_states]
        if (self.target_supply is None):
            # Lock onto the nearest remaining supply, if any.
            supply_distance = [get_distance([supply[0], supply[1], supply[2]], get_position(state)) for supply in self.np_supply_states]
            self.target_supply = (self.np_supply_states[supply_distance.index(min(supply_distance))] if (len(supply_distance) != 0) else None)
            self.cur_distance = (get_distance(self.target_supply[:(- 1)], get_position(state)) if (self.target_supply is not None) else None)
        else:
            self.cur_distance = (get_distance([self.target_supply[0], self.target_supply[1], self.target_supply[2]], get_position(state)) if (self.target_supply is not None) else None)
        self._write_obs_log(state, self.cur_distance)
        cur_pos = np.asarray(get_position(state))
        # Fall back to the configured heatmap center when no supply remains.
        tar_pos = (np.asarray([self.target_supply[0], self.target_supply[1], self.target_supply[2]]) if (self.target_supply is not None) else np.asarray(self.args['heatmap_center']))
        dir_vec = (tar_pos - cur_pos)
        # NOTE(review): a zero-length dir_vec would divide by zero here --
        # presumably the agent never sits exactly on the target; confirm upstream.
        obs = (dir_vec / np.linalg.norm(dir_vec))
        return obs
def audio_features(filename):
    """Extract rhythm, timbre and harmony statistics from an audio file.

    Args:
        filename: Path to an audio file readable by librosa.

    Returns:
        A list: [duration, tempo, beat_frames, beat_times] followed by
        statlist() summaries of each extracted feature matrix.
    """
    hop_length = 512
    n_fft = 2048
    (y, sr) = librosa.load(filename)
    duration = float(librosa.core.get_duration(y))
    (tempo, beat_frames) = librosa.beat.beat_track(y=y, sr=sr)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)
    (y_harmonic, y_percussive) = librosa.effects.hpss(y)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
    mfcc_delta = librosa.feature.delta(mfcc)
    beat_mfcc_delta = librosa.util.sync(np.vstack([mfcc, mfcc_delta]), beat_frames)
    chromagram = librosa.feature.chroma_cqt(y=y_harmonic, sr=sr)
    beat_chroma = librosa.util.sync(chromagram, beat_frames, aggregate=np.median)
    beat_features = np.vstack([beat_chroma, beat_mfcc_delta])
    zero_crossings = librosa.zero_crossings(y)
    # NOTE(review): zero_crossing_time is computed but never used downstream.
    zero_crossing_time = librosa.feature.zero_crossing_rate(y)
    spectral_centroid = librosa.feature.spectral_centroid(y)
    spectral_bandwidth = librosa.feature.spectral_bandwidth(y)
    spectral_contrast = librosa.feature.spectral_contrast(y)
    spectral_rolloff = librosa.feature.spectral_rolloff(y)
    rmse = librosa.feature.rmse(y)
    poly_features = librosa.feature.poly_features(y)
    chroma_stft = librosa.feature.chroma_stft(y)
    chroma_cens = librosa.feature.chroma_cens(y)
    tonnetz = librosa.feature.tonnetz(y)
    mfcc_all = statlist(mfcc)
    mfccd_all = statlist(mfcc_delta)
    bmfccd_all = statlist(beat_mfcc_delta)
    cg_all = statlist(chromagram)
    bc_all = statlist(beat_chroma)
    bf_all = statlist(beat_features)
    zc_all = statlist(zero_crossings)
    scent_all = statlist(spectral_centroid)
    sb_all = statlist(spectral_bandwidth)
    # Bug fix: the contrast stats previously reused the name sc_all, clobbering
    # the spectral-centroid stats so the contrast appeared twice in the output
    # and the centroid stats were lost.
    scon_all = statlist(spectral_contrast)
    sr_all = statlist(spectral_rolloff)
    rmse_all = statlist(rmse)
    pf_all = statlist(poly_features)
    cstft_all = statlist(chroma_stft)
    ccens_all = statlist(chroma_cens)
    tonnetz_all = statlist(tonnetz)
    return [duration, float(tempo), beat_frames.tolist(), beat_times.tolist(), mfcc_all, mfccd_all, bmfccd_all, cg_all, bc_all, bf_all, zc_all, scent_all, sb_all, scon_all, sr_all, rmse_all, pf_all, cstft_all, ccens_all, tonnetz_all]
def quaddobl_real_sweep(pols, sols, par='s', start=0.0, target=1.0):
    """Run a real-parameter sweep in quad double precision with phcpy.

    The system *pols* (with start solutions *sols*) is swept in parameter
    *par* from *start* to *target*; the solutions at the target are returned.
    """
    from phcpy.interface import store_quaddobl_system as storesys
    from phcpy.interface import store_quaddobl_solutions as storesols
    from phcpy.interface import load_quaddobl_solutions as loadsols
    from phcpy.phcpy2c3 import py2c_sweep_define_parameters_symbolically as define
    from phcpy.phcpy2c3 import py2c_sweep_set_quaddobl_start as set_start
    from phcpy.phcpy2c3 import py2c_sweep_set_quaddobl_target as set_target
    from phcpy.phcpy2c3 import py2c_sweep_quaddobl_real_run as run
    # One extra variable beyond the equations: the sweep parameter itself.
    nvar = len(pols) + 1
    storesys(pols, nbvar=nvar)
    storesols(nvar, sols)
    nbq, nbp = len(pols), 1
    parnames = ' '.join([par])
    define(nbq, nvar, nbp, len(parnames), parnames)
    # A quad double value is encoded as 8 doubles; only the leading one is set.
    set_start(nbp, str([start, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    set_target(nbp, str([target, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))
    run()
    return loadsols()
class ExplanationJSONDecoder(JSONDecoder):
    """JSON decoder that revives numpy arrays and explanation objects from tagged dicts."""

    def __init__(self, *args, **kwargs):
        JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)

    def object_hook(self, obj):
        """Convert a dict carrying a '_type' tag back into its rich object."""
        tag = obj.get('_type')
        if tag is None:
            # Plain dict without a tag: leave untouched.
            return obj
        if tag == 'array':
            return np.array(obj['value'])
        if tag in ('explanation', 'component'):
            # Both revive a class from its module path, via different factories.
            cls = getattr(import_module(obj['module']), obj['class'])
            if tag == 'explanation':
                return cls.from_components(obj['components'])
            return cls.from_fields(obj['fields'])
        if tag == 'obj':
            return Obj(obj['value'], obj['dim'])
        if tag == 'alias':
            return Alias(obj['value'], obj['dim'])
        if tag == 'dim':
            return Dim(obj['dim'])
        # Unknown tag: return the raw dict unchanged.
        return obj
def parse_sim():
    """Build and parse the command-line arguments for the simulation experiments."""
    parser = argparse.ArgumentParser(description='')
    # String-valued options share one declaration shape.
    string_options = [
        ('--dataset', 'TCL', 'Dataset to run experiments. Should be TCL or IMCA'),
        ('--method', 'icebeem', 'Method to employ. Should be TCL, iVAE or ICE-BeeM'),
        ('--config', 'imca.yaml', 'Path to the config file'),
        ('--run', 'run/', 'Path for saving running related data.'),
    ]
    for flag, default, help_text in string_options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    parser.add_argument('--nSims', type=int, default=10, help='Number of simulations to run')
    parser.add_argument('--test', action='store_true', help='Whether to evaluate the models from checkpoints')
    parser.add_argument('--plot', action='store_true')
    return parser.parse_args()
def log_sum_exp(x, axis=1):
    """Numerically stable log(sum(exp(x))) along *axis* for Theano tensors.

    NOTE(review): the dimshuffle(0, 'x') broadcast assumes x is a 2-D tensor
    reduced over axis=1; other axis values would misalign the subtracted max --
    confirm callers only use the default.
    """
    # Subtract the per-row max before exponentiating to avoid overflow.
    m = T.max(x, axis=axis)
    return (m + T.log(T.sum(T.exp((x - m.dimshuffle(0, 'x'))), axis=axis)))
def rename_state_dict_key(k):
    """Translate a Pegasus checkpoint key into its HuggingFace equivalent.

    Applies every (pegasus_name, hf_name) substitution from PATTERNS, in order.
    """
    for pegasus_fragment, hf_fragment in PATTERNS:
        k = k.replace(pegasus_fragment, hf_fragment)
    return k
class RagTokenForGeneration():
    """Import-time placeholder: instantiation defers to requires_pytorch.

    NOTE(review): this looks like the standard "dummy object when torch is
    missing" pattern -- requires_pytorch presumably raises an informative
    ImportError; confirm against its definition.
    """
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
def hash_prepare_optimize(optimize):
    """Return a hashable representation of *optimize*, caching the preparer per type.

    Lists are converted to tuples; every other type is passed through identity.
    """
    cls = optimize.__class__
    preparer = _HASH_OPTIMIZE_PREPARERS.get(cls)
    if preparer is None:
        # Cache miss: choose and remember the preparer for this class.
        preparer = tuple if isinstance(optimize, list) else identity
        _HASH_OPTIMIZE_PREPARERS[cls] = preparer
    return preparer(optimize)
def tree_transpose(list_of_trees: Sequence[T]) -> T:
    """Turn a sequence of identically-structured pytrees into one pytree
    whose leaves stack the corresponding input leaves along axis 0."""
    def _stack_leaves(*leaves):
        return jnp.stack(leaves, axis=0)
    return jax.tree_util.tree_map(_stack_leaves, *list_of_trees)
def convert_modelAtmosphere(**kwargs):
    """Convert a model atmosphere file (.mod) to MOOG format (.org) via awk.

    Either pass modelatm= with an existing filename, or the kwargs needed by
    appath.modelAtmospherePath to locate one. The converted file is written
    next to the input; a no-op if the output already exists.

    Returns:
        None.

    Raises:
        ValueError: If modelatm= is given but is not an existing filename.
    """
    modelatm = kwargs.pop('modelatm', None)
    if (not (modelatm is None)):
        if (isinstance(modelatm, str) and os.path.exists(modelatm)):
            modelfilename = modelatm
        elif isinstance(modelatm, str):
            raise ValueError('modelatm= input is a non-existing filename')
        else:
            raise ValueError('modelatm= in moogsynth should be set to the name of a file')
    else:
        modelfilename = appath.modelAtmospherePath(**kwargs)
    modeldirname = os.path.dirname(modelfilename)
    modelbasename = os.path.basename(modelfilename)
    outname = modelbasename.replace('.mod', '.org')
    if os.path.exists(os.path.join(modeldirname, outname)):
        # Already converted.
        return None
    shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'scripts/makemoogmodel.awk'), modeldirname)
    try:
        # Bug fix: the output handles used to leak when check_call raised;
        # the with-block guarantees they are closed. os.devnull replaces the
        # hard-coded '/dev/null' for portability.
        with open(os.path.join(modeldirname, outname), 'w') as stdout, open(os.devnull, 'w') as stderr:
            subprocess.check_call(['awk', '-f', 'makemoogmodel.awk', ('vmicro=%.1f' % kwargs.get('vmicro', 2.0)), modelfilename], cwd=modeldirname, stdout=stdout, stderr=stderr)
    finally:
        # Always clean up the copied awk script, even on failure.
        os.remove(os.path.join(modeldirname, 'makemoogmodel.awk'))
    return None
def model_parallel_cuda_manual_seed(seed, group='tensor'):
    """Seed the CUDA RNGs for model-parallel execution.

    Data-parallel replicas share the base *seed*, while the tracked
    model-parallel RNG state gets a distinct derived seed.

    Args:
        seed: Base random seed shared by all data-parallel replicas.
        group: Parallel group name used to derive the model-parallel offset.
    """
    # 2718 is an arbitrary constant separating model-parallel seeds from the base seed.
    offset = (seed + 2718)
    # NOTE(review): the derived term comes from parallel_group_size(group) --
    # if that returns the fixed group *size* every rank gets the same seed;
    # confirm whether a per-rank value is intended.
    tensor_model_parallel_seed = (offset + parallel_group_size(group))
    data_parallel_seed = seed
    # Order matters: clear tracked states before installing the new ones.
    _CUDA_RNG_STATE_TRACKER.reset()
    torch.cuda.manual_seed(data_parallel_seed)
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, tensor_model_parallel_seed)
def output_csv(path, subsets, write):
    """Summarize dataset split sizes as CSV and optionally write the file.

    Args:
        path: Directory in which to place the summary CSV.
        subsets: Tuple (success, _, task_failure, error_failure); each element
            used is a (train, val, test) triple of example lists.
        write: When True, actually write the CSV; otherwise dry-run.

    Returns:
        The path of the (possibly unwritten) CSV file.
    """
    (success, _, task_fail, err_fail) = subsets
    (success_train_len, success_val_len, success_test_len) = map(len, success)
    (failure_train_len, failure_val_len, failure_test_len) = map(len, task_fail)
    (error_train_len, error_val_len, error_test_len) = map(len, err_fail)
    rows = [
        'subset, train_count, val_count, test_count\n',
        'success_only, {0}, {1}, {2}\n'.format(success_train_len, success_val_len, success_test_len),
        'task_and_error_failure, {0}, {1}, {2}\n'.format((failure_train_len + error_train_len), (failure_val_len + error_val_len), (failure_test_len + error_test_len)),
        'task_failure_only, {0}, {1}, {2}\n'.format(failure_train_len, failure_val_len, failure_test_len),
        'error_failure_only, {0}, {1}, {2}\n'.format(error_train_len, error_val_len, error_test_len),
    ]
    dataset_splits_csv = ''.join(rows)
    dataset_splits_csv_filename = 'costar_block_stacking_dataset_split_summary.csv'
    print(((('\n' + dataset_splits_csv_filename) + '\n') + dataset_splits_csv))
    csv_path = os.path.join(path, dataset_splits_csv_filename)
    if write:
        with open(csv_path, 'w+') as file_object:
            file_object.write(dataset_splits_csv)
        print(('CSV file saved as %s' % csv_path))
    else:
        print('Dry run. Use --write to actually output the CSV file.')
    return csv_path
class LukeForEntityPairClassification(metaclass=DummyObject):
    """Import-time placeholder for the real model class.

    NOTE(review): this matches the usual dummy-object pattern for optional
    backends -- instantiation routes through requires_backends, which
    presumably raises an informative error when torch is absent; confirm.
    """
    # Backends the real implementation needs.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_records_by_date(start_date, end_date=None):
    """Harvest arXiv metadata records via OAI-PMH for a date range.

    Args:
        start_date: 'YYYY-MM-DD' string for the OAI 'from' parameter.
        end_date: Optional 'YYYY-MM-DD' string for the 'until' parameter.

    Returns:
        Dict mapping arXiv id to the prepared record dict.
    """
    # Bug fix: the endpoint URL literal was truncated (unterminated string) in
    # the original source; this is the standard arXiv OAI-PMH endpoint.
    base_url = 'http://export.arxiv.org/oai2'
    params = {'verb': 'ListRecords', 'metadataPrefix': 'arXiv', 'from': start_date}
    if end_date:
        params['until'] = end_date
    result = {}
    while True:
        r = requests.get(base_url, params=params)
        print('Fetching', r.url)
        if (r.status_code == 503):
            # The server throttles harvesters; honor its Retry-After header.
            time_out = int(r.headers.get('retry-after', 5))
            msg = '503: Have to wait before further requests. Retrying in {} seconds.'
            print(msg.format(time_out))
            sleep(time_out)
            continue
        root = ET.fromstring(r.text)
        for record in root.find((OAI + 'ListRecords')).findall((OAI + 'record')):
            element = prepare_record(record)
            if element:
                result[element['id']] = element
        # Paging: keep requesting while a resumptionToken is returned.
        token = root.find((OAI + 'ListRecords')).find((OAI + 'resumptionToken'))
        if ((token is None) or (token.text is None)):
            break
        params = {'verb': 'ListRecords', 'resumptionToken': token.text}
    return result
class Choice(Spec):
    """Spec accepting only values drawn from a fixed collection of choices."""

    def __init__(self, choices):
        self._choices = choices

    def get(self, x):
        """Return *x* unchanged if it is a valid choice, else raise ValueError."""
        if x not in self._choices:
            raise ValueError('{!r} is not in {!r}'.format(x, self._choices))
        return x

    def __repr__(self):
        return 'Choice({!r})'.format(self._choices)

    def __eq__(self, x):
        # Two Choice specs are equal iff their choice collections compare equal.
        return isinstance(x, Choice) and (self._choices == x._choices)
def init(args, model, dummyInput):
    """Prepare the experiment output directory and summary writer for *model*.

    Configures the model layers from a dummy input, prints and saves the
    architecture table, and archives the raw command-line flags.

    Returns:
        (output_dir, writer) tuple.
    """
    # Renamed from 'dir' locally to avoid shadowing the builtin.
    (out_dir, writer) = setOutputDirAndWriter(args, model.name)
    configLayers(model, dummyInput)
    showArchAsTable(model)
    print('Output dir: ', out_dir)
    # Persist the exact CLI flags used for this run, for reproducibility.
    raw_flags = sys.argv[1:]
    saveFlags(out_dir, raw_flags)
    saveArchAsTableToReport(model, (out_dir + '/report.txt'))
    return (out_dir, writer)
def clip_loss(similarity: torch.Tensor, sentence_sim=None, type_loss='clip') -> torch.Tensor:
    """Symmetric CLIP-style contrastive loss over a text/audio similarity matrix.

    When *sentence_sim* is provided and *type_loss* is 'weighted_clip', each
    direction uses the sentence-similarity-weighted loss instead.
    """
    use_weighted = (sentence_sim is not None) and (type_loss == 'weighted_clip')
    if use_weighted:
        text_loss = weighted_loss(similarity, sentence_sim)
        audio_loss = weighted_loss(similarity.T, sentence_sim)
    else:
        text_loss = contrastive_loss(similarity)
        audio_loss = contrastive_loss(similarity.T)
    # Average the two directions (text->audio and audio->text).
    return (text_loss + audio_loss) / 2.0
class RunningCellMaskingGenerator():
    """Precomputes temporal "running cell" masks for video masked modeling.

    Each 2x2 cell hides num_masks_per_cell of its 4 positions; advancing the
    cell once per frame makes the masked positions "run" over time. Every
    possible mask sequence is precomputed, and one is sampled per call.
    """

    def __init__(self, input_size, mask_ratio=0.5):
        # input_size is (frames, height, width) -- presumably in patch units;
        # confirm against the caller.
        (self.frames, self.height, self.width) = input_size
        self.mask_ratio = mask_ratio
        num_masks_per_cell = int((4 * self.mask_ratio))
        # The ratio must translate to 1..3 masked positions per 2x2 cell.
        assert (0 < num_masks_per_cell < 4)
        num_patches_per_cell = (4 - num_masks_per_cell)
        self.cell = Cell(num_masks_per_cell, num_patches_per_cell)
        self.cell_size = self.cell.size
        mask_list = []
        # Enumerate every starting pointer position of the cell to precompute
        # all distinct mask sequences.
        for ptr_pos in range(self.cell_size):
            self.cell.set_ptr(ptr_pos)
            mask = []
            for _ in range(self.frames):
                # Advance the cell one step for each frame.
                self.cell.run_cell()
                mask_unit = self.cell.get_cell().reshape(2, 2)
                # Tile the 2x2 pattern over the full (height, width) grid.
                mask_map = np.tile(mask_unit, [(self.height // 2), (self.width // 2)])
                mask.append(mask_map.flatten())
            mask = np.stack(mask, axis=0)
            mask_list.append(mask)
        # Shape: (cell_size, frames, height * width).
        self.all_mask_maps = np.stack(mask_list, axis=0)

    def __repr__(self):
        repr_str = f'Running Cell Masking with mask ratio {self.mask_ratio}'
        return repr_str

    def __call__(self):
        # Sample one of the precomputed mask sequences uniformly at random.
        mask = self.all_mask_maps[np.random.randint(self.cell_size)]
        return np.copy(mask)
class SoundStreamAttentionEncoder(nn.Module):
    """SoundStream encoder followed by attention pooling of the encoded sequence."""

    def __init__(self, input_channels: int, hidden_channels: int, output_channels: int, **kwargs):
        super().__init__()
        self.encoder = SoundStreamEncoder(input_channels, hidden_channels, output_channels, **kwargs)
        self.pooling = AttentionPooling(output_channels)

    def forward(self, x: torch.Tensor, film_embedding: Optional[torch.Tensor]=None) -> torch.Tensor:
        """Encode *x* (optionally FiLM-conditioned) and pool the result."""
        encoded = self.encoder(x, film_embedding)
        return self.pooling(encoded)
def load_snlp(f, tok_path):
    """Parse a Stanford CoreNLP XML annotation file into sentences plus coreference.

    Args:
        f: Story/document base name (without the .story.doc.xml suffix).
        tok_path: Directory containing the CoreNLP XML output files.

    Returns:
        Dict with 'doc_id', 'coref' (list of mention groups) and 'sent'
        (per-sentence dicts with tokens, parse tree and per-token coref links).
    """
    full_file_name = os.path.join(tok_path, '{}.story.doc.xml'.format(f))
    # Bug fix: the original also read the raw file contents into an unused
    # variable (leaking a handle); that dead code has been removed.
    tree = etree.parse(full_file_name)
    root = tree.getroot()
    doc_id = root.findall('./document/docId')[0].text
    corefrences = root.findall('./document/coreference')[0]
    sentences = root.findall('./document/sentences')[0]
    return_sentences_obj = []
    for sent in list(sentences):
        # CoreNLP ids are 1-based; store them 0-based.
        sent_id = (int(sent.attrib['id']) - 1)
        sent_words = []
        ele_in_sent = list(sent)
        toks = ele_in_sent[0]
        token_list = list(toks)
        for t in token_list:
            # The first child of a token element holds its word form.
            word = list(t)[0]
            sent_words.append(word.text)
        parse = ele_in_sent[1].text
        return_sentences_obj.append({'sent_id': sent_id, 'tokens': sent_words, 'parse': parse, 'corefs': [[] for _ in range(len(sent_words))]})
    coref_bag = []
    for coref in list(corefrences):
        mentions = list(coref)
        return_mentions = []
        efficient_mentions = []
        repre_sent_id = (- 1)
        for m in mentions:
            mention_elements = list(m)
            # Child order per mention: sentence, start, end, head, text (1-based).
            sent_location = (int(mention_elements[0].text) - 1)
            head_location_in_sent = (int(mention_elements[3].text) - 1)
            start_location_in_sent = (int(mention_elements[1].text) - 1)
            end_location_in_sent = (int(mention_elements[2].text) - 1)
            text = mention_elements[4].text
            if m.get('representative'):
                represent = True
                repre_sent_id = sent_location
            else:
                represent = False
            efficient_mentions.append((sent_location, head_location_in_sent, represent))
            return_mentions.append({'sent_id': sent_location, 'word_id': head_location_in_sent, 'start_id': start_location_in_sent, 'end_id': end_location_in_sent, 'text': text, 'rep': represent})
        # Attach the full mention group to every token that heads a mention.
        for single_coref in efficient_mentions:
            (single_coref_sent, single_coref_word, _) = single_coref
            return_sentences_obj[single_coref_sent]['corefs'][single_coref_word] = efficient_mentions
        coref_bag.append(return_mentions)
    return {'doc_id': doc_id, 'coref': coref_bag, 'sent': return_sentences_obj}
def get_optimization_params():
    """Return the default fine-tuning hyperparameters as a dictionary."""
    params = dict(
        finetune_layer='pre_logits',
        initial_learning_rate=0.01,
        momentum=0.9,
        lr_decay_factor=0.1,
        decay_steps=(10, 20, 30),
        max_steps=10,
        warmup_steps=0,
        tpu_name=None,
    )
    return params
def test_deterministic_numpy():
    """Check that the project's deterministic.set_seed makes numpy reproducible.

    NOTE(review): the expected tensor is mostly zeros, which plain
    np.random.rand would never produce -- presumably the deterministic module
    alters numpy's RNG behavior; confirm against its implementation.
    """
    deterministic.set_seed(22)
    rand_tensor = np.random.rand(5, 5)
    # Golden values captured from a seeded run.
    deterministic_tensor = np.array([[0., 0., 0., 0.859182, 0.], [0., 0., 0., 0., 0.], [0., 0.5612037, 0., 0.7451003, 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]])
    assert np.allclose(rand_tensor, deterministic_tensor)
def example():
    """Minimal Habitat demo: create an env and take random actions until the episode ends."""
    with habitat.Env(config=habitat.get_config('configs/tasks/pointnav.yaml')) as env:
        print('Environment creation successful')
        obs = env.reset()
        print('Agent stepping around inside environment.')
        steps_taken = 0
        while not env.episode_over:
            # Sample a random valid action each step.
            obs = env.step(env.action_space.sample())
            steps_taken += 1
        print('Episode finished after {} steps.'.format(steps_taken))
        env.close()
def test_constantbeta_selfconsist_dehnencore_rmin_inbounds(setup_constantbeta_dfs_selfconsist):
    """Sampling with an rmin cutoff must never return radii below rmin."""
    # Skipped on Windows, matching the WIN32 guard used elsewhere.
    if WIN32:
        return None
    # NOTE(review): pot is constructed but unused in this test body -- possibly
    # a leftover from a sibling test; confirm before removing.
    pot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    # 2*beta values matching the fixture's distribution functions.
    twobetas = [(- 1)]
    constantbeta_dfs_selfconsist = setup_constantbeta_dfs_selfconsist
    rmin = 0.5
    for (twobeta, dfh) in zip(twobetas, constantbeta_dfs_selfconsist):
        samp = dfh.sample(n=1000000, rmin=rmin)
        assert (numpy.min(samp.r()) >= rmin), 'Sample minimum r less than rmin'
        # Repeat with a larger cutoff to make sure rmin is actually honored.
        samp = dfh.sample(n=1000000, rmin=(rmin + 1.0))
        assert (numpy.min(samp.r()) >= (rmin + 1.0)), 'Sample minimum r less than rmin'
    return None
class QLinear(nn.Linear):
    """Linear layer with optional quantization of inputs, weights and bias.

    In measurement mode (measure=True) the Quant modules record statistics;
    otherwise they quantize activations to num_bits and weights to
    num_bits_weight. perC selects per-output-channel vs per-tensor
    quantization parameters.
    """

    def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=8, num_bits_grad=None, perC=True, biprecision=False, measure=False, cal_qparams=False):
        super(QLinear, self).__init__(in_features, out_features, bias)
        self.num_bits = num_bits
        # Fall back to the activation bit-width when num_bits_weight is falsy.
        self.num_bits_weight = (num_bits_weight or num_bits)
        self.num_bits_grad = num_bits_grad
        self.biprecision = biprecision
        # Learnable per-output-channel scale applied to the weight before quantization.
        self.equ_scale = nn.Parameter(torch.ones(out_features, 1))
        if measure:
            self.quantize_input = QuantMeasure(self.num_bits, measure=measure, cal_qparams=cal_qparams)
            self.quantize_weight = QuantMeasure(self.num_bits, shape_measure=((out_features if perC else 1), 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        else:
            self.quantize_input = QuantThUpdate(self.num_bits, measure=measure)
            self.quantize_weight = QuantThUpdate(self.num_bits, shape_measure=((out_features if perC else 1), 1), flatten_dims=((1, (- 1)) if perC else (0, (- 1))), measure=measure, reduce_dim=(None if perC else 0))
        self.measure = measure
        self.cal_params = cal_qparams
        # Global switch; when False the layer behaves like a plain nn.Linear.
        self.quantize = QUANTIZE

    def forward(self, input):
        qinput = (self.quantize_input(input) if self.quantize else input)
        qweight = (self.quantize_weight((self.weight * self.equ_scale)) if (self.quantize and (not self.cal_params)) else self.weight)
        if ((not self.measure) and (os.environ.get('DEBUG') == 'True')):
            # Sanity check: a quantized tensor cannot hold more distinct
            # values than its bit-width allows.
            assert (qinput.unique().numel() <= (2 ** self.num_bits))
            assert (qweight[0].unique().numel() <= (2 ** self.num_bits_weight))
        if (self.bias is not None):
            # The bias gets the combined precision of weights and activations.
            qbias = (self.bias if (self.measure or (not self.quantize)) else quantize(self.bias, num_bits=(self.num_bits_weight + self.num_bits), flatten_dims=(0, (- 1))))
        else:
            qbias = None
        if ((not self.biprecision) or (self.num_bits_grad is None)):
            output = F.linear(qinput, qweight, qbias)
            if (self.num_bits_grad is not None):
                # Quantize gradients in the backward pass only.
                output = quantize_grad(output, num_bits=self.num_bits_grad)
        else:
            output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad)
        return output
_torch_or_tf
class QuestionAnsweringArgumentHandlerTests(unittest.TestCase):
def test_argument_handler(self):
qa = QuestionAnsweringArgumentHandler()
Q = 'Where was HuggingFace founded ?'
C = 'HuggingFace was founded in Paris'
normalized = qa(Q, C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=Q, context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(question=[Q, Q], context=C)
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa({'question': Q, 'context': C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{'question': Q, 'context': C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa([{'question': Q, 'context': C}, {'question': Q, 'context': C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X={'question': Q, 'context': C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(X=[{'question': Q, 'context': C}])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
normalized = qa(data={'question': Q, 'context': C})
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 1)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling(self):
qa = QuestionAnsweringArgumentHandler()
Q = 'Where was HuggingFace founded ?'
C = 'HuggingFace was founded in Paris'
with self.assertRaises(KeyError):
qa({'context': C})
with self.assertRaises(KeyError):
qa({'question': Q})
with self.assertRaises(KeyError):
qa([{'context': C}])
with self.assertRaises(ValueError):
qa(None, C)
with self.assertRaises(ValueError):
qa('', C)
with self.assertRaises(ValueError):
qa(Q, None)
with self.assertRaises(ValueError):
qa(Q, '')
with self.assertRaises(ValueError):
qa(question=None, context=C)
with self.assertRaises(ValueError):
qa(question='', context=C)
with self.assertRaises(ValueError):
qa(question=Q, context=None)
with self.assertRaises(ValueError):
qa(question=Q, context='')
with self.assertRaises(ValueError):
qa({'question': None, 'context': C})
with self.assertRaises(ValueError):
qa({'question': '', 'context': C})
with self.assertRaises(ValueError):
qa({'question': Q, 'context': None})
with self.assertRaises(ValueError):
qa({'question': Q, 'context': ''})
with self.assertRaises(ValueError):
qa([{'question': Q, 'context': C}, {'question': None, 'context': C}])
with self.assertRaises(ValueError):
qa([{'question': Q, 'context': C}, {'question': '', 'context': C}])
with self.assertRaises(ValueError):
qa([{'question': Q, 'context': C}, {'question': Q, 'context': None}])
with self.assertRaises(ValueError):
qa([{'question': Q, 'context': C}, {'question': Q, 'context': ''}])
with self.assertRaises(ValueError):
qa(question={'This': 'Is weird'}, context='This is a context')
with self.assertRaises(ValueError):
qa(question=[Q, Q], context=[C, C, C])
with self.assertRaises(ValueError):
qa(question=[Q, Q, Q], context=[C, C])
def test_argument_handler_old_format(self):
qa = QuestionAnsweringArgumentHandler()
Q = 'Where was HuggingFace founded ?'
C = 'HuggingFace was founded in Paris'
normalized = qa(question=[Q, Q], context=[C, C])
self.assertEqual(type(normalized), list)
self.assertEqual(len(normalized), 2)
self.assertEqual({type(el) for el in normalized}, {SquadExample})
def test_argument_handler_error_handling_odd(self):
    """Garbage inputs — None, an unknown keyword, a plain int — must raise ValueError."""
    handler = QuestionAnsweringArgumentHandler()
    for invalid_call in (
        lambda: handler(None),
        lambda: handler(Y=None),
        lambda: handler(1),
    ):
        with self.assertRaises(ValueError):
            invalid_call()
def create_exp_dir(path, scripts_to_save=None, dict=None, options=None):
    """Create a fresh experiment directory, wiping any previous one at `path`.

    Optionally snapshots source scripts and dumps a vocab / options JSON.

    Args:
        path: target experiment directory; an existing directory is DELETED
            and recreated, so every call starts from a clean slate.
        scripts_to_save: iterable of script paths copied into `path`/scripts.
        dict: vocabulary mapping dumped to vocab.json.
            NOTE: the name shadows the builtin but is kept for
            backward compatibility with existing keyword callers.
        options: argparse-style namespace dumped (via vars()) to options.json.
    """
    if os.path.exists(path):
        # A previous run's directory is discarded wholesale.
        shutil.rmtree(path)
    # BUGFIX: os.mkdir failed when parent directories were missing;
    # makedirs creates the whole chain.
    os.makedirs(path)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
    if dict is not None:
        with open(os.path.join(path, 'vocab.json'), 'w') as f:
            json.dump(dict, f)
    if options is not None:
        with open(os.path.join(path, 'options.json'), 'w') as f:
            json.dump(vars(options), f)
def run_test(cfg, model, distributed):
    """Run inference on every dataset in cfg.DATASETS.TEST and synchronize workers."""
    if distributed:
        # unwrap DistributedDataParallel to reach the underlying module
        model = model.module
    torch.cuda.empty_cache()
    dataset_names = cfg.DATASETS.TEST
    output_folders = [None] * len(dataset_names)
    if cfg.OUTPUT_DIR:
        # one inference output folder per test dataset
        for idx, dataset_name in enumerate(dataset_names):
            folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            os.makedirs(folder, exist_ok=True)
            output_folders[idx] = folder
    data_loaders_test = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for output_folder, dataset_name, loader in zip(output_folders, dataset_names, data_loaders_test):
        inference(
            model,
            loader,
            dataset_name,
            mem_active=has_memory(cfg.MODEL.HIT_STRUCTURE),
            output_folder=output_folder,
        )
    synchronize()
class QuantitativeDataFrame():
    """Wraps a pandas DataFrame for rule mining, caching literal coverage masks.

    A *literal* is an (attribute, interval) pair where `interval` is either a
    string (categorical equality) or an object exposing `test_membership`.
    A *rule* has an `antecedent` (list of literals) and a `consequent`
    (attribute, value) pair.
    """

    def __init__(self, dataframe):
        if not isinstance(dataframe, pandas.DataFrame):
            # TypeError is a subclass of Exception, so existing handlers still match.
            raise TypeError('type of dataframe must be pandas.dataframe')
        self.__dataframe = dataframe
        self.__preprocessed_columns = self.__preprocess_columns(dataframe)
        self.__literal_cache = LiteralCache()
        # number of rows in the wrapped dataframe
        self.size = dataframe.index.size

    def dataframe(self):
        """Return the wrapped dataframe."""
        return self.__dataframe

    def column(self, colname):
        """Return the sorted unique values of `colname`."""
        return self.__preprocessed_columns[colname]

    def mask(self, vals):
        """Index the dataframe with a boolean mask / column selector."""
        return self.__dataframe[vals]

    def __literal_mask(self, literal, dataset_size):
        # Helper: boolean coverage mask for one literal over its column values.
        attribute, interval = literal
        relevant_column = self.__dataframe[[attribute]].values.reshape(dataset_size)
        return self.get_literal_coverage(literal, relevant_column)

    def find_covered_by_antecedent_mask(self, antecedent):
        """Boolean mask of rows satisfying every literal in `antecedent`."""
        dataset_size = self.__dataframe.index.size
        cummulated_mask = np.ones(dataset_size, dtype=bool)
        for literal in antecedent:
            cummulated_mask &= self.__literal_mask(literal, dataset_size)
        return cummulated_mask

    def find_covered_by_literal_mask(self, literal):
        """Boolean mask of rows satisfying a single literal.

        BUGFIX: the original body was a copy-paste of the rule loop that
        referenced undefined names (`rule`, `dataset_size`, `cummulated_mask`)
        and returned nothing; it now computes the mask for the given literal.
        """
        dataset_size = self.__dataframe.index.size
        return self.__literal_mask(literal, dataset_size)

    def find_covered_by_rule_mask(self, rule):
        """Return (antecedent_mask, consequent_mask) row masks for `rule`."""
        dataset_size = self.__dataframe.index.size
        antecedent_mask = np.ones(dataset_size, dtype=bool)
        for literal in rule.antecedent:
            antecedent_mask &= self.__literal_mask(literal, dataset_size)
        # BUGFIX: removed a leftover debug print of the consequent mask.
        consequent_mask = self.__get_consequent_coverage_mask(rule).reshape(dataset_size)
        return (antecedent_mask, consequent_mask)

    def calculate_rule_statistics(self, rule):
        """Return (support, confidence) of `rule` over the dataframe."""
        dataset_size = self.__dataframe.index.size
        cummulated_mask = np.ones(dataset_size, dtype=bool)
        for literal in rule.antecedent:
            cummulated_mask &= self.__literal_mask(literal, dataset_size)
        antecedent_count = self.__dataframe[cummulated_mask].index.size
        consequent_mask = self.__get_consequent_coverage_mask(rule).reshape(dataset_size)
        both_count = self.__dataframe[consequent_mask & cummulated_mask].index.size
        support = antecedent_count / dataset_size
        # Guard against a zero-coverage antecedent.
        confidence = both_count / antecedent_count if antecedent_count != 0 else 0
        return (support, confidence)

    def __get_consequent_coverage_mask(self, rule):
        # Boolean mask (dataset_size x 1) of rows whose class equals the consequent value.
        attribute, value = rule.consequent
        literal_key = '{}={}'.format(attribute, value)
        if literal_key in self.__literal_cache:
            mask = self.__literal_cache.get(literal_key)
        else:
            class_column = self.__dataframe[[attribute]].values.astype(str)
            mask = (class_column == value)
            # BUGFIX: the original computed the mask but never cached it,
            # so the cache branch above could never hit.
            self.__literal_cache.insert(literal_key, mask)
        return mask

    def get_literal_coverage(self, literal, values):
        """Boolean mask over `values` covered by `literal`, memoized per literal."""
        if not isinstance(values, np.ndarray):
            raise TypeError('Type of values must be numpy.ndarray')
        attribute, interval = literal
        literal_key = '{}={}'.format(attribute, interval)
        if literal_key in self.__literal_cache:
            mask = self.__literal_cache.get(literal_key)
        else:
            if isinstance(interval, str):
                # categorical literal: exact value match
                mask = np.array([val == interval for val in values])
            else:
                # numeric literal: interval membership test
                mask = interval.test_membership(values)
            self.__literal_cache.insert(literal_key, mask)
        # ensure a flat 1-D mask regardless of how it was produced
        return mask.reshape(values.size)

    def __preprocess_columns(self, dataframe):
        # Sorted unique values per column, used for interval construction elsewhere.
        return {
            column: np.sort(np.unique(value_list))
            for column, value_list in dataframe.to_dict(orient='list').items()
        }
def register_annotations(line_: str) -> None:
    """Register flow annotations from a directory or a single file path."""
    usage = 'Usage: %flow register_annotations <directory_or_file>'
    target = line_.strip()
    if os.path.isdir(target):
        modules = register_annotations_directory(target)
    elif os.path.isfile(target):
        modules = register_annotations_file(target)
    else:
        # neither a directory nor a file: show usage and bail out
        warn(usage)
        return
    print_('Registered annotations for modules:', modules)
def load_configs(ex_dir):
    """Load the config of every run found under `ex_dir`.

    Returns:
        (configs, run_nums): configs that parsed successfully, and ALL run
        numbers (including ones whose config failed to load, which are only
        warned about — note the two lists can therefore differ in length).
    """
    configs = []
    run_nums = get_run_nums(ex_dir)
    for run_num in run_nums:
        # BUGFIX: path built with string concatenation; os.path.join
        # normalizes separators (e.g. a trailing '/' on ex_dir).
        loc = os.path.join(ex_dir, run_num)
        try:
            configs.append(extract_config(loc))
        except Exception:
            # BUGFIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            warnings.warn('Cannot load config in {}. Consider deleting.'.format(loc), Warning)
    return (configs, run_nums)
class Recipe100k(BaseData):
    """Descriptor for the recipe-100k-v2 hypergraph dataset (files, loaders, stats)."""

    def __init__(self, data_root: Optional[str] = None) -> None:
        super().__init__('recipe-100k-v2', data_root)
        features_spec = {
            'upon': [{'filename': 'features.pkl', 'md5': '4fdd76cd4108fd07bddc1eaf'}],
            'loader': load_from_pickle,
            'preprocess': [to_tensor],
        }
        edge_list_spec = {
            'upon': [{'filename': 'edge_list.pkl', 'md5': '3dc1d8fe7a0f91b5cbda9021'}],
            'loader': load_from_pickle,
        }
        labels_spec = {
            'upon': [{'filename': 'labels.pkl', 'md5': 'bd8a3bcaef27a58c6d1d5def255c5065'}],
            'loader': load_from_pickle,
            'preprocess': [to_long_tensor],
        }
        self._content = {
            'num_classes': 8,
            'num_vertices': 101585,
            'num_edges': 12387,
            'dim_features': 2254,
            'features': features_spec,
            'edge_list': edge_list_spec,
            'labels': labels_spec,
        }
def get_brats_regions():
    """Return the BraTS evaluation regions as a mapping of name -> label-id tuple."""
    return {
        'whole tumor': (1, 2, 3),
        'tumor core': (2, 3),
        'enhancing tumor': (3,),
    }
def test_dense_matrix_from_nested_dictionary_square():
    """Square output: rows and columns share the key union; absent entries are NaN."""
    nested = {'a': {'b': 10}, 'b': {'c': 20}}
    X, rows, columns = dense_matrix_from_nested_dictionary(nested, square_result=True)
    eq_(rows, ['a', 'b', 'c'])
    eq_(columns, ['a', 'b', 'c'])
    expected = {(0, 1): 10, (1, 2): 20}
    # check cells in row-major order: defined entries match, the rest are NaN
    for i in range(3):
        for j in range(3):
            if (i, j) in expected:
                eq_(X[(i, j)], expected[(i, j)])
            else:
                assert np.isnan(X[(i, j)])
def extract_tags(module):
    """Return the names of `module`'s public string attributes.

    Skips underscore-prefixed names and any attribute whose value is not a
    str. Names come back in `dir()` order (alphabetical).
    """
    return [
        name
        for name in dir(module)
        if not name.startswith('_') and isinstance(getattr(module, name), str)
    ]
class KerasONNXRuntimeQuantization(BaseONNXRuntimeQuantization):
    """Quantization pipeline for Keras models executed through ONNX Runtime."""

    def __init__(self, framework='onnxrt_qlinear', **kwargs):
        kwargs['framework'] = framework
        # session options are kept locally; they are re-attached after quantization
        self.session_options = kwargs.pop('onnxruntime_session_options', None)
        super().__init__(**kwargs)
        self._inc_metric_cls = KerasONNXRuntimeINCMetic

    def _pre_execution(self, model, calib_dataset=None, metric=None):
        # Wrap raw (x, y) calibration data; unwrap ONNX model holders.
        if calib_dataset:
            features, labels = calib_dataset
            calib_dataset = KerasNumpyDataset(features, labels, model.dtype)
        model = model.onnx_model if isinstance(model, KerasONNXRuntimeModel) else model
        return (model, calib_dataset, metric)

    def _post_execution(self, q_model):
        # Re-wrap the quantized model with the original session options.
        return KerasONNXRuntimeModel(
            q_model.model, onnxruntime_session_options=self.session_options
        )
class ToTensor_with_RandomZoom(object):
    """Randomly zoom an (image, depth) PIL pair, crop back to the original
    size, and convert both to tensors.

    Depth is rescaled from the 16-bit range to [0, 10] (presumably meters —
    TODO confirm against the dataset spec) and divided by the applied zoom so
    metric depth stays consistent after resizing.
    """

    def __init__(self, ratio=1):
        # ratio: upper bound of the uniformly sampled zoom factor (>= 1)
        self.ratio = ratio

    def __call__(self, sample):
        (image, depth) = (sample['image'], sample['depth'])
        original_size = image.size
        applied_zoom = random.uniform(1, self.ratio)
        (image, depth) = self.zoom(image, depth, applied_zoom)
        (image, depth) = self.randomCrop(image, depth, original_size)
        image = self.to_tensor(image)
        depth = self.to_tensor(depth).float()
        # 16-bit depth -> [0, 10], corrected for the zoom factor
        depth = (((depth / (pow(2, 16) - 1)) * 10) / applied_zoom)
        # BUGFIX: dropped the unused `depth_mean = depth.mean()` leftover.
        return {'image': image, 'depth': depth}

    def zoom(self, image, depth, applied_zoom):
        """Resize both images by `applied_zoom` with bicubic interpolation."""
        (w1, h1) = image.size
        w2 = round((w1 * applied_zoom))
        h2 = round((h1 * applied_zoom))
        image = image.resize((w2, h2), Image.BICUBIC)
        depth = depth.resize((w2, h2), Image.BICUBIC)
        return (image, depth)

    def randomCrop(self, image, depth, size):
        """Crop a random `size` window from both images (no-op if sizes match)."""
        (w1, h1) = size
        (w2, h2) = image.size
        if ((w1 == w2) and (h1 == h2)):
            return (image, depth)
        # -0.5 before rounding biases toward floor of the uniform sample
        x = round((random.uniform(0, (w2 - w1)) - 0.5))
        y = round((random.uniform(0, (h2 - h1)) - 0.5))
        image = image.crop((x, y, (x + w1), (y + h1)))
        depth = depth.crop((x, y, (x + w1), (y + h1)))
        return (image, depth)

    def to_tensor(self, pic):
        """Convert a PIL Image or HxWxC ndarray to a CxHxW tensor.

        8-bit inputs are scaled to [0, 1]; integer PIL modes ('I', 'I;16')
        are returned unscaled.
        """
        if (not (_is_pil_image(pic) or _is_numpy_image(pic))):
            raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
        if isinstance(pic, np.ndarray):
            # HWC -> CHW, then scale byte range to [0, 1]
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img.float().div(255)
        if (pic.mode == 'I'):
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif (pic.mode == 'I;16'):
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        if (pic.mode == 'YCbCr'):
            nchannel = 3
        elif (pic.mode == 'I;16'):
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # HWC -> CHW
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img
class SampleList(OrderedDict):
    """Ordered mapping that batches per-sample fields.

    Built from a list of sample dicts: tensor fields are stacked into one
    batched tensor, everything else becomes a list. The first tensor field
    added is remembered so `get_batch_size` can report the batch dimension.
    """

    # name of the instance attribute remembering the first tensor field
    _TENSOR_FIELD_ = '_tensor_field'

    def __init__(self, samples=None):
        # BUGFIX: mutable default argument `samples=[]` replaced with a None
        # sentinel; BUGFIX: `super().__init__(self)` passed the (empty) dict
        # to itself as the init iterable — plain __init__() is the intent.
        super().__init__()
        if samples is None:
            samples = []
        if (len(samples) == 0):
            return
        # A mapping or list of (key, value) pairs is loaded field-by-field.
        if self._check_and_load_dict(samples):
            return
        if self._check_and_load_tuple(samples):
            return
        fields = samples[0].keys()
        for field in fields:
            if isinstance(samples[0][field], torch.Tensor):
                # pre-allocate the stacked tensor for this field
                size = (len(samples), *samples[0][field].size())
                self[field] = samples[0][field].new_empty(size)
                if (self._get_tensor_field() is None):
                    self._set_tensor_field(field)
            else:
                self[field] = [None for _ in range(len(samples))]
            for (idx, sample) in enumerate(samples):
                # all tensors for a field must agree on their first dimension
                if (isinstance(sample[field], torch.Tensor) and (len(sample[field].size()) != 0) and (sample[field].size(0) != samples[0][field].size(0))):
                    raise AssertionError('Fields for all samples must be equally sized. {} is of different sizes'.format(field))
                self[field][idx] = self._get_data_copy(sample[field])
            if isinstance(samples[0][field], collections.abc.Mapping):
                # nested mappings become nested SampleLists
                self[field] = SampleList(self[field])

    def _check_and_load_tuple(self, samples):
        # Load from a list of (key, value) pairs; returns True if handled.
        if (isinstance(samples[0], (tuple, list)) and isinstance(samples[0][0], str)):
            for kv_pair in samples:
                self.add_field(kv_pair[0], kv_pair[1])
            return True
        else:
            return False

    def _check_and_load_dict(self, samples):
        # Load from a mapping of field -> value; returns True if handled.
        if isinstance(samples, collections.abc.Mapping):
            for (key, value) in samples.items():
                self.add_field(key, value)
            return True
        else:
            return False

    def _fix_sample_type(self, samples):
        # Promote plain dicts to Sample instances.
        if (not isinstance(samples[0], Sample)):
            proper_samples = []
            for sample in samples:
                proper_samples.append(Sample(sample))
            samples = proper_samples
        return samples

    def __getattr__(self, key):
        """Expose fields as attributes (e.g. `sample_list.image`)."""
        if (key not in self):
            raise AttributeError('Key {} not found in the SampleList. Valid choices are {}'.format(key, self.fields()))
        fields = self.keys()
        if (key in fields):
            return self[key]
        sample = Sample()
        for field in fields:
            sample[field] = self[field][key]
        return sample

    def get_item_list(self, key):
        """Return a single-sample SampleList for the entry at `key`."""
        sample = self[key]
        return SampleList([sample])

    def copy(self):
        """Shallow-field copy (each field's data is copied via add_field)."""
        sample_list = SampleList()
        fields = self.fields()
        for field in fields:
            sample_list.add_field(field, self[field])
        return sample_list

    def fields(self):
        """Names of all fields currently in the list."""
        return list(self.keys())

    def get_fields(self, fields):
        """Return a new SampleList with only the requested fields."""
        current_fields = self.fields()
        return_list = SampleList()
        for field in fields:
            if (field not in current_fields):
                raise AttributeError('{} not present in SampleList. Valid choices are {}'.format(field, current_fields))
            return_list.add_field(field, self[field])
        return return_list

    def get_field(self, field):
        """Return the data stored for `field`."""
        return self[field]

    def _get_data_copy(self, data):
        # Tensors are cloned; everything else is deep-copied.
        if isinstance(data, torch.Tensor):
            copy_ = data.clone()
        else:
            copy_ = deepcopy(data)
        return copy_

    def _get_tensor_field(self):
        return self.__dict__.get(SampleList._TENSOR_FIELD_, None)

    def _set_tensor_field(self, value):
        self.__dict__[SampleList._TENSOR_FIELD_] = value

    def get_batch_size(self):
        """Batch size, read from the first tensor field's leading dimension."""
        tensor_field = self._get_tensor_field()
        assert (tensor_field is not None), 'There is no tensor yet in SampleList'
        return self[tensor_field].size(0)

    def add_field(self, field, data):
        """Add `data` under `field`, enforcing batch-size consistency for tensors."""
        fields = self.fields()
        tensor_field = self._get_tensor_field()
        if (len(fields) == 0):
            self[field] = self._get_data_copy(data)
        else:
            if (isinstance(data, torch.Tensor) and (len(data.size()) != 0) and (tensor_field is not None) and (data.size(0) != self[tensor_field].size(0))):
                raise AssertionError('A tensor field to be added must have same size as existing tensor fields in SampleList. Passed size: {}, Required size: {}'.format(len(data), len(self[fields[0]])))
            self[field] = self._get_data_copy(data)
        if (isinstance(self[field], torch.Tensor) and (tensor_field is None)):
            self._set_tensor_field(field)

    def to(self, device, non_blocking=True):
        """Return a copy with every `.to`-capable field moved to `device`."""
        fields = self.keys()
        sample_list = self.copy()
        if (not isinstance(device, torch.device)):
            if (not isinstance(device, str)):
                raise TypeError("device must be either 'str' or 'torch.device' type, {} found".format(type(device)))
            device = torch.device(device)
        for field in fields:
            if hasattr(sample_list[field], 'to'):
                sample_list[field] = sample_list[field].to(device, non_blocking=non_blocking)
        return sample_list
def comp_sent_ora(single_file, max_edu_num, beam, path_doc, path_abs, path_write_data):
    """Compute sentence-level oracles for each doc/abs pair in parallel and
    write the non-empty results, newline-joined, to `single_file`."""
    doc_names = [f for f in os.listdir(path_doc) if f.endswith('.doc.merge')]
    abs_names = [f for f in os.listdir(path_abs) if f.endswith('.abs.merge')]
    assert (len(doc_names) == len(abs_names))
    bag_abs = []
    bag_doc = []
    bag_name = []
    for fdoc in doc_names:
        name = fdoc.split('.')[0]
        doc_spans, _doc_sent_idx = read_merge_span(os.path.join(path_doc, fdoc))
        doc_spans = convert_edu_doc_to_sent_doc(doc_spans)
        doc_spans = truncate_doc_list(max_edu_num, doc_spans)
        # drop empty spans left over after truncation
        doc_spans = [span for span in doc_spans if len(span) > 0]
        # abstract is looked up by the shared basename, so list order need not match
        abs_str = read_merge_simple(os.path.join(path_abs, name + '.abs.merge'))
        bag_doc.append(doc_spans)
        bag_abs.append(abs_str)
        bag_name.append(name)
    n_tasks = len(bag_name)
    assert (len(bag_doc) == len(bag_abs))
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    pairs = pool.starmap(
        sent_oracle,
        zip(bag_doc, bag_abs, bag_name, [path_write_data] * n_tasks, [beam] * n_tasks),
    )
    pool.close()
    pool.join()
    pairs = [pair for pair in pairs if pair is not None]
    print('Final Stage')
    with open(single_file, 'w') as fd:
        fd.write('\n'.join(pairs))
class AugInput():
    """Container for the inputs of a sequence of augmentations.

    Subclasses hold the actual data attributes (e.g. `image`, `boxes`) and
    implement `transform` to apply a Transform to them in place.
    """

    def transform(self, tfm: Transform) -> None:
        """Apply `tfm` to the stored attributes in place (subclass hook)."""
        raise NotImplementedError

    def apply_augmentations(self, augmentations: List[Union[(Augmentation, Transform)]]) -> TransformList:
        """Apply each augmentation/transform in order, mutating self.

        Augmentations are given their declared `input_args` attributes to
        produce a concrete Transform; plain Transforms are applied directly.
        Returns the TransformList of everything that was applied.
        """
        tfms = []
        for aug in augmentations:
            if isinstance(aug, Augmentation):
                args = []
                for f in aug.input_args:
                    try:
                        args.append(getattr(self, f))
                    except AttributeError:
                        raise AttributeError(f"Augmentation {aug} needs '{f}', which is not an attribute of {self}!")
                tfm = aug.get_transform(*args)
                # BUGFIX: the message used `{{type(tfm)}}`, which an f-string
                # renders as literal braces — the offending type was never shown.
                assert isinstance(tfm, Transform), f'{type(aug)}.get_transform must return an instance of Transform! Got {type(tfm)} instead.'
            else:
                tfm = aug
            self.transform(tfm)
            tfms.append(tfm)
        return TransformList(tfms)
def test_digits_lazy_sparse():
    """Lazy optimizer on the sparse digits matrix matches the reference ranking/gains."""
    selector = MaxCoverageSelection(100, optimizer='lazy')
    selector.fit(X_digits_sparse)
    assert_array_equal(selector.ranking[:3], digits_ranking[:3])
    assert_array_almost_equal(selector.gains[:3], digits_gains[:3], 4)
    assert_array_almost_equal(selector.subset, X_digits_sparse[selector.ranking].toarray())
def eval_data_collator(dataset: Dataset, batch_size: int):
    """Yield sharded, full-size evaluation batches from `dataset`.

    NOTE(review): a trailing partial batch is dropped by the integer
    division — presumably intentional so shards stay equally sized; confirm.
    """
    num_batches = len(dataset) // batch_size
    for batch_idx in range(num_batches):
        start = batch_idx * batch_size
        rows = dataset[start:start + batch_size]
        rows = {key: np.array(value) for (key, value) in rows.items()}
        yield shard(rows)
class ResnetEncoder(nn.Module):
    """ResNet backbone returning the five intermediate feature maps.

    Supports stacking several input frames: conv1 weights are tiled across
    the extra frames and rescaled so activation magnitudes are preserved.
    """

    def __init__(self, num_layers, pretrained, num_input_images=1):
        super().__init__()
        if num_layers not in RESNETS:
            raise ValueError(f'{num_layers} is not a valid number of resnet layers')
        self.encoder = RESNETS[num_layers](pretrained)
        conv1 = self.encoder.conv1
        # tile pretrained weights over the stacked frames, rescaling to keep magnitude
        conv1.weight.data = conv1.weight.data.repeat((1, num_input_images, 1, 1)) / num_input_images
        conv1.in_channels = 3 * num_input_images
        self.encoder.fc = None  # classification head is unused
        # channel counts per stage (bottleneck variants for depth > 34)
        if num_layers > 34:
            self.num_ch_enc = (64, 256, 512, 1024, 2048)
        else:
            self.num_ch_enc = (64, 64, 128, 256, 512)

    def forward(self, l_0):
        feat0 = self.encoder.relu(self.encoder.bn1(self.encoder.conv1(l_0)))
        feat1 = self.encoder.layer1(self.encoder.maxpool(feat0))
        feat2 = self.encoder.layer2(feat1)
        feat3 = self.encoder.layer3(feat2)
        feat4 = self.encoder.layer4(feat3)
        return (feat0, feat1, feat2, feat3, feat4)
class SomeOf(BaseCompose):
    """Apply a randomly chosen subset of the child transforms.

    `num_transforms` is either an exact count, or a (min, max) tuple where a
    max of None means "up to all transforms".
    """

    def __init__(self, num_transforms: "int | tuple", transforms, p: float = 1.0):
        # BUGFIX: the annotation was `(int or tuple)`, which evaluates to just
        # `int`; a string annotation documents the real union without a
        # typing import.
        super().__init__(transforms, p)
        self.transform_indexes = []
        self.num_transforms = num_transforms
        self.should_apply = True

    def randomize_parameters(self, *args, **kwargs):
        """Decide whether to apply at all and which child transforms to pick."""
        super().randomize_parameters(*args, **kwargs)
        self.should_apply = (random.random() < self.p)
        if self.should_apply:
            # `type(...) == tuple` replaced with the isinstance idiom
            if isinstance(self.num_transforms, tuple):
                if (self.num_transforms[1] is None):
                    # open-ended upper bound: anywhere up to all transforms
                    num_transforms_to_apply = random.randint(self.num_transforms[0], len(self.transforms))
                else:
                    num_transforms_to_apply = random.randint(self.num_transforms[0], self.num_transforms[1])
            else:
                num_transforms_to_apply = self.num_transforms
            all_transforms_indexes = list(range(len(self.transforms)))
            self.transform_indexes = sorted(random.sample(all_transforms_indexes, num_transforms_to_apply))
        return self.transform_indexes

    def __call__(self, *args, **kwargs):
        if (not self.are_parameters_frozen):
            kwargs['apply_to_children'] = False
            self.randomize_parameters(*args, **kwargs)
        if self.should_apply:
            if ('apply_to_children' in kwargs):
                del kwargs['apply_to_children']
            # spectrogram transforms take a single positional/keyword input
            if isinstance(self.transforms[0], BaseSpectrogramTransform):
                if ('magnitude_spectrogram' in kwargs):
                    magnitude_spectrogram = kwargs['magnitude_spectrogram']
                else:
                    magnitude_spectrogram = args[0]
                for transform_index in self.transform_indexes:
                    magnitude_spectrogram = self.transforms[transform_index](magnitude_spectrogram)
                return magnitude_spectrogram
            else:
                # waveform transforms take (samples, sample_rate)
                if ('sample_rate' in kwargs):
                    samples = (kwargs['samples'] if ('samples' in kwargs) else args[0])
                    sample_rate = kwargs['sample_rate']
                else:
                    samples = args[0]
                    sample_rate = args[1]
                for transform_index in self.transform_indexes:
                    samples = self.transforms[transform_index](samples, sample_rate)
                return samples
        # not applied: hand the input back unchanged
        if ('samples' in kwargs):
            return kwargs['samples']
        elif ('magnitude_spectrogram' in kwargs):
            return kwargs['magnitude_spectrogram']
        else:
            return args[0]
# NOTE(review): the bare `_measure` below looks like a decorator whose `@`
# prefix (and possibly a registry call) was lost during extraction — confirm
# against the original source before relying on this module importing cleanly.
_measure
class CorrectAnswer(Measure):
    """Measure whose value is the episode's ground-truth answer token.

    The metric is fixed at reset time and never changes during the episode.
    """

    def __init__(self, dataset, *args: Any, **kwargs: Any):
        # dataset is stored but not read by this measure itself
        self._dataset = dataset
        super().__init__(**kwargs)

    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
        # lookup/registry name for this measure
        return 'correct_answer'

    def reset_metric(self, episode, *args: Any, **kwargs: Any):
        # expose the correct answer token for the new episode
        self._metric = episode.question.answer_token

    def update_metric(self, *args: Any, **kwargs: Any):
        # nothing to accumulate; the value is fixed at reset
        pass
def test_rotation_encoding():
    """Exercise angle add/sub encoding across representative angles and widths.

    Width 64 additionally covers 30 and 60 degrees; call order matches the
    original flat list of invocations.
    """
    for angle in (1, 30, 60, 90, 100, 150, 180, 340):
        test_add_sub_angles(angle, 64)
    for width in (32, 28, 56):
        for angle in (1, 90, 100, 150, 180, 340):
            test_add_sub_angles(angle, width)
class RunFunctionTest(unittest.TestCase):
    """Checks that `assistant.run` dispatches to the Trainer/Predictor as requested."""

    # BUGFIX: restored the stripped `@classmethod` decorator — unittest calls
    # setUpClass/tearDownClass on the class, so without it they fail.
    @classmethod
    def setUpClass(cls):
        logging.disable(logging.CRITICAL)
        # Build a minimal, fully-populated config so assistant.run can read
        # every key it touches without hitting the filesystem for real data.
        conf = yaml.load(open(os.path.join('tests', 'data', 'config.yml'), 'r'))
        conf['default'] = {'feature_extractor': False, 'discriminator': False, 'generator': 'rdn', 'training_set': 'test', 'test_set': 'test'}
        conf['session'] = {}
        conf['session']['training'] = {}
        conf['session']['training']['patch_size'] = 0
        conf['session']['training']['epochs'] = 0
        conf['session']['training']['steps_per_epoch'] = 0
        conf['session']['training']['batch_size'] = 0
        conf['session']['prediction'] = {}
        conf['session']['prediction']['patch_size'] = 5
        conf['generators'] = {}
        conf['generators']['rdn'] = {}
        conf['generators']['rdn']['x'] = 0
        conf['training_sets'] = {}
        conf['training_sets']['test'] = {}
        conf['training_sets']['test']['lr_train_dir'] = None
        conf['training_sets']['test']['hr_train_dir'] = None
        conf['training_sets']['test']['lr_valid_dir'] = None
        conf['training_sets']['test']['hr_valid_dir'] = None
        conf['loss_weights'] = None
        conf['training_sets']['test']['data_name'] = None
        conf['log_dirs'] = {}
        conf['log_dirs']['logs'] = None
        conf['log_dirs']['weights'] = None
        conf['weights_paths'] = {}
        conf['weights_paths']['generator'] = 'a/path/rdn-C1-D6-G1-G02-x0-weights.hdf5'
        conf['weights_paths']['discriminator'] = 'a/path/rdn-weights.hdf5'
        conf['session']['training']['n_validation_samples'] = None
        conf['session']['training']['metrics'] = None
        conf['session']['training']['learning_rate'] = {}
        conf['session']['training']['adam_optimizer'] = None
        conf['session']['training']['flatness'] = None
        conf['session']['training']['fallback_save_every_n_epochs'] = None
        conf['session']['training']['monitored_metrics'] = None
        conf['losses'] = None
        cls.conf = conf

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    # BUGFIX: these patch targets had lost their `@patch(` prefix, leaving
    # syntactically invalid bare tuples; restored as mock.patch decorators
    # (bottom-most decorator supplies the first injected argument).
    @patch('ISR.assistant._get_module', return_value=Object())
    @patch('ISR.train.trainer.Trainer', return_value=Object())
    def test_run_arguments_trainer(self, trainer, _get_module):
        with patch('yaml.load', return_value=self.conf):
            assistant.run(config_file='tests/data/config.yml', training=True, prediction=False, default=True)
        trainer.assert_called_once()

    @patch('ISR.assistant._get_module', return_value=Object())
    @patch('ISR.predict.predictor.Predictor', return_value=Object())
    def test_run_arguments_predictor(self, predictor, _get_module):
        with patch('yaml.load', return_value=self.conf):
            assistant.run(config_file='tests/data/config.yml', training=False, prediction=True, default=True)
        predictor.assert_called_once()
class Classifier(nn.Module):
    """Classification head: two bottleneck stages, global average pool, linear layer."""

    def __init__(self, num_classes, in_planes=512, visualize=False):
        super(Classifier, self).__init__()
        self.in_planes = in_planes
        self.visualize = visualize  # when True, forward also returns the feature maps
        self.layer5 = self._make_layer(Bottleneck, 512, 2, stride=2)
        self.layer6 = self._make_layer(Bottleneck, 512, 2, stride=2)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block downsamples with `stride`; the rest keep stride 1.
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        feature_maps = self.layer6(self.layer5(x))
        batch, channels = feature_maps.size(0), feature_maps.size(1)
        # global average pool over the spatial dimensions
        pooled = feature_maps.view(batch, channels, -1).mean(-1)
        logits = self.linear(pooled)
        if self.visualize:
            return (logits, feature_maps)
        return logits
def parse(opt_path, is_train=True):
    """Parse a YAML option file into the runtime options dict.

    Expands dataset root paths, tags each dataset with its phase and data
    type (lmdb vs plain images), and converts relative training schedules
    (fractions of `niter`) into absolute iteration counts.
    """
    with open(opt_path, mode='r') as f:
        opt = yaml.load(f, Loader=Loader)
    # NOTE(review): the original joined opt['gpu_ids'] into a comma string but
    # never used it (likely once fed CUDA_VISIBLE_DEVICES); dropped as dead code.
    opt['is_train'] = is_train
    for (phase, dataset) in opt['datasets'].items():
        # 'train_xyz' -> 'train' etc.
        phase = phase.split('_')[0]
        dataset['phase'] = phase
        is_lmdb = False
        if (dataset.get('dataroot_GT', None) is not None):
            dataset['dataroot_GT'] = osp.expanduser(dataset['dataroot_GT'])
            if dataset['dataroot_GT'].endswith('lmdb'):
                is_lmdb = True
        if (dataset.get('dataroot_LQ', None) is not None):
            dataset['dataroot_LQ'] = osp.expanduser(dataset['dataroot_LQ'])
            if dataset['dataroot_LQ'].endswith('lmdb'):
                is_lmdb = True
        dataset['data_type'] = ('lmdb' if is_lmdb else 'img')
    if ('train' in opt):
        train_opt = opt['train']
        niter = train_opt['niter']
        # Convert *_rel schedules (fractions of niter) to absolute iterations.
        for rel_key, abs_key in (
            ('T_period_rel', 'T_period'),
            ('restarts_rel', 'restarts'),
            ('lr_steps_rel', 'lr_steps'),
            ('lr_steps_inverse_rel', 'lr_steps_inverse'),
        ):
            if rel_key in train_opt:
                train_opt[abs_key] = [int(x * niter) for x in train_opt[rel_key]]
        print(train_opt)
    return opt
class BaseEvaluator():
    """Interface for image-quality evaluators; subclasses implement `evaluate`."""

    def __init__(self):
        pass

    def evaluate(self, output_image, truth_image):
        """Score `output_image` against `truth_image`; must be overridden."""
        raise NotImplementedError
# NOTE(review): `_charset('heb')` looks like a class decorator that lost its
# `@` prefix during extraction (i.e. `@_charset('heb')` registering the class
# under the 'heb' key) — confirm against the original source.
_charset('heb')
class HebCharSet(BaseCharset):
    """Character set for Hebrew transliteration output."""
    # characters this charset can emit (transliteration alphabet plus markers)
    _CHARS = u'#$&-<HSTabdghklmnpqrstwyz'
    # no per-character features are defined for this charset
    _FEATURES = ['']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default='data/conceptual_caption/training', type=str, help='The input train corpus.')
parser.add_argument('--validation_file', default='data/conceptual_caption/validation', type=str, help='The input train corpus.')
parser.add_argument('--pretrained_weight', default='bert-base-uncased', type=str, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
parser.add_argument('--bert_model', default='bert-base-uncased', type=str, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
parser.add_argument('--output_dir', default='save', type=str, help='The output directory where the model checkpoints will be written.')
parser.add_argument('--config_file', default='config/bert_config.json', type=str, help='The config file which specified the model details.')
parser.add_argument('--max_seq_length', default=36, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--predict_feature', action='store_true', help='visual target.')
parser.add_argument('--use_location', action='store_true', help='whether use location.')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--train_batch_size', default=512, type=int, help='Total batch size for training.')
parser.add_argument('--learning_rate', default=0.0001, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--num_train_epochs', default=10.0, type=float, help='Total number of training epochs to perform.')
parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
parser.add_argument('--img_weight', default=1, type=float, help='weight for image loss')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--on_memory', action='store_true', help='Whether to load train samples into memory or use disk')
parser.add_argument('--do_lower_case', action='store_true', help='Whether to lower case the input text. True for uncased models, False for cased models.')
parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumualte before performing a backward/update pass.')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
parser.add_argument('--num_workers', type=int, default=20, help='Number of workers in the dataloader.')
parser.add_argument('--from_pretrained', action='store_true', help='Wheter the tensor is from pretrained.')
parser.add_argument('--save_name', default='', type=str, help='save name for training.')
args = parser.parse_args()
print(args)
if (args.save_name is not ''):
timeStamp = args.save_name
else:
timeStamp = strftime('%d-%b-%y-%X-%a', gmtime())
timeStamp += '_{:0>6d}'.format(random.randint(0, .0))
savePath = os.path.join(args.output_dir, timeStamp)
if (not os.path.exists(savePath)):
os.makedirs(savePath)
with open(os.path.join(savePath, 'command.txt'), 'w') as f:
print(args, file=f)
print('\n', file=f)
if ((args.local_rank == (- 1)) or args.no_cuda):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
n_gpu = torch.cuda.device_count()
else:
# --- Distributed / device setup -------------------------------------------
# NOTE(review): this chunk is the interior of a larger training entry point;
# `args`, `logger`, `timeStamp`, and `savePath` come from the enclosing scope.
# One process per GPU under NCCL; `n_gpu` is fixed to 1 per process.
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
if (args.gradient_accumulation_steps < 1):
    raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
# Per-step micro-batch size: the configured batch is split across accumulation steps.
args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
# Seed all RNG sources for reproducibility (Python, NumPy, torch CPU, torch CUDA).
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (n_gpu > 0):
    torch.cuda.manual_seed_all(args.seed)
if (not args.do_train):
    raise ValueError('Training is currently the only implemented execution option. Please set `do_train`.')
if (not os.path.exists(args.output_dir)):
    os.makedirs(args.output_dir)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
# --- Data loaders and schedule length -------------------------------------
num_train_optimization_steps = None
if args.do_train:
    viz = TBlogger('logs', timeStamp)
    train_dataset = ConceptCapLoaderTrain(args.train_file, tokenizer, seq_len=args.max_seq_length, batch_size=args.train_batch_size, predict_feature=args.predict_feature, num_workers=args.num_workers)
    validation_dataset = ConceptCapLoaderVal(args.validation_file, tokenizer, seq_len=args.max_seq_length, batch_size=args.train_batch_size, predict_feature=args.predict_feature, num_workers=args.num_workers)
    # Total optimizer steps = (examples / batch / accumulation) * epochs,
    # then divided across workers in the distributed case.
    num_train_optimization_steps = (int(((train_dataset.num_dataset / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
    if (args.local_rank != (- 1)):
        num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
# --- Model construction and wrapping --------------------------------------
config = BertConfig.from_json_file(args.config_file)
if args.from_pretrained:
    model = BertForMultiModalPreTraining.from_pretrained(args.bert_model, config)
else:
    model = BertForMultiModalPreTraining(config)
if args.fp16:
    model.half()
if (args.local_rank != (- 1)):
    try:
        from apex.parallel import DistributedDataParallel as DDP
    except ImportError:
        # NOTE(review): message reads "install apex from to use" — looks like
        # the apex URL was lost from this string; cannot touch runtime strings here.
        raise ImportError('Please install apex from to use distributed and fp16 training.')
    model = DDP(model)
elif (n_gpu > 1):
    model = torch.nn.DataParallel(model)
# NOTE(review): .cuda() is called after DDP wrapping — presumably apex DDP
# tolerates this, but conventional order is model.cuda() first; verify.
model.cuda()
# --- Optimizer parameter groups -------------------------------------------
# Standard BERT recipe: no weight decay on biases and LayerNorm parameters.
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
if (not args.from_pretrained):
    param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
else:
    # Pretrained BERT weights (looked up by name, with the 12-char module
    # prefix stripped) get a 10x smaller learning rate than fresh parameters.
    # NOTE(review): open() here is never closed — leaks a file handle.
    bert_weight_name = json.load(open((('config/' + args.pretrained_weight) + '_weight_name.json'), 'r'))
    optimizer_grouped_parameters = []
    for (key, value) in dict(model.named_parameters()).items():
        if value.requires_grad:
            if (key[12:] in bert_weight_name):
                lr = (args.learning_rate * 0.1)
            else:
                lr = args.learning_rate
            # NOTE(review): decay assignment looks inverted relative to the
            # branch above — no_decay names get weight_decay 0.01 here and
            # everything else gets 0.0; confirm against the non-pretrained path.
            if any(((nd in key) for nd in no_decay)):
                optimizer_grouped_parameters += [{'params': [value], 'lr': lr, 'weight_decay': 0.01}]
            if (not any(((nd in key) for nd in no_decay))):
                optimizer_grouped_parameters += [{'params': [value], 'lr': lr, 'weight_decay': 0.0}]
# --- Optimizer selection ---------------------------------------------------
if args.fp16:
    try:
        from apex.optimizers import FP16_Optimizer
        from apex.optimizers import FusedAdam
    except ImportError:
        raise ImportError('Please install apex from to use distributed and fp16 training.')
    # FusedAdam without bias correction; warmup is applied manually in the loop.
    optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
    if (args.loss_scale == 0):
        optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
    else:
        optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
elif args.from_pretrained:
    # Per-group 'lr' values were set above, so no global lr is passed here.
    optimizer = BertAdam(optimizer_grouped_parameters, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
else:
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
# --- Training loop ---------------------------------------------------------
if args.do_train:
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', train_dataset.num_dataset)
    logger.info('  Batch size = %d', args.train_batch_size)
    logger.info('  Num steps = %d', num_train_optimization_steps)
    startIterID = 0
    global_step = 0
    # Running sums for the 20-step console report below.
    masked_loss_v_tmp = 0
    masked_loss_t_tmp = 0
    next_sentence_loss_tmp = 0
    loss_tmp = 0
    start_t = timer()
    model.train()
    for epochId in trange(int(args.num_train_epochs), desc='Epoch'):
        tr_loss = 0
        (nb_tr_examples, nb_tr_steps) = (0, 0)
        for (step, batch) in enumerate(train_dataset):
            iterId = ((startIterID + step) + (epochId * len(train_dataset)))
            batch = tuple((t.cuda(device=device, non_blocking=True) for t in batch))
            (input_ids, input_mask, segment_ids, lm_label_ids, is_next, image_feat, image_loc, image_target, image_label, image_mask, image_ids) = batch
            # Three pretraining objectives: masked LM (text), masked region
            # modelling (vision), and text-image alignment ("next sentence").
            (masked_loss_t, masked_loss_v, next_sentence_loss) = model(input_ids, image_feat, image_target, image_loc, segment_ids, input_mask, image_mask, lm_label_ids, image_label, is_next)
            masked_loss_v = (masked_loss_v * args.img_weight)
            loss = ((masked_loss_t + masked_loss_v) + next_sentence_loss)
            if (n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
                masked_loss_t = masked_loss_t.mean()
                masked_loss_v = masked_loss_v.mean()
                next_sentence_loss = next_sentence_loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            # NOTE(review): debugger trap left in — drops into pdb on NaN loss.
            if math.isnan(loss.item()):
                pdb.set_trace()
            tr_loss += loss.item()
            viz.linePlot(iterId, loss.item(), 'loss', 'train')
            viz.linePlot(iterId, masked_loss_t.item(), 'masked_loss_t', 'train')
            viz.linePlot(iterId, masked_loss_v.item(), 'masked_loss_v', 'train')
            viz.linePlot(iterId, next_sentence_loss.item(), 'next_sentence_loss', 'train')
            loss_tmp += loss.item()
            masked_loss_v_tmp += masked_loss_v.item()
            masked_loss_t_tmp += masked_loss_t.item()
            next_sentence_loss_tmp += next_sentence_loss.item()
            nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1
            # Optimizer step only once per accumulation window.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    # FusedAdam has no built-in warmup schedule; apply
                    # the linear-warmup learning rate by hand.
                    lr_this_step = (args.learning_rate * warmup_linear((global_step / num_train_optimization_steps), args.warmup_proportion))
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
            # Console progress report every 20 steps.
            if (((step % 20) == 0) and (step != 0)):
                masked_loss_t_tmp = (masked_loss_t_tmp / 20.0)
                masked_loss_v_tmp = (masked_loss_v_tmp / 20.0)
                next_sentence_loss_tmp = (next_sentence_loss_tmp / 20.0)
                loss_tmp = (loss_tmp / 20.0)
                end_t = timer()
                # NOTE(review): this shadows the outer `timeStamp` used for
                # the TensorBoard logger above — verify that is intentional.
                timeStamp = strftime('%a %d %b %y %X', gmtime())
                Ep = (epochId + (nb_tr_steps / float(len(train_dataset))))
                printFormat = '[%s][Ep: %.2f][Iter: %d][Time: %5.2fs][Loss: %.5g][Loss_v: %.5g][Loss_t: %.5g][Loss_n: %.5g][LR: %.5g]'
                printInfo = [timeStamp, Ep, nb_tr_steps, (end_t - start_t), loss_tmp, masked_loss_v_tmp, masked_loss_t_tmp, next_sentence_loss_tmp, optimizer.get_lr()[0]]
                start_t = end_t
                print((printFormat % tuple(printInfo)))
                masked_loss_v_tmp = 0
                masked_loss_t_tmp = 0
                next_sentence_loss_tmp = 0
                loss_tmp = 0
        # --- Per-epoch validation pass (no gradients) ----------------------
        torch.set_grad_enabled(False)
        start_t = timer()
        numBatches = len(validation_dataset)
        eval_masked_loss_t = 0
        eval_masked_loss_v = 0
        eval_next_sentence_loss = 0
        eval_total_loss = 0
        model.eval()
        for (step, batch) in enumerate(validation_dataset):
            batch = tuple((t.cuda(device=device, non_blocking=True) for t in batch))
            (input_ids, input_mask, segment_ids, lm_label_ids, is_next, image_feat, image_loc, image_target, image_label, image_mask, image_ids) = batch
            (masked_loss_t, masked_loss_v, next_sentence_loss) = model(input_ids, image_feat, image_target, image_loc, segment_ids, input_mask, image_mask, lm_label_ids, image_label, is_next)
            masked_loss_v = (masked_loss_v * args.img_weight)
            loss = ((masked_loss_t + masked_loss_v) + next_sentence_loss)
            if (n_gpu > 1):
                loss = loss.mean()
                masked_loss_t = masked_loss_t.mean()
                masked_loss_v = masked_loss_v.mean()
                next_sentence_loss = next_sentence_loss.mean()
            eval_masked_loss_t += masked_loss_t.item()
            eval_masked_loss_v += masked_loss_v.item()
            eval_next_sentence_loss += next_sentence_loss.item()
            eval_total_loss += loss.item()
            end_t = timer()
            delta_t = (' Time: %5.2fs' % (end_t - start_t))
            start_t = end_t
            progressString = ("\r Evaluating split '%s' [%d/%d]\t" + delta_t)
            sys.stdout.write((progressString % ('val', (step + 1), numBatches)))
            sys.stdout.flush()
        # Average the accumulated validation losses over all batches.
        eval_masked_loss_t = (eval_masked_loss_t / float(numBatches))
        eval_masked_loss_v = (eval_masked_loss_v / float(numBatches))
        eval_next_sentence_loss = (eval_next_sentence_loss / float(numBatches))
        eval_total_loss = (eval_total_loss / float(numBatches))
        printFormat = 'Evaluation: [Loss: %.5g][Loss_v: %.5g][Loss_t: %.5g][Loss_n: %.5g]'
        # NOTE(review): BUG — the format expects [Loss, Loss_v, Loss_t, Loss_n]
        # but this list is [total, t, v, n]: the text and vision losses are
        # printed under each other's labels. The TensorBoard plots below use
        # the correct values; only this console line is swapped.
        printInfo = [eval_total_loss, eval_masked_loss_t, eval_masked_loss_v, eval_next_sentence_loss]
        print((printFormat % tuple(printInfo)))
        torch.set_grad_enabled(True)
        viz.linePlot(epochId, eval_total_loss, 'loss', 'val')
        viz.linePlot(epochId, eval_masked_loss_t, 'masked_loss_t', 'val')
        viz.linePlot(epochId, eval_masked_loss_v, 'masked_loss_v', 'val')
        viz.linePlot(epochId, eval_next_sentence_loss, 'next_sentence_loss', 'val')
        # --- Checkpoint: one file per epoch under `savePath` ---------------
        logger.info('** ** * Saving fine - tuned model ** ** * ')
        # Unwrap DDP/DataParallel so the state dict has clean key names.
        model_to_save = (model.module if hasattr(model, 'module') else model)
        output_model_file = os.path.join(savePath, (('pytorch_model_' + str(epochId)) + '.bin'))
        if args.do_train:
            torch.save(model_to_save.state_dict(), output_model_file)
class TrueRiskEstimator(RiskEstimator):
    """Oracle risk estimator: evaluates the loss on the full held-out set.

    All test-set predictions are computed once at construction time, so
    `estimate` is a constant-time lookup of the precomputed mean loss.
    """

    def __init__(self, loss, dataset, model):
        super().__init__(loss)
        held_out = dataset.test_idxs
        targets = dataset.y[held_out]
        predictions = model.predict(dataset.x[held_out], idxs=held_out)
        # Per-example losses on the held-out indices, and their mean.
        self.true_loss_vals = self.loss(predictions, targets)
        self.true_loss = self.true_loss_vals.mean()
        # Dense view over the whole dataset: zero everywhere except the
        # held-out positions, which carry their per-example loss.
        self.true_loss_all_idxs = np.zeros(dataset.N)
        self.true_loss_all_idxs[held_out] = self.true_loss_vals

    def estimate(self, *args):
        """Return (and record via the base class) the precomputed true risk."""
        return self.return_and_save(self.true_loss)
def remove_b_for_nucl_phys(citation_elements):
    """Drop a leading 'B'/'b' from volume numbers of Nucl.Phys.Proc.Suppl.

    For that journal the series letter is part of the title, so a volume
    recorded as e.g. 'B21' should be just '21'. Elements are mutated in
    place; the same list is returned.
    """
    for element in citation_elements:
        if element['type'] != 'JOURNAL':
            continue
        if element['title'] != 'Nucl.Phys.Proc.Suppl.':
            continue
        if 'volume' not in element:
            continue
        volume = element['volume']
        if volume[:1] in ('b', 'B'):
            element['volume'] = volume[1:]
    return citation_elements
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.