code stringlengths 101 5.91M |
|---|
def remove_symbols_and_diacritics(s: str, keep=''):
    """Strip diacritical marks and replace symbols/punctuation with spaces.

    The input is NFKD-normalized first, so accented characters decompose
    into a base letter plus combining marks; the combining marks (Unicode
    category 'Mn') are then dropped.  Characters listed in ``keep`` survive
    untouched, and characters found in ``ADDITIONAL_DIACRITICS`` are mapped
    through that table.  Any remaining mark/symbol/punctuation character
    (category starting with 'M', 'S' or 'P') becomes a single space.
    """
    def _map(ch):
        if ch in keep:
            return ch
        if ch in ADDITIONAL_DIACRITICS:
            return ADDITIONAL_DIACRITICS[ch]
        category = unicodedata.category(ch)
        if category == 'Mn':
            return ''
        if category[0] in 'MSP':
            return ' '
        return ch

    normalized = unicodedata.normalize('NFKD', s)
    return ''.join(_map(ch) for ch in normalized)
_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow (``CodeGenTokenizer``) and fast
    (``CodeGenTokenizerFast``) CodeGen tokenizers.

    A tiny BPE vocabulary and merges file are written to a temporary
    directory in ``setUp`` and used by most tests; ``test_truncation``
    loads the published ``Salesforce/codegen-350M-mono`` checkpoint
    instead.
    """

    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False

    def setUp(self):
        """Write a minimal vocab/merges file pair into the temp dir."""
        super().setUp()
        # Minimal GPT-2 style vocab; 'G' plays the role of the leading-space
        # marker byte in this toy setup.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>', '<|endoftext|>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp-dir vocab files."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust-backed) tokenizer loaded from the temp-dir vocab files."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        """Slow tokenizer: text -> BPE tokens -> ids round trip."""
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        # Unknown-token handling: '<unk>' maps to id 19 in the toy vocab.
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = 'lower newer'
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Same comparison but with special tokens left enabled.
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        input_tokens = (tokens + [rust_tokenizer.unk_token])
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Inherited mixin test intentionally disabled for CodeGen.
        pass

    def test_padding(self, max_length=15):
        """Padding must raise while no pad token is defined."""
        for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')]
                # Simple input, pair input and batched variants must all
                # raise ValueError when asked to pad without a pad token.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length')

    def test_padding_if_pad_token_set_slow(self):
        """With an explicit pad token, padding/attention masks behave as expected."""
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
        s = 'This is a simple input'
        s2 = ['This is a simple input looooooooong', 'This is a simple input']
        p = ('This is a simple input', 'This is a pair')
        p2 = [('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair')]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        # NOTE(review): `truncate` is not a standard tokenizer kwarg
        # (`truncation` is) — presumably only dynamic padding is exercised
        # here; confirm against the tokenizer __call__ signature.
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='np')
        self.assertEqual(out_s['input_ids'].shape[(- 1)], 30)
        self.assertTrue((pad_token_id in out_s['input_ids']))
        self.assertTrue((0 in out_s['attention_mask']))
        # Dynamic padding pads to the longest batch member: the long first
        # entry is unpadded, the short second entry is padded/masked.
        self.assertEqual(out_s2['input_ids'].shape[(- 1)], 33)
        self.assertFalse((pad_token_id in out_s2['input_ids'][0]))
        self.assertFalse((0 in out_s2['attention_mask'][0]))
        self.assertTrue((pad_token_id in out_s2['input_ids'][1]))
        self.assertTrue((0 in out_s2['attention_mask'][1]))
        self.assertEqual(out_p['input_ids'].shape[(- 1)], 60)
        self.assertTrue((pad_token_id in out_p['input_ids']))
        self.assertTrue((0 in out_p['attention_mask']))
        self.assertEqual(out_p2['input_ids'].shape[(- 1)], 52)
        self.assertFalse((pad_token_id in out_p2['input_ids'][0]))
        self.assertFalse((0 in out_p2['attention_mask'][0]))
        self.assertTrue((pad_token_id in out_p2['input_ids'][1]))
        self.assertTrue((0 in out_p2['attention_mask'][1]))

    def test_add_bos_token_slow(self):
        """add_bos_token=True must prepend the BOS token on encode and decode."""
        bos_token = '$$$'
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = 'This is a simple input'
        s2 = ['This is a simple input 1', 'This is a simple input 2']
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(((o[0] == bos_token_id) for o in out_s2.input_ids)))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(((d.split()[0] == bos_token) for d in decode_s2)))

    def test_truncation(self):
        """`truncate_before_pattern` cuts decoded text at the first pattern match."""
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
        expected_trucated_text = '\nif len_a > len_b:\n    result = a\nelse:\n    result = b'
        input_ids = tokenizer.encode(text)
        truncation_pattern = ['^#', re.escape('<|endoftext|>'), "^'''", '^"""', '\n\n\n']
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_trucated_text)

    def test_padding_different_model_input_name(self):
        # Inherited mixin test intentionally disabled for CodeGen.
        pass
class OptimWrapper():
    """Wrapper around a PyTorch optimizer that exposes its hyper-parameters
    (``lr``, ``mom``, ``beta``, ``wd``) as properties and implements
    "true" (decoupled) weight decay.

    Param groups are expected in pairs as produced by ``split_bn_bias``:
    even-indexed groups hold regular parameters, each following odd-indexed
    group holds the matching batchnorm/bias parameters.

    Fix over the reviewed version: ``create`` is a real ``@classmethod`` and
    the duplicated ``lr``/``mom``/``beta``/``wd`` definitions are restored
    as property getter/setter pairs — previously the second bare ``def``
    simply shadowed the first, so the getters were unreachable and
    ``self.wd = wd`` in ``__init__`` never propagated into the param groups.
    """

    def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
        (self.opt, self.true_wd, self.bn_wd) = (opt, true_wd, bn_wd)
        # Hyper-parameter keys of the wrapped optimizer ('lr', 'momentum', ...).
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd  # runs through the `wd` property setter below

    @classmethod
    def create(cls, opt_func, lr, layer_groups, **kwargs):
        """Build an `opt_func` optimizer over `layer_groups` and wrap it."""
        split_groups = split_bn_bias(layer_groups)
        opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
        opt = cls(opt, **kwargs)
        (opt.lr, opt.opt_func) = (listify(lr, layer_groups), opt_func)
        return opt

    def new(self, layer_groups):
        """Create a fresh wrapper with the same hyper-parameters over new groups."""
        opt_func = getattr(self, 'opt_func', self.opt.__class__)
        return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)

    def __repr__(self) -> str:
        return f'''OptimWrapper over {repr(self.opt)}.
True weight decay: {self.true_wd}'''

    def step(self) -> None:
        """Step the optimizer; with `true_wd`, decay the weights directly."""
        if self.true_wd:
            # Apply decoupled weight decay in-place, then zero out the
            # optimizer's own weight_decay so it is not applied twice.
            for (lr, wd, pg1, pg2) in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
                for p in pg1['params']:
                    if p.requires_grad is False:
                        continue
                    p.data.mul_(1 - wd * lr)
                if self.bn_wd:
                    for p in pg2['params']:
                        if p.requires_grad is False:
                            continue
                        p.data.mul_(1 - wd * lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()

    def zero_grad(self) -> None:
        self.opt.zero_grad()

    def __getattr__(self, k: str):
        # Delegate unknown attribute lookups to the wrapped optimizer.
        # NOTE: this silently returns None for names the optimizer lacks.
        return getattr(self.opt, k, None)

    def clear(self):
        """Reset the optimizer's internal state (e.g. momentum buffers)."""
        sd = self.state_dict()
        sd['state'] = {}
        self.load_state_dict(sd)

    @property
    def lr(self) -> float:
        return self._lr[-1]

    @lr.setter
    def lr(self, val: float) -> None:
        self._lr = self.set_val('lr', listify(val, self._lr))

    @property
    def mom(self) -> float:
        return self._mom[-1]

    @mom.setter
    def mom(self, val: float) -> None:
        # 'momentum' (SGD/RMSprop) or the first element of 'betas' (Adam).
        if 'momentum' in self.opt_keys:
            self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys:
            self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)

    @property
    def beta(self) -> float:
        return None if self._beta is None else self._beta[-1]

    @beta.setter
    def beta(self, val: float) -> None:
        # Second element of 'betas' (Adam) or 'alpha' (RMSprop).
        if val is None:
            return
        if 'betas' in self.opt_keys:
            self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys:
            self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)

    @property
    def wd(self) -> float:
        return self._wd[-1]

    @wd.setter
    def wd(self, val: float) -> None:
        # With true_wd the decay is applied manually in step() instead.
        if not self.true_wd:
            self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)

    def read_defaults(self) -> None:
        """Cache the wrapped optimizer's current hyper-parameter values."""
        self._beta = None
        if 'lr' in self.opt_keys:
            self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys:
            self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys:
            self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys:
            (self._mom, self._beta) = self.read_val('betas')
        if 'weight_decay' in self.opt_keys:
            self._wd = self.read_val('weight_decay')

    def set_val(self, key: str, val, bn_groups: bool = True):
        """Write `val` for `key` into the param groups (optionally bn groups too)."""
        if is_tuple(val):
            val = [(v1, v2) for (v1, v2) in zip(*val)]
        for (v, pg1, pg2) in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups:
                pg2[key] = v
        return val

    def read_val(self, key: str):
        """Read `key` from the regular (even-indexed) param groups."""
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]):
            val = ([o[0] for o in val], [o[1] for o in val])
        return val
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Copy s3prl x-vector downstream weights into a ``WavLMForXVector``.

    Loads the base WavLM checkpoint and overwrites the projector, TDNN
    kernels, utterance-level feature extractor, classifier and AMSoftmax
    objective weights with the tensors from ``downstream_dict``.
    Returns the populated model.
    """
    model = WavLMForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    # One TDNN layer per entry of hf_config.tdnn_kernel.
    for layer_idx, _unused_kernel_size in enumerate(hf_config.tdnn_kernel):
        prefix = f'model.framelevel_feature_extractor.module.{layer_idx}.kernel'
        model.tdnn[layer_idx].kernel.weight.data = downstream_dict[f'{prefix}.weight']
        model.tdnn[layer_idx].kernel.bias.data = downstream_dict[f'{prefix}.bias']
    utt_prefix = 'model.utterancelevel_feature_extractor'
    model.feature_extractor.weight.data = downstream_dict[f'{utt_prefix}.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict[f'{utt_prefix}.linear1.bias']
    model.classifier.weight.data = downstream_dict[f'{utt_prefix}.linear2.weight']
    model.classifier.bias.data = downstream_dict[f'{utt_prefix}.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
def load_all_image_paths_track_val(opt, phase):
    """Collect per-clip path lists for validation-time tracking.

    For every city folder under the image root, consecutive runs of 30
    frames form one clip.  For each clip, parallel lists of image,
    inpainted-background, Mask-R-CNN instance, PSP semantic and depth-map
    paths are gathered together with the pre-computed tracking dictionary.

    Returns a list of (image, back, instance, semantic, track_dict, depth)
    tuples, one per clip.
    """
    image_dir = f'{opt.ImagesRoot}{phase}/'
    inpainted_back_dir = f'{opt.BackRoot}{phase}/'
    instance_gt_dir = f'{opt.InstanceGTRoot}{phase}/'  # computed but unused below, kept for parity
    instance_mask_rcnn_dir = f'{opt.Instance_maskrcnn}{phase}/'
    semantic_psp_dir = f'{opt.SemanticRoot}{phase}/'
    semantic_gt_dir = f'{opt.SemanticGTRoot}{phase}/'  # computed but unused below, kept for parity
    depth_dir = f'{opt.DepthMapRoot}{phase}/'
    track_instance_dir = opt.TrackInstanceRoot

    cities = sorted(os.listdir(image_dir))
    clip_count = 0
    videos = []
    for city in cities:
        frame_dir = f'{image_dir}{city}'
        back_dir = f'{inpainted_back_dir}{city}'
        frame_depth_dir = f'{depth_dir}{city}'
        frames = sorted(os.listdir(frame_dir))
        # Only complete 30-frame clips are used; trailing frames are dropped.
        for clip_idx in range(len(frames) // 30):
            images, backs, instances, semantics, depths = [], [], [], [], []
            for k in range(clip_idx * 30, (clip_idx + 1) * 30):
                frame_name = frames[k]
                image_path = f'{frame_dir}/{frame_name}'
                assert os.path.isfile(image_path)
                images.append(image_path)
                backs.append(f'{back_dir}/{frame_name}')
                instances.append(f'{instance_mask_rcnn_dir}{city}/{frame_name}')
                semantics.append(f'{semantic_psp_dir}{city}/{frame_name}')
                # Depth maps share the frame name but use a .npy extension.
                depths.append(f'{frame_depth_dir}/{frame_name[:-3]}npy')
            track_dict = load_tracked_dict_val(track_instance_dir + ('%04d/' % clip_count))
            clip_count += 1
            videos.append((images, backs, instances, semantics, track_dict, depths))
    return videos
def getDataLoader(batch_size, num_of_questions, max_step):
    """Build train/test DataLoaders for the ASSIST2009 builder dataset.

    Reads the fixed train/test CSV pair via ``DataReader``, converts both
    splits to float32 tensors and wraps them in DataLoaders (training data
    shuffled, test data not).  Returns (trainLoader, testLoader).
    """
    reader = DataReader('dataset/assist2009/builder_train.csv', 'dataset/assist2009/builder_test.csv', max_step, num_of_questions)
    train_tensor = torch.tensor(reader.getTrainData().astype(float).tolist(), dtype=torch.float32)
    test_tensor = torch.tensor(reader.getTestData().astype(float).tolist(), dtype=torch.float32)
    train_loader = Data.DataLoader(train_tensor, batch_size=batch_size, shuffle=True)
    test_loader = Data.DataLoader(test_tensor, batch_size=batch_size, shuffle=False)
    return (train_loader, test_loader)
class AvKeyframeVideoCompressor(VideoLoader):
    """Extract up to ``max_num_frames`` frames per video with PyAV and save
    them as JPEG files into the per-video feature directory.

    With ``framerate == 0`` only keyframes are decoded; otherwise all frames
    are decoded and a random subset is kept.
    """

    def __init__(self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, max_num_frames=5, **kwargs):
        super().__init__(csv, video_dict, framerate, size, centercrop)
        self.max_num_frames = max_num_frames

    def _get_video_dim(self, video_fn):
        """Return (height, width) of the first video stream."""
        import av
        with av.open(video_fn) as container:
            stream = container.streams.video[0]
            return (stream.codec_context.height, stream.codec_context.width)

    def _get_output_dim(self, height, width):
        """Scale so the smaller side equals ``self.size``, keeping aspect ratio."""
        if height >= width:
            return (int(height * self.size / width), self.size)
        return (self.size, int(width * self.size / height))

    def __getitem__(self, idx):
        import av
        video_path = self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        # Skip videos already extracted (output dir exists) or missing inputs.
        if (not os.path.isdir(output_file)) and os.path.isfile(video_path):
            try:
                (h, w) = self._get_video_dim(video_path)
            except Exception:
                print('probe failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path, 'output': output_file}
            try:
                (height, width) = self._get_output_dim(h, w)
                with av.open(video_path) as container:
                    container.streams.video[0].thread_type = 'AUTO'
                    container.streams.video[0].codec_context.height = height
                    container.streams.video[0].codec_context.width = width
                    if self.framerate == 0:
                        # framerate == 0 means "keyframes only".
                        container.streams.video[0].codec_context.skip_frame = 'NONKEY'
                    frames = list(container.decode(video=0))
                    # Fix: clamp the sample size — random.sample raises
                    # ValueError when the video has fewer frames than
                    # max_num_frames, which previously made every short
                    # video fail extraction.
                    frames = random.sample(frames, min(self.max_num_frames, len(frames)))
                    os.makedirs(output_file, exist_ok=True)
                    for frame in frames:
                        frame.to_image().save(os.path.join(output_file, '%04d.jpg' % frame.index))
            except Exception:
                print('extract failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path, 'output': output_file}
        # The actual tensor payload is a placeholder; this loader only
        # writes JPEGs to disk as a side effect.
        video = th.zeros(1)
        return {'video': video, 'input': video_path, 'output': output_file}
class MultiSumBlock(PlainNetBasicBlockClass):
    """Block that sums the outputs of several parallel sub-blocks.

    All sub-blocks must produce the same output resolution; in/out channel
    counts are taken as the maximum over sub-blocks.

    Fix over the reviewed version: ``create_from_str`` takes ``cls`` but was
    missing its ``@classmethod`` decorator (so calling it on the class would
    have bound ``cls`` to the string argument); the decorator is restored.
    """

    def __init__(self, block_list, no_create=False, **kwargs):
        super(MultiSumBlock, self).__init__(**kwargs)
        self.block_list = block_list
        if not no_create:
            self.module_list = nn.ModuleList(block_list)
        self.in_channels = np.max([x.in_channels for x in block_list])
        self.out_channels = np.max([x.out_channels for x in block_list])
        self.no_create = no_create
        # Derive the stride from a probe resolution of 1024.
        res = 1024
        res = self.block_list[0].get_output_resolution(res)
        self.stride = (1024 // res)

    def forward(self, x):
        """Element-wise sum of every sub-block applied to the same input."""
        output = self.block_list[0](x)
        for inner_block in self.block_list[1:]:
            output = output + inner_block(x)
        return output

    def __str__(self):
        s = 'MultiSumBlock({}|'.format(self.block_name)
        for inner_block in self.block_list:
            s += (str(inner_block) + ';')
        s = s[:-1]  # drop the trailing ';'
        s += ')'
        return s

    def __repr__(self):
        return str(self)

    def get_output_resolution(self, input_resolution):
        the_res = self.block_list[0].get_output_resolution(input_resolution)
        # All parallel branches must agree on the output resolution.
        for the_block in self.block_list:
            assert the_res == the_block.get_output_resolution(input_resolution)
        return the_res

    def get_FLOPs(self, input_resolution):
        """Total FLOPs = sum over all parallel sub-blocks."""
        the_flops = 0
        for the_block in self.block_list:
            the_flops += the_block.get_FLOPs(input_resolution)
        return the_flops

    def get_model_size(self):
        """Total parameter count = sum over all parallel sub-blocks."""
        the_size = 0
        for the_block in self.block_list:
            the_size += the_block.get_model_size()
        return the_size

    def set_in_channels(self, c):
        self.in_channels = c
        for the_block in self.block_list:
            the_block.set_in_channels(c)

    @classmethod
    def create_from_str(cls, s, no_create=False, **kwargs):
        """Parse ``MultiSumBlock(name|b1;b2;...)rest`` and return (block, rest)."""
        assert MultiSumBlock.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert idx is not None
        param_str = s[len('MultiSumBlock('):idx]
        # Optional '<name>|' prefix; otherwise generate a unique block name.
        tmp_idx = param_str.find('|')
        if tmp_idx < 0:
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        the_s = param_str
        the_block_list = []
        while len(the_s) > 0:
            (tmp_block_list, remaining_s) = _create_netblock_list_from_str_(the_s, no_create=no_create)
            the_s = remaining_s
            if tmp_block_list is None:
                pass
            elif len(tmp_block_list) == 1:
                the_block_list.append(tmp_block_list[0])
            else:
                # Several sequential sub-blocks form a single parallel branch.
                the_block_list.append(Sequential(block_list=tmp_block_list, no_create=no_create))
        if len(the_block_list) == 0:
            return (None, s[(idx + 1):])
        return (MultiSumBlock(block_list=the_block_list, block_name=tmp_block_name, no_create=no_create), s[(idx + 1):])
def test_diskdf_method_inputAsQuantity_special():
    """Disk DF methods must accept astropy Quantity inputs and agree with
    the unit-less calls after conversion back to internal units.

    Compares dehnendf/shudf instances constructed with physical scales
    (ro=7 kpc, vo=230 km/s) against "no units" reference instances, scaling
    the unit-less results by the appropriate conversion factors.
    """
    from galpy.df import dehnendf, shudf
    from galpy.util import conversion
    (ro, vo) = (7.0, 230.0)
    df = dehnendf(ro=ro, vo=vo)
    dfnou = dehnendf()  # unit-less reference instance
    dfs = shudf(ro=ro, vo=vo)
    dfsnou = shudf()
    # __call__(E, L) with Quantity energy and angular momentum (Dehnen DF).
    assert (numpy.fabs((df((((0.6 * (vo ** 2.0)) * (units.km ** 2)) / (units.s ** 2)), (((((1.1 * vo) * ro) * units.kpc) * units.km) / units.s)).to(((1 / (units.kpc ** 2)) / ((units.km / units.s) ** 2))).value - ((dfnou(0.6, 1.1) / (vo ** 2)) / (ro ** 2)))) < (10.0 ** (- 6.0))), 'diskdf method __call__ with Quantity input does not return correct Quantity'
    # Same check for the Shu DF.
    assert (numpy.fabs((dfs((((0.6 * (vo ** 2.0)) * (units.km ** 2)) / (units.s ** 2)), (((((1.1 * vo) * ro) * units.kpc) * units.km) / units.s)).to(((1 / (units.kpc ** 2)) / ((units.km / units.s) ** 2))).value - ((dfsnou(0.6, 1.1) / (vo ** 2)) / (ro ** 2)))) < (10.0 ** (- 6.0))), 'diskdf method __call__ with Quantity input does not return correct Quantity'
    # targetSurfacemassLOS with Quantity distance and angle.
    assert (numpy.fabs((df.targetSurfacemassLOS(((1.2 * ro) * units.kpc), (40.0 * units.deg)).to((units.Msun / units.pc)).value - (((dfnou.targetSurfacemassLOS(1.2, 40.0) * conversion.surfdens_in_msolpc2(vo, ro)) * ro) * 1000.0))) < (10.0 ** (- 8.0))), 'diskdf method targetSurfacemassLOS with Quantity input does not return correct Quantity'
    # surfacemassLOS with Quantity distance and angle.
    assert (numpy.fabs((df.surfacemassLOS(((1.2 * ro) * units.kpc), (35.0 * units.deg)).to((units.Msun / units.pc)).value - (((dfnou.surfacemassLOS(1.2, 35.0) * conversion.surfdens_in_msolpc2(vo, ro)) * ro) * 1000.0))) < (10.0 ** (- 8.0))), 'diskdf method surfacemassLOS does with Quantity input not return correct Quantity'
    # vmomentsurfacemass with Quantity ro/vo keyword overrides.
    assert (numpy.fabs((df.vmomentsurfacemass(1.1, 0, 0, ro=(9.0 * units.kpc), vo=((245.0 * units.km) / units.s)).to((units.Msun / (units.pc ** 2))).value - (dfnou.vmomentsurfacemass(1.1, 0, 0) * conversion.surfdens_in_msolpc2(245, 9.0)))) < (10.0 ** (- 8.0))), 'diskdf method vmomentsurfacemass does with Quantity input not return correct Quantity'
    return None
def standard_retrieve(nbt, dim):
    """Retrieve `nbt` numerical tropisms of length `dim` from the
    standard-double-precision container.

    Returns a tuple (wnd, dirs, err): the integer winding numbers, the list
    of direction vectors (one list of `dim` values per tropism) and the
    error estimates, as decoded from the flat string representation
    produced by the PHCpack C interface.
    """
    from ast import literal_eval
    from phcpy.phcpy2c3 import py2c_numbtrop_standard_retrieve as load
    (fail, strdata) = load(nbt, dim)
    flat = literal_eval(strdata)
    # Layout of `flat`: nbt winding numbers, then nbt*dim direction
    # coordinates row by row, then nbt error estimates.
    winding = [int(flat[idx]) for idx in range(nbt)]
    directions = [[flat[nbt + row * dim + col] for col in range(dim)] for row in range(nbt)]
    errors = [flat[nbt * (dim + 1) + idx] for idx in range(nbt)]
    return (winding, directions, errors)
class MUSTC(Dataset):
    """MuST-C English->`lang` speech translation dataset.

    Expects the official MuST-C layout::

        <root>/en-<lang>/data/<split>/{wav,txt}/

    with ``<split>.yaml`` describing the audio segments and ``<split>.en`` /
    ``<split>.<lang>`` holding the parallel transcripts, one utterance per
    line, aligned with the YAML entries.  Each item is
    (waveform, sample_rate, src_utt, tgt_utt, speaker_id, utterance_id).
    """

    SPLITS = ['train', 'dev', 'tst-COMMON', 'tst-HE']
    LANGUAGES = ['de', 'es', 'fr', 'it', 'nl', 'pt', 'ro', 'ru']

    def __init__(self, root: str, lang: str, split: str) -> None:
        assert ((split in self.SPLITS) and (lang in self.LANGUAGES))
        _root = Path(root) / f'en-{lang}' / 'data' / split
        (wav_root, txt_root) = ((_root / 'wav'), (_root / 'txt'))
        assert (_root.is_dir() and wav_root.is_dir() and txt_root.is_dir())
        try:
            import yaml
        except ImportError as err:
            # Fix: the original only printed here and then crashed later
            # with a NameError on `yaml`; fail fast with a clear error.
            raise ImportError('Please install PyYAML to load the MuST-C YAML files') from err
        with open((txt_root / f'{split}.yaml')) as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)
        # Attach the English and target-language transcripts to each segment.
        for _lang in ['en', lang]:
            with open((txt_root / f'{split}.{_lang}')) as f:
                utterances = [r.strip() for r in f]
            assert (len(segments) == len(utterances))
            for (i, u) in enumerate(utterances):
                segments[i][_lang] = u
        self.data = []
        # NOTE(review): itertools.groupby only merges *consecutive* entries,
        # so this relies on the YAML listing all segments of one wav file
        # contiguously — confirm against the MuST-C release format.
        for (wav_filename, _seg_group) in groupby(segments, (lambda x: x['wav'])):
            wav_path = (wav_root / wav_filename)
            sample_rate = sf.info(wav_path.as_posix()).samplerate
            # Fix: BaseLoader yields offsets as *strings*; sort numerically,
            # not lexicographically, so segment ids follow time order.
            seg_group = sorted(_seg_group, key=(lambda x: float(x['offset'])))
            for (i, segment) in enumerate(seg_group):
                offset = int((float(segment['offset']) * sample_rate))
                n_frames = int((float(segment['duration']) * sample_rate))
                _id = f'{wav_path.stem}_{i}'
                self.data.append((wav_path.as_posix(), offset, n_frames, sample_rate, segment['en'], segment[lang], segment['speaker_id'], _id))

    def __getitem__(self, n: int) -> Tuple[(torch.Tensor, int, str, str, str, str)]:
        """Load and return the n-th segment's waveform and metadata."""
        (wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, utt_id) = self.data[n]
        (waveform, _) = get_waveform(wav_path, frames=n_frames, start=offset)
        waveform = torch.from_numpy(waveform)
        return (waveform, sr, src_utt, tgt_utt, spk_id, utt_id)

    def __len__(self) -> int:
        return len(self.data)
@dataclass
class CHMMArguments:
    """Command-line / training arguments for the CHMM denoising model.

    Fixes over the reviewed version: the class uses ``field(...)`` defaults
    and so needs the ``@dataclass`` decorator (restored), and the stray
    ``_property`` line before ``_setup_devices`` was a stripped decorator
    that would have raised NameError at class creation — ``_setup_devices``,
    ``device`` and ``n_gpu`` are restored as properties.
    """

    train_path: Optional[str] = field(default='', metadata={'help': 'training data name'})
    valid_path: Optional[str] = field(default='', metadata={'help': 'development data name'})
    test_path: Optional[str] = field(default='', metadata={'help': 'test data name'})
    output_dir: Optional[str] = field(default='.', metadata={'help': 'The output folder where the model predictions and checkpoints will be written.'})
    save_dataset: Optional[bool] = field(default=False, metadata={'help': 'Whether save the datasets used for training & validation & test'})
    save_dataset_to_data_dir: Optional[bool] = field(default=False, metadata={'help': 'Whether save the datasets to the original dataset folder. If not, the dataset would be saved to the result folder.'})
    load_preprocessed_dataset: Optional[bool] = field(default=False, metadata={'help': 'Whether load the pre-processed datasets from disk'})
    track_training_time: Optional[bool] = field(default=False, metadata={'help': 'Whether track training time in log files'})
    trans_nn_weight: Optional[float] = field(default=1.0, metadata={'help': 'the weight of neural part in the transition matrix'})
    no_neural_emiss: Optional[bool] = field(default=False, metadata={'help': 'Not use neural networks to predict emission probabilities.'})
    emiss_nn_weight: Optional[float] = field(default=1.0, metadata={'help': 'the weight of neural part in the emission matrix'})
    num_lm_train_epochs: Optional[int] = field(default=15, metadata={'help': 'number of denoising model training epochs'})
    num_lm_nn_pretrain_epochs: Optional[int] = field(default=5, metadata={'help': 'number of denoising model pre-training epochs'})
    num_lm_valid_tolerance: Optional[int] = field(default=10, metadata={'help': 'How many tolerance epochs before quiting training'})
    hmm_lr: Optional[float] = field(default=0.01, metadata={'help': 'learning rate of the original hidden markov model transition and emission'})
    nn_lr: Optional[float] = field(default=0.001, metadata={'help': 'learning rate of the neural networks in CHMM'})
    lm_batch_size: Optional[int] = field(default=128, metadata={'help': 'denoising model training batch size'})
    obs_normalization: Optional[bool] = field(default=False, metadata={'help': 'whether normalize observations'})
    bert_model_name_or_path: Optional[str] = field(default='', metadata={'help': 'Path to pretrained BERT model or model identifier from huggingface.co/models; Used to construct BERT embeddings if not exist'})
    no_cuda: Optional[bool] = field(default=False, metadata={'help': 'Disable CUDA even when it is available'})
    log_dir: Optional[str] = field(default=None, metadata={'help': "the directory of the log file. Set to '' to disable logging"})
    seed: Optional[int] = field(default=42, metadata={'help': 'Random seed that will be set at the beginning of training.'})
    debug_mode: Optional[bool] = field(default=False, metadata={'help': 'Debugging mode with fewer training data'})

    @property
    def _setup_devices(self) -> 'torch.device':
        """Pick the torch device; also records the GPU count in self._n_gpu."""
        if self.no_cuda or (not torch.cuda.is_available()):
            device = torch.device('cpu')
            self._n_gpu = 0
        else:
            device = torch.device('cuda')
            self._n_gpu = 1
        return device

    @property
    def device(self) -> 'torch.device':
        """The torch device training should run on."""
        return self._setup_devices

    @property
    def n_gpu(self) -> 'int':
        """Number of GPUs in use (0 or 1)."""
        # Evaluating _setup_devices populates self._n_gpu as a side effect.
        _ = self._setup_devices
        return self._n_gpu
class HtmlReport(EventSink):
    """Event sink that renders training progress as a static HTML report.

    Scalar series become curve plots, per-epoch value vectors become
    distribution plots, weight snapshots become histograms and image blobs
    become linked thumbnails; everything is written under
    ``<dataroot>/htmlreport/``.
    """

    # Subdirectory of `dataroot` holding index.html, plots and thumbnails.
    folder_name = 'htmlreport'

    def __init__(self, dataroot):
        self.dataroot = dataroot
        # Nested mapping: data[section_key][sub_key] -> plot payload dict.
        self.data = {}
        os.makedirs(os.path.join(dataroot, self.folder_name), exist_ok=True)

    def load_epochs_data(self, epochs, consts):
        """Bulk-load previously recorded epochs and render the final page."""
        assert (not self.data)
        for (i, data) in enumerate(epochs):
            self._store_epoch_data(i, data, consts)
        self.render((len(epochs) - 1))
        return self

    def _store_epoch_data(self, epoch, data, consts):
        """Fold one epoch's entries (and constant blobs) into `self.data`."""
        assert (epoch >= 0)
        for (key, item) in data.items():
            # Keys look like 'a/b/c...': everything up to the second '/'
            # becomes the section key; the remainder prefixes sub-entries.
            (*key, key2) = key.split('/', 2)
            key = '/'.join(key)
            if (key not in self.data):
                self.data[key] = {}
            for (subkey, subitem) in item['data'].items():
                subkey = ('%s/%s' % (key2, subkey))
                if (subkey not in self.data[key]):
                    self.data[key][subkey] = {'data': [], 'subtype': (item['dtype'].rsplit('/', 1)[1] if ('/' in item['dtype']) else '')}
                if item['dtype'].startswith('scalar/'):
                    if (not isinstance(subitem, (list, np.ndarray))):
                        # Single scalar per epoch -> one point on a curve.
                        self.data[key][subkey]['plot_type'] = 'curve'
                        self.data[key][subkey]['data'].append(((epoch + 1), subitem))
                    else:
                        # A vector of scalars -> summarized as a 20-bin
                        # histogram (centers + counts) plus its mean.
                        if (not isinstance(subitem, np.ndarray)):
                            subitem = np.array(subitem)
                        subitem = subitem[(~ np.isnan(subitem))]
                        (values, bins) = np.histogram(subitem, bins=20)
                        centers = ((bins[1:] + bins[:(- 1)]) / 2)
                        self.data[key][subkey]['plot_type'] = 'distribution'
                        self.data[key][subkey]['data'].append(((epoch + 1), centers, values, np.mean(subitem)))
                elif item['dtype'].startswith('weight/'):
                    # Weight snapshots get fractional x positions within the
                    # epoch based on their iteration index.
                    self.data[key][subkey]['plot_type'] = 'histogram'
                    for (i, subitem_item) in enumerate(subitem):
                        self.data[key][subkey]['data'].append((((epoch + ((item['relative_iteration'][i] + 1) / item['epoch_size'])),) + subitem_item))
                elif (item['dtype'] == 'blob'):
                    # Images on disk: keep path plus a generated thumbnail.
                    self.data[key][subkey]['plot_type'] = 'thumbnail_set'
                    for (i, subitem_item) in enumerate(subitem):
                        self.data[key][subkey]['data'].append({**subitem_item, 'epoch': epoch, 'iteration': item['relative_iteration'][i], 'thumbnail': self._square_thumbnail(subitem_item['path'], 200)})
        # Constant blobs are stored once, the first time they are seen.
        for (key, item) in consts.items():
            (*key, key2) = key.split('/', 2)
            key = '/'.join(key)
            if ((key not in self.data) and (item['dtype'] == 'blob')):
                self.data[key] = {}
                for (subkey, subitem) in item['data'].items():
                    subkey = ('%s/%s' % (key2, subkey))
                    self.data[key][subkey] = {**subitem, 'plot_type': 'thumbnail', 'thumbnail': self._square_thumbnail(subitem['path'], 200)}

    def register_epoch_data(self, epoch, data, consts):
        """Record one epoch's data and re-render the report."""
        self._store_epoch_data(epoch, data, consts)
        self.render(epoch)

    def _square_thumbnail(self, path, size):
        """Create (once) and return the filename of a square thumbnail for `path`."""
        thumb = ('%s.thumb.%s' % tuple(os.path.basename(path).rsplit('.', 1)))
        if (not os.path.exists(os.path.join(self.dataroot, self.folder_name, thumb))):
            img = Image.open(path)
            if (min(img.size) < size):
                # Pad small images onto a transparent square canvas first.
                (diff0, diff1) = (max((size - img.size[0]), 0), max((size - img.size[1]), 0))
                newimg = Image.new('RGBA', (size, size), (255, 255, 255, 0))
                newimg.paste(img, ((diff0 // 2), (diff1 // 2)))
                img = newimg
            # Center-crop to a square, then scale down to the target size.
            (diff0, diff1) = (((img.size[0] - min(img.size)) / 2), ((img.size[1] - min(img.size)) / 2))
            img = img.crop((diff0, diff1, (min(img.size) + diff0), (min(img.size) + diff1)))
            img.thumbnail((size, size))
            img.save(os.path.join(self.dataroot, self.folder_name, thumb))
        return thumb

    def render(self, epoch):
        """Regenerate all plot images and write index.html for `epoch`."""
        name = os.path.basename(os.path.dirname(os.path.abspath(self.dataroot)))
        html = {'name': ("<div style='word-break: break-word;'>Epoch %s of %s</div>" % ((epoch + 1), name)), 'data': [], 'type': 'rows'}
        # Preferred ordering of known sections; unknown ones sort last (100).
        order = {'train/learning': 0, 'val/learning': 1, 'train/net': 2, 'net': 3, 'train/data': 4}
        sets = {}
        for (key, item) in sorted(self.data.items(), key=(lambda x: order.get(x[0], 100))):
            section = []
            for (subkey, subitem) in item.items():
                fname = ('%s_%s_%%s.png' % (key.replace('/', '_'), subkey.replace('/', '_')))
                if (subitem['plot_type'] == 'curve'):
                    fname %= 'plot'
                    self.store_plot(fname, subitem['data'], subitem['subtype'])
                elif (subitem['plot_type'] == 'distribution'):
                    fname %= 'dist'
                    self.store_distribution(fname, subitem['data'], subitem['subtype'])
                elif (subitem['plot_type'] == 'histogram'):
                    fname %= 'hist'
                    self.store_histogram(fname, subitem['data'], subitem['subtype'])
                elif (subitem['plot_type'] == 'thumbnail'):
                    # Constant image: link the thumbnail to the original file.
                    fname = os.path.relpath(subitem['path'], os.path.join(self.dataroot, self.folder_name))
                    section.append({'type': 'blocks', 'name': subkey.replace('/', '<br />'), 'data': [{'type': 'image', 'source': subitem['thumbnail'], 'link': fname, 'size': 200}]})
                    continue
                elif (subitem['plot_type'] == 'thumbnail_set'):
                    # Group per-iteration images into one row per epoch/iter;
                    # the row list in `sets` is shared with `section`, so
                    # later appends show up in the rendered page.
                    if (key not in sets):
                        sets[key] = {}
                    for singleimg in subitem['data']:
                        name = ('Epoch %s (iter %s)' % ((singleimg['epoch'] + 1), (singleimg['iteration'] + 1)))
                        if (name not in sets[key]):
                            sets[key][name] = []
                            section.append({'type': 'blocks', 'name': name, 'data': sets[key][name]})
                        fname = os.path.relpath(singleimg['path'], os.path.join(self.dataroot, self.folder_name))
                        sets[key][name].append({'type': 'blocks', 'name': subkey.replace('/', '<br />'), 'data': [{'type': 'image', 'source': singleimg['thumbnail'], 'link': fname, 'size': 200}]})
                    continue
                else:
                    continue
                h_name = ("<div style='max-width: 300px; word-break: break-word;'>%s</div>" % subkey.replace('/', '<br />'))
                section.append({'type': 'blocks', 'name': h_name, 'data': [{'type': 'image', 'source': fname, 'link': fname, 'size': 300}]})
            html['data'].append({'name': key, 'data': section, 'type': 'blocks', 'css': 'margin: 0 3pt 0 3pt;'})
        with open(os.path.join(self.dataroot, self.folder_name, 'index.html'), 'w') as handle:
            css = '\n        only screen and (min-resolution: 200dpi) {\n            body { zoom: 0.65; }\n        }\n    '
            handle.write(pres.Document().struct2html(html, css=css))

    def store_plot(self, fname, data, ylabel):
        """Save a curve plot of (x, y) points to `fname`."""
        plt.figure(figsize=(6, 4))
        plt.ylabel(ylabel)
        plots.plot_curve(data, plt.gca())
        plt.savefig(os.path.join(self.dataroot, self.folder_name, fname), transparent=True, bbox_inches='tight')
        plt.close()

    def store_distribution(self, fname, data, ylabel):
        """Save a distribution plot; 4-tuples also carry a mean curve on top."""
        plt.figure(figsize=(6, 4))
        plt.ylabel(ylabel)
        ax = plt.gca()
        if (len(data[0]) == 4):
            # (epoch, centers, values, mean): draw the mean as a curve too.
            plots.plot_curve([(i, z) for (i, x, y, z) in data], plt.gca())
            plots.plot_distribution([x[:3] for x in data], plt.gca())
        else:
            plots.plot_distribution(data, plt.gca())
        plt.savefig(os.path.join(self.dataroot, self.folder_name, fname), transparent=True, bbox_inches='tight')
        plt.close()

    def store_histogram(self, fname, data, ylabel):
        """Save a histogram-over-time plot to `fname`."""
        plt.figure(figsize=(6, 5))
        plt.xlabel(ylabel)
        ax = plt.gca()
        plots.plot_histogram(data, plt.gca())
        plt.savefig(os.path.join(self.dataroot, self.folder_name, fname), transparent=True, bbox_inches='tight')
        plt.close()
_model_architecture(model_name='head_selection_s2t_transformer', arch_name='head_selection_s2t_transformer')
def base_architecture(args):
    """Fill in head-selection defaults on top of the base S2T transformer args."""
    s2t_base_architecture(args)
    # Defaults are only applied when the attribute is not already set.
    defaults = {
        'encoder_attn_head_select': False,
        'decoder_self_attn_head_select': False,
        'dec_enc_attn_head_select': False,
        'total_encoder_attention_heads': 8,
        'total_decoder_attention_heads': 8,
        'attn_head_select_strategy': 'group',
    }
    for attr, default in defaults.items():
        setattr(args, attr, getattr(args, attr, default))
def binary(o1, o2, step, op='NONE'):
    """Symbolically evaluate a binary EVM operation on two operand descriptors.

    Each operand is a dict with a 'type' key ('constant', 'undefined', ...)
    and, when defined, a 256-bit z3 expression under 'z3'. Constant-zero
    operands are folded early: 0 is absorbing for MUL/AND/DIV/SDIV and
    neutral for XOR/ADD.

    Returns a 'constant' descriptor carrying the folded z3 expression, or an
    'undefined' descriptor (tagged with `step`) when the operation cannot be
    evaluated.
    """
    # Early algebraic folding when the first operand is fully concrete.
    if is_fixed(o1):
        val = simplify(o1['z3']).as_long()
        if (op in ['MUL', 'AND', 'DIV', 'SDIV']) and (0 == val):
            return {'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)}
        if (op in ['XOR', 'ADD']) and (0 == val):
            return o2
    # Same folding for the second operand.
    if is_fixed(o2):
        val = simplify(o2['z3']).as_long()
        if (op in ['MUL', 'AND', 'DIV', 'SDIV']) and (0 == val):
            return {'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)}
        if (op in ['XOR', 'ADD']) and (0 == val):
            return o1
    if is_undefined(o1) or is_undefined(o2):
        return {'type': 'undefined', 'step': step}
    z1 = simplify(o1['z3'])
    z2 = simplify(o2['z3'])
    if op == 'AND':
        z3 = z1 & z2
    elif op == 'OR':
        z3 = z1 | z2
    elif op == 'XOR':
        z3 = z1 ^ z2
    elif op == 'ADD':
        z3 = z1 + z2
    elif op == 'SUB':
        z3 = z1 - z2
    elif op == 'EXP':
        # Exponentiation is folded only when both sides are concrete values.
        if is_bv_value(z1) and is_bv_value(z2):
            z3 = BitVecVal(power(z1.as_long(), z2.as_long(), 2 ** 256), 256)
        else:
            return {'type': 'undefined', 'step': step}
    elif op == 'DIV':
        z3 = UDiv(z1, z2)  # unsigned bit-vector division
    elif op == 'SDIV':
        z3 = z1 / z2  # z3's '/' on bit-vectors is the signed division
    elif op == 'MOD':
        z3 = URem(z1, z2)  # unsigned remainder
    elif op == 'SMOD':
        z3 = z1 % z2
    elif op == 'MUL':
        z3 = z1 * z2
    elif op == 'GT':
        z3 = If(UGT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
    elif op == 'SGT':
        z3 = If(z1 > z2, BitVecVal(1, 256), BitVecVal(0, 256))
    elif op == 'LT':
        z3 = If(ULT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
    elif op == 'SLT':
        z3 = If(z1 < z2, BitVecVal(1, 256), BitVecVal(0, 256))
    elif op == 'EQ':
        # FIX: removed an ineffective `global last_eq_step, last_eq_func`
        # declaration — the values below are stored as attributes on
        # MyGlobals, so the global statement bound nothing.
        # Record comparisons against constants fitting in 4 bytes
        # (2^28 < c < 2^32) — presumably function-selector checks.
        if is_bv_value(z1) and (z1.as_long() < (2 ** 32)) and (z1.as_long() > (2 ** 28)):
            MyGlobals.last_eq_step = step
            MyGlobals.last_eq_func = z1.as_long()
        if is_bv_value(z2) and (z2.as_long() < (2 ** 32)) and (z2.as_long() > (2 ** 28)):
            MyGlobals.last_eq_step = step
            MyGlobals.last_eq_func = z2.as_long()
        z3 = If(z1 == z2, BitVecVal(1, 256), BitVecVal(0, 256))
    else:
        print(('did not process binary operation %s ' % op))
        print(o1)
        print(o2)
        return {'type': 'undefined', 'step': step}
    return {'type': 'constant', 'step': step, 'z3': z3}
def test_python_to_cpp_to_python_from_process():
    """The python->cpp->python round trip must exit cleanly in a subprocess."""
    exit_code = _run_in_process(_python_to_cpp_to_python)
    assert exit_code == 0
def _check_params(start, end, include_start, include_end):
    """Validate a (start, end) layer-range specification against UNet layers.

    Raises ValueError when an endpoint is None yet explicitly excluded, when
    a named endpoint is not a known UNet layer, or when `start` comes after
    `end` in the architecture order.
    """
    if (start is None) and (include_start is False):
        raise ValueError('include_start should be True given start=None')
    if (end is None) and (include_end is False):
        raise ValueError('include_end should be True given end=None')
    # Named endpoints must be known UNet layers.
    # (Membership on the dict directly — `.keys()` was redundant.)
    if isinstance(start, str) and (start not in UNet.layer_dimension):
        raise ValueError(start)
    if isinstance(end, str) and (end not in UNet.layer_dimension):
        raise ValueError(end)
    if isinstance(start, str) and isinstance(end, str):
        if arch_order(start) > arch_order(end):
            raise ValueError((start, end))
class CrossEntropyLoss(torch.autograd.Function):
    """Triton-backed cross-entropy with label smoothing, an optional z-loss
    term (`lse_square_scale`) and tensor-parallel class sharding.

    When `process_group` is given, `logits` holds this rank's shard of the
    classes (n_cols out of world_size * n_cols) and the log-sum-exp is
    combined across ranks.

    NOTE(review): `torch.autograd.Function` entry points are normally
    decorated with @staticmethod; decorators are not visible in this chunk —
    confirm against the original file.
    """

    def forward(ctx, logits, labels, smoothing, lse_square_scale=0.0, ignored_index=(- 100), inplace_backward=False, process_group=None):
        (n_rows, n_cols) = logits.shape
        assert (labels.shape == (n_rows,))
        # Tensor-parallel bookkeeping: this rank owns classes
        # [class_start_idx, class_start_idx + n_cols).
        world_size = (1 if (process_group is None) else torch.distributed.get_world_size(process_group))
        total_classes = (world_size * n_cols)
        rank = (0 if (process_group is None) else torch.distributed.get_rank(process_group))
        class_start_idx = (rank * n_cols)
        # The kernel requires a unit stride along the class dimension.
        if (logits.stride((- 1)) != 1):
            logits = logits.contiguous()
        MAX_BLOCK_SIZE = (64 * 1024)
        BLOCK_SIZE = min(triton.next_power_of_2(n_cols), MAX_BLOCK_SIZE)
        num_warps = (4 if (BLOCK_SIZE < 2048) else (8 if (BLOCK_SIZE < 8192) else (16 if (BLOCK_SIZE < (128 * 1024)) else 32)))
        # Split the class dimension when it does not fit a single block or is
        # sharded across ranks; partial losses/lse are combined below.
        split = ((world_size > 1) or (n_cols > MAX_BLOCK_SIZE))
        n_splits = (((n_cols + BLOCK_SIZE) - 1) // BLOCK_SIZE)
        loss_shape = ((n_splits, n_rows) if (n_splits > 1) else (n_rows,))
        losses = torch.empty(*loss_shape, dtype=torch.float, device=logits.device)
        lse = torch.empty(*loss_shape, dtype=torch.float, device=logits.device)
        with torch.cuda.device(logits.device.index):
            cross_entropy_fwd_kernel[(n_rows, n_splits)](losses, lse, logits, labels, smoothing, lse_square_scale, ignored_index, total_classes, class_start_idx, n_cols, n_rows, logits.stride(0), BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, SPLIT=split)
        if split:
            if (world_size > 1):
                # Gather per-rank lse and sum the partial losses; the
                # all-reduce overlaps with the logsumexp on the gathered lse.
                lse_allgather = torch.empty(world_size, n_rows, dtype=lse.dtype, device=lse.device)
                torch.distributed.all_gather_into_tensor(lse_allgather, lse, group=process_group)
                handle_losses = torch.distributed.all_reduce(losses, op=torch.distributed.ReduceOp.SUM, group=process_group, async_op=True)
                lse = torch.logsumexp(lse_allgather, dim=0)
                handle_losses.wait()
            else:
                # Single-rank split: combine the per-block partial results.
                lse = torch.logsumexp(lse, dim=0)
                losses = losses.sum(dim=0)
            losses += lse
            if (lse_square_scale != 0.0):
                # z-loss regularization on the log-partition function.
                losses += (lse_square_scale * lse.square())
            losses.masked_fill_((labels == ignored_index), 0.0)
        ctx.save_for_backward(logits, lse, labels)
        ctx.smoothing = smoothing
        ctx.lse_square_scale = lse_square_scale
        ctx.ignored_index = ignored_index
        ctx.total_classes = total_classes
        ctx.class_start_idx = class_start_idx
        ctx.inplace_backward = inplace_backward
        return losses

    def backward(ctx, grad_losses):
        (logits, lse, labels) = ctx.saved_tensors
        # Optionally reuse the logits buffer for the gradient to save memory.
        dlogits = (logits if ctx.inplace_backward else torch.empty_like(logits))
        (n_rows, n_cols) = logits.shape
        BLOCK_SIZE = min(triton.next_power_of_2(n_cols), (4 * 1024))
        num_warps = (4 if (BLOCK_SIZE < 2048) else (8 if (BLOCK_SIZE < 8192) else 16))
        grid = (lambda META: (n_rows, triton.cdiv(n_cols, META['BLOCK_SIZE'])))
        with torch.cuda.device(logits.device.index):
            cross_entropy_bwd_kernel[grid](dlogits, grad_losses, logits, lse, labels, ctx.smoothing, ctx.lse_square_scale, ctx.ignored_index, ctx.total_classes, ctx.class_start_idx, n_cols, logits.stride(0), dlogits.stride(0), grad_losses.stride(0), BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps)
        # Gradients only for `logits`; the remaining forward() inputs are
        # non-differentiable configuration.
        return (dlogits, None, None, None, None, None, None, None)
def quaddobl_next_loop(hom, idx, sols, verbose=False):
    """Interactively track each start solution of a parameter homotopy in
    quad double precision, one predictor-corrector step at a time.

    Parameters:
        hom: list of strings representing the homotopy polynomials.
        idx: index of the continuation parameter in the homotopy.
        sols: list of start solutions.
        verbose: passes verbosity through to the phcpy calls.

    Returns the list of solutions at the point where the user stopped
    stepping each path.
    """
    from phcpy.solver import number_of_symbols
    result = []
    # Ambient dimension: total symbols minus the continuation parameter.
    dim = (number_of_symbols(hom) - 1)
    quaddobl_set_parameter_homotopy(hom, idx, verbose)
    # NOTE: `idx` is reused as the path counter below, shadowing the
    # parameter-index argument (which was already consumed above).
    (idx, tval) = (0, 0.0)
    fmt = 'pole step : %.3e, estimated distance : %.3e, Hessian step : %.3e'
    for sol in sols:
        idx = (idx + 1)
        print('tracking solution path', idx, '...')
        quaddobl_set_solution(dim, sol, verbose)
        while True:
            # Prompt before every predictor-corrector step; anything other
            # than 'y' stores the current solution and moves to the next path.
            answer = input('next predictor-corrector step ? (y/n) ')
            if (answer != 'y'):
                result.append(sol)
                break
            else:
                quaddobl_predict_correct(verbose)
                sol = quaddobl_get_solution(verbose)
                print(sol)
                # Report the step-size diagnostics for this step.
                polestep = quaddobl_pole_step()
                estidist = quaddobl_estimated_distance()
                curvstep = quaddobl_hessian_step()
                print((fmt % (polestep, estidist, curvstep)))
                previoustval = tval
                (tval, step) = (quaddobl_t_value(), quaddobl_step_size())
                frp = quaddobl_pole_radius()
                print(('t : %.3e, step : %.3e, frp : %.3e' % (tval, step, frp)))
                cfp = quaddobl_closest_pole()
                # The pole/series data refers to the previous t value.
                print('For the previous t value', previoustval, ':')
                print('1) closest pole : ', cfp)
                print('2) the series:', quaddobl_series_coefficients(dim))
                print('3) Pade vector:', quaddobl_pade_vector(dim))
                print('4) poles:', quaddobl_poles(dim))
    return result
class AVATAR_OT_WearCloth(bpy.types.Operator):
    # Operator that imports the cloth OBJ currently selected in the avatar
    # thumbnail browser and assigns its generic material with texture maps.
    # (Description kept in bl_description; no class docstring so Blender's
    # tooltip source is unchanged.)
    bl_idname = 'avt.wear_cloth'
    bl_label = 'Wear Cloth'
    bl_description = 'Dress human with selected cloth'

    def execute(self, context):
        global avt_path
        # NOTE(review): scn and obj are assigned but never used below.
        scn = context.scene
        obj = context.active_object
        # The thumbnail identifier (sans extension) doubles as the cloth
        # model and material name.
        iconname = bpy.context.scene.avt_thumbnails
        iconname = iconname.split('.')[0]
        # Deselect everything so only the imported cloth ends up selected.
        for o in bpy.context.scene.objects:
            o.select_set(False)
        c_file = ('%s/dressing/models/clothes/%s.obj' % (avt_path, iconname))
        dressing.load_cloth(c_file, iconname)
        cloth = bpy.data.objects[iconname]
        cloth.select_set(True)
        # Re-import + reload so edits to material_utils are picked up
        # while Blender is running.
        import material_utils
        importlib.reload(material_utils)
        mat_id = dressing.get_material_id(iconname)
        cloth_mat = material_utils.create_material_generic(iconname, 0, mat_id)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, iconname)
        material_utils.assign_textures_generic_mat(cloth, cloth_mat, tex_img, tex_norm, tex_spec)
        return {'FINISHED'}
_config
def gsn_side_fcn5s():
    # Experiment configuration: sidetuning with an FCN5 side network.
    # NOTE(review): the decorator is not visible in this chunk; config
    # frameworks (e.g. sacred) capture the function's locals, which is
    # presumably why `cfg` is assigned but never returned — confirm.
    cfg = {'learner': {'model': 'GenericSidetuneNetwork', 'model_kwargs': {'side_class': 'FCN5', 'side_weights_path': None, 'side_kwargs': {'img_channels': 3, 'eval_only': False, 'normalize_outputs': False}}}}
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crop a single [height, width, channels] image tensor.

    Runtime assertions check that the image has rank 3 and is at least as
    large as the requested crop before slicing at
    (offset_height, offset_width).
    """
    shape = tf.shape(image)
    assert_rank = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
    # The target shape is only computed once the rank assertion has run.
    target_shape = control_flow_ops.with_dependencies([assert_rank], tf.stack([crop_height, crop_width, shape[2]]))
    assert_size = tf.Assert(
        tf.logical_and(tf.greater_equal(shape[0], crop_height), tf.greater_equal(shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    begin = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
    cropped = control_flow_ops.with_dependencies([assert_size], tf.slice(image, begin, target_shape))
    # Reshape restores static shape information lost by tf.slice.
    return tf.reshape(cropped, target_shape)
(reraise=True)
def main_worker(rank, ngpus_per_node, config, config_manager, port):
    """Worker entry point: build a UNet from config (optionally restoring a
    checkpoint) under a fixed random seed, then run semi-supervised training
    over the label ratios configured for the dataset.

    NOTE(review): rank / ngpus_per_node / config_manager / port are accepted
    but not used in this body — presumably required by a spawn-style
    launcher signature; confirm.
    """
    save_dir = str(config['Trainer']['save_dir'])
    # Mirror all log output into the run directory.
    logger.add(os.path.join('runs', save_dir, 'loguru.log'), level='TRACE', diagnose=True)
    seed = config.get('RandomSeed', 10)
    # Pop the checkpoint path so it is not forwarded to the UNet constructor.
    config_arch = deepcopy(config['Arch'])
    model_checkpoint = config_arch.pop('checkpoint', None)
    # Seed is fixed during model construction for reproducible init.
    with fix_all_seed_within_context(seed):
        model = UNet(**config_arch)
    logger.info(f'Initializing {model.__class__.__name__}')
    if model_checkpoint:
        logger.info(f'loading checkpoint from {model_checkpoint}')
        model.load_state_dict(extract_model_state_dict(model_checkpoint), strict=True)
    ratios = ratio_zoo[config['Data']['name']]
    trainer_name = config['Trainer']['name']
    semi_train(model=model, label_ratios=ratios, config=config, seed=seed, save_dir=save_dir, trainer_name=trainer_name)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    """Auto class resolving the appropriate Flax sequence-classification
    model from the config-to-model mapping."""

    # Mapping consulted by the base auto-class machinery.
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
class Trim(BaseWaveformTransform):
    """Trim leading and trailing silence from the waveform using librosa.

    Parameters:
        top_db: threshold (in decibels) below the reference to consider
            as silence.
        p: probability of applying the transform.
    """

    supports_multichannel = True

    def __init__(self, top_db: float = 30.0, p: float = 0.5):
        super().__init__(p)
        self.top_db = top_db

    def apply(self, samples: NDArray[np.float32], sample_rate: int):
        # librosa.effects.trim returns (trimmed_signal, interval_index);
        # only the trimmed signal is needed (the previous `lens` binding
        # was unused and misleadingly named).
        samples, _ = librosa.effects.trim(samples, top_db=self.top_db)
        return samples
class BinaryFocalLoss(Loss):
    """Binary focal loss with configurable class weight and focusing power.

    Delegates to the functional implementation, forwarding any framework
    submodules registered on the base Loss.
    """

    def __init__(self, alpha=0.25, gamma=2.0):
        super().__init__(name='binary_focal_loss')
        # alpha balances positive/negative classes; gamma down-weights
        # easy examples.
        self.alpha = alpha
        self.gamma = gamma

    def __call__(self, gt, pr):
        return F.binary_focal_loss(
            gt, pr, alpha=self.alpha, gamma=self.gamma, **self.submodules
        )
class MjrContextWrapper(object):
    """Accessor wrapper around a ctypes pointer to a MuJoCo rendering
    context struct.

    `ptr()` exposes the raw pointer, `obj()` the dereferenced struct; the
    remaining methods read or write individual struct fields through
    `self._wrapped.contents`.

    NOTE(review): the getter/setter pairs below share the same name, so as
    written each later definition shadows the earlier one. Generated MuJoCo
    wrappers normally decorate these as @property / @<name>.setter pairs —
    confirm the decorators were lost in extraction.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped      # ctypes POINTER to the underlying struct
        self._size_src = size_src    # stored but not referenced in this class

    def ptr(self):
        return self._wrapped

    def obj(self):
        return self._wrapped.contents

    def linewidth(self):
        return self._wrapped.contents.linewidth

    def linewidth(self, value):
        self._wrapped.contents.linewidth = value

    def znear(self):
        return self._wrapped.contents.znear

    def znear(self, value):
        self._wrapped.contents.znear = value

    def zfar(self):
        return self._wrapped.contents.zfar

    def zfar(self, value):
        self._wrapped.contents.zfar = value

    def shadowclip(self):
        return self._wrapped.contents.shadowclip

    def shadowclip(self, value):
        self._wrapped.contents.shadowclip = value

    def shadowscale(self):
        return self._wrapped.contents.shadowscale

    def shadowscale(self, value):
        self._wrapped.contents.shadowscale = value

    def shadowsize(self):
        return self._wrapped.contents.shadowsize

    def shadowsize(self, value):
        self._wrapped.contents.shadowsize = value

    def offwidth(self):
        return self._wrapped.contents.offwidth

    def offwidth(self, value):
        self._wrapped.contents.offwidth = value

    def offheight(self):
        return self._wrapped.contents.offheight

    def offheight(self, value):
        self._wrapped.contents.offheight = value

    def offFBO(self):
        return self._wrapped.contents.offFBO

    def offFBO(self, value):
        self._wrapped.contents.offFBO = value

    def offColor(self):
        return self._wrapped.contents.offColor

    def offColor(self, value):
        self._wrapped.contents.offColor = value

    def offDepthStencil(self):
        return self._wrapped.contents.offDepthStencil

    def offDepthStencil(self, value):
        self._wrapped.contents.offDepthStencil = value

    def shadowFBO(self):
        return self._wrapped.contents.shadowFBO

    def shadowFBO(self, value):
        self._wrapped.contents.shadowFBO = value

    def shadowTex(self):
        return self._wrapped.contents.shadowTex

    def shadowTex(self, value):
        self._wrapped.contents.shadowTex = value

    def ntexture(self):
        return self._wrapped.contents.ntexture

    def ntexture(self, value):
        self._wrapped.contents.ntexture = value

    def texture(self):
        # Read the fixed-size C array into a read-only numpy array.
        # NOTE(review): np.int was removed in NumPy >= 1.24; this raises
        # AttributeError on modern NumPy — presumably should be an explicit
        # integer dtype. Confirm against upstream.
        arr = np.reshape(np.fromiter(self._wrapped.contents.texture, dtype=np.int, count=100), (100,))
        arr.setflags(write=False)
        return arr

    def texture(self, value):
        # NOTE(review): the staging array is created as float64 but copied
        # through a c_int pointer, so raw float64 bytes are reinterpreted as
        # ints. Upstream generated wrappers use an integer dtype here —
        # confirm.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
        memmove(self._wrapped.contents.texture, val_ptr, (100 * sizeof(c_int)))

    def textureType(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.textureType, dtype=np.int, count=100), (100,))
        arr.setflags(write=False)
        return arr

    def textureType(self, value):
        # NOTE(review): same float64/c_int mismatch as the texture setter.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
        memmove(self._wrapped.contents.textureType, val_ptr, (100 * sizeof(c_int)))

    def basePlane(self):
        return self._wrapped.contents.basePlane

    def basePlane(self, value):
        self._wrapped.contents.basePlane = value

    def baseMesh(self):
        return self._wrapped.contents.baseMesh

    def baseMesh(self, value):
        self._wrapped.contents.baseMesh = value

    def baseHField(self):
        return self._wrapped.contents.baseHField

    def baseHField(self, value):
        self._wrapped.contents.baseHField = value

    def baseBuiltin(self):
        return self._wrapped.contents.baseBuiltin

    def baseBuiltin(self, value):
        self._wrapped.contents.baseBuiltin = value

    def baseFontNormal(self):
        return self._wrapped.contents.baseFontNormal

    def baseFontNormal(self, value):
        self._wrapped.contents.baseFontNormal = value

    def baseFontBack(self):
        return self._wrapped.contents.baseFontBack

    def baseFontBack(self, value):
        self._wrapped.contents.baseFontBack = value

    def baseFontBig(self):
        return self._wrapped.contents.baseFontBig

    def baseFontBig(self, value):
        self._wrapped.contents.baseFontBig = value

    def rangePlane(self):
        return self._wrapped.contents.rangePlane

    def rangePlane(self, value):
        self._wrapped.contents.rangePlane = value

    def rangeMesh(self):
        return self._wrapped.contents.rangeMesh

    def rangeMesh(self, value):
        self._wrapped.contents.rangeMesh = value

    def rangeHField(self):
        return self._wrapped.contents.rangeHField

    def rangeHField(self, value):
        self._wrapped.contents.rangeHField = value

    def rangeBuiltin(self):
        return self._wrapped.contents.rangeBuiltin

    def rangeBuiltin(self, value):
        self._wrapped.contents.rangeBuiltin = value

    def rangeFont(self):
        return self._wrapped.contents.rangeFont

    def rangeFont(self, value):
        self._wrapped.contents.rangeFont = value

    def charWidth(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.charWidth, dtype=np.int, count=127), (127,))
        arr.setflags(write=False)
        return arr

    def charWidth(self, value):
        # NOTE(review): same float64/c_int mismatch as the texture setter.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
        memmove(self._wrapped.contents.charWidth, val_ptr, (127 * sizeof(c_int)))

    def charWidthBig(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.charWidthBig, dtype=np.int, count=127), (127,))
        arr.setflags(write=False)
        return arr

    def charWidthBig(self, value):
        # NOTE(review): same float64/c_int mismatch as the texture setter.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
        memmove(self._wrapped.contents.charWidthBig, val_ptr, (127 * sizeof(c_int)))

    def charHeight(self):
        return self._wrapped.contents.charHeight

    def charHeight(self, value):
        self._wrapped.contents.charHeight = value

    def charHeightBig(self):
        return self._wrapped.contents.charHeightBig

    def charHeightBig(self, value):
        self._wrapped.contents.charHeightBig = value

    def glewInitialized(self):
        return self._wrapped.contents.glewInitialized

    def glewInitialized(self, value):
        self._wrapped.contents.glewInitialized = value
class MAE(ZooKerasCreator, JavaValue):
    """Mean-absolute-error metric wrapper; construction delegates to the
    JavaValue machinery to create the backing JVM object."""

    def __init__(self, bigdl_type='float'):
        # First argument (jvalue) is None so the JVM object is created from
        # the class name; bigdl_type selects the numeric type ('float').
        super(MAE, self).__init__(None, bigdl_type)
def get_task(task_name):
    """Dynamically load the `CustomTask` class from the sibling module named
    `task_name` inside this package."""
    task_module = importlib.import_module(f'.{task_name}', package=__package__)
    return task_module.CustomTask
class TrainerMemoryTracker():
    """Tracks CPU (via psutil) and GPU (via torch.cuda) memory across the
    trainer stages (init/train/eval/test) and reports begin/end/alloc/peaked
    numbers into a metrics dict.

    Tracking is disabled entirely when `skip_memory_metrics` is set or
    psutil is unavailable. CPU peak usage is sampled by a daemon thread
    polling the process RSS.
    """

    # Maps the calling method's name to a reporting stage label.
    stages = {'__init__': 'init', 'train': 'train', '_inner_training_loop': 'train', 'evaluate': 'eval', 'predict': 'test'}

    def __init__(self, skip_memory_metrics=False):
        self.skip_memory_metrics = skip_memory_metrics
        if (not is_psutil_available()):
            # psutil is a soft dependency; without it tracking is skipped.
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil
        if is_torch_cuda_available():
            import torch
            self.torch = torch
            self.gpu = {}
        else:
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """Infer the stage label from the name of the caller's caller."""
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if (caller in self.stages):
            return self.stages[caller]
        else:
            raise ValueError(f'was called from {caller}, but only expect to be called from one of {self.stages.keys()}')

    def cpu_mem_used(self):
        """Resident set size of the current process, in bytes."""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Busy-polls RSS on a daemon thread until stop() clears
        # `peak_monitoring`, keeping the maximum observed value.
        self.cpu_mem_used_peak = (- 1)
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            if (not self.peak_monitoring):
                break

    def start(self):
        """Begin tracking for the stage inferred from the caller."""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # Ignore nested calls inside an already-tracked (different) stage.
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        self.cur_stage = stage
        gc.collect()
        if (self.torch is not None):
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        if (self.torch is not None):
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True  # must not block interpreter exit
        peak_monitor_thread.start()

    def stop(self, stage):
        """Stop tracking `stage` and record begin/end/alloc/peaked deltas."""
        # Only the stage that started tracking may stop it.
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        self.peak_monitoring = False
        gc.collect()
        if (self.torch is not None):
            self.torch.cuda.empty_cache()
        if (self.torch is not None):
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = {'begin': self.gpu_mem_used_at_start, 'end': self.gpu_mem_used_now, 'alloc': (self.gpu_mem_used_now - self.gpu_mem_used_at_start), 'peaked': max(0, (self.gpu_mem_used_peak - self.gpu_mem_used_now))}
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = {'begin': self.cpu_mem_used_at_start, 'end': self.cpu_mem_used_now, 'alloc': (self.cpu_mem_used_now - self.cpu_mem_used_at_start), 'peaked': max(0, (self.cpu_mem_used_peak - self.cpu_mem_used_now))}
        # Reset so the next stage can start.
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """Write the recorded deltas for `stage` into `metrics` in place."""
        if self.skip_memory_metrics:
            return
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        # Report 'init' numbers together with the first reported stage.
        stages = [stage]
        if (not self.init_reported):
            stages.insert(0, 'init')
            self.init_reported = True
        for stage in stages:
            for t in ['alloc', 'peaked']:
                if ((stage in self.cpu) and (t in self.cpu[stage])):
                    metrics[f'{stage}_mem_cpu_{t}_delta'] = self.cpu[stage][t]
                if ((self.torch is not None) and (stage in self.gpu) and (t in self.gpu[stage])):
                    metrics[f'{stage}_mem_gpu_{t}_delta'] = self.gpu[stage][t]
        if (stages[0] == 'init'):
            metrics['before_init_mem_cpu'] = self.cpu['init']['begin']
            if (self.torch is not None):
                metrics['before_init_mem_gpu'] = self.gpu['init']['begin']

    def stop_and_update_metrics(self, metrics=None):
        """Convenience wrapper: stop the current stage and, when a metrics
        dict is given, record the deltas into it."""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        if (metrics is not None):
            self.update_metrics(stage, metrics)
def unitwise_norm(x, norm_type=2.0):
    """Norm of `x` computed per first-dimension slice ("unit").

    Tensors with ndim <= 1 are reduced to a single norm; for higher ranks
    the norm is taken over all dimensions except dim 0, with kept dims so
    the result broadcasts against `x`.
    """
    if x.ndim > 1:
        reduce_dims = tuple(range(1, x.ndim))
        return x.norm(norm_type, dim=reduce_dims, keepdim=True)
    return x.norm(norm_type)
class SHMArray(np.ndarray):
    """ndarray view backed by a shared-memory segment.

    Pass create=True to allocate a new segment (optionally named), or the
    name of an existing segment to attach to it. The SharedMemory handle is
    kept on the instance so the underlying buffer stays alive as long as
    the array does.
    """

    def __new__(cls, shape, dtype, shm_name=None, create=False):
        # Requested size in bytes of the full array.
        shm = shared_memory.SharedMemory(create=create, name=shm_name, size=(np.prod(shape) * np.dtype(dtype).itemsize))
        obj = super().__new__(cls, shape=shape, dtype=dtype, buffer=shm.buf)
        obj.shm = shm  # keep a reference so the mapping is not closed
        return obj

    def __array_finalize__(self, obj):
        # Views/slices inherit the shm handle from their base array;
        # obj is None only during explicit __new__ construction.
        if (obj is None):
            return
        self.shm = getattr(obj, 'shm', None)
def check_random_state(seed):
    """Turn `seed` into a numpy.random.RandomState instance.

    None or the numpy.random module yield the global RandomState singleton;
    an existing RandomState passes through unchanged; integers seed a fresh
    RandomState. Anything else raises ValueError.
    """
    if seed is None or seed is numpy.random:
        return numpy.random.mtrand._rand
    if isinstance(seed, numpy.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, numpy.integer)):
        return numpy.random.RandomState(seed)
    raise ValueError(('%r cannot be used to seed a numpy.random.RandomState instance' % seed))
class FCN4Reshaped(FCN4):
    """FCN4 variant that global-average-pools the feature map into a flat
    (batch, 64) embedding."""

    def forward(self, x, cache=None, time_idx: int = (- 1)):
        # `cache` is accepted for interface compatibility but never read;
        # FIX: its previous mutable default ({}) was replaced with None to
        # avoid the shared-mutable-default pitfall.
        x = super().forward(x, time_idx)
        # Pool over the full spatial extent (kernel = feature-map width),
        # then flatten to (batch, 64).
        x = F.avg_pool2d(x, x.size()[3]).view(x.shape[0], 64)
        return x
def get_imdb(name):
    """Instantiate and return the dataset registered under `name`.

    Raises KeyError for dataset names that were never registered.
    """
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    factory = __sets[name]
    return factory()
def get_conf(py_conf=None):
    """Build and return the merged configuration.

    `py_conf` may be None, a configuration class, or the dotted import path
    of one. When given, its non-dunder attributes are copied onto a
    dynamically created ConfigurationManagerInterface subclass —
    presumably so the metaclass registers them before merging (TODO
    confirm); the created type itself is not referenced afterwards.
    """
    logger.info(f'Entering get_conf, original py_conf is {py_conf}')
    # NOTE(review): 'director' in the message below looks like a typo for
    # 'directory' (runtime string left unchanged here).
    logger.info(('current working director is %s' % os.getcwd()))
    attribute_class = py_conf
    if py_conf:
        if isinstance(py_conf, str):
            # Resolve a dotted path to the class object.
            attribute_class = reflect_util.get_class(py_conf)
        properties = dict()
        for i in dir(attribute_class):
            if (not i.startswith('__')):
                properties[i] = getattr(attribute_class, i)
        # Creating the subclass presumably triggers the configuration
        # metaclass with the collected properties — TODO confirm.
        attribute_class = type('py_conf', (ConfigurationManagerInterface,), properties)
    all_conf = ConfigurationManagerMeta.merge_configs()
    return all_conf
def test_filepath_error():
    """A ModelCheckpoint filepath without a directory component must make
    Trainer construction raise ValueError."""
    wide = Wide(np.unique(X_wide).shape[0], 1)
    deeptabular = TabMlp(mlp_hidden_dims=[16, 4], column_idx=column_idx, cat_embed_input=embed_input, continuous_cols=colnames[(- 5):])
    model = WideDeep(wide=wide, deeptabular=deeptabular)
    with pytest.raises(ValueError):
        # The return value is irrelevant; constructing the Trainer triggers
        # the filepath validation (the previous unused `trainer` binding
        # was dropped).
        Trainer(model=model, objective='binary', callbacks=[ModelCheckpoint(filepath='wrong_file_path')], verbose=0)
def main():
    """Fine-tune a Wav2Vec2 CTC model on Common Voice.

    Parses model/data/training arguments (from a single JSON file or the
    command line), builds a character vocabulary from the training and
    evaluation transcripts, resamples the audio to 16 kHz arrays, and runs
    training/evaluation with a CTC trainer, reporting WER.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A single .json argument supplies all three argument groups.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect a checkpoint to resume from in the output directory.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    # FIX: the two f-strings used to be concatenated without a separator,
    # producing e.g. '... n_gpu: 4distributed training: ...' in the logs.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # Load the raw train/test splits.
    train_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split=data_args.train_split_name)
    eval_dataset = datasets.load_dataset('common_voice', data_args.dataset_config_name, split='test')
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        # Strip ignorable characters, lowercase, and keep a trailing space
        # as the word delimiter.
        batch['text'] = (re.sub(chars_to_ignore_regex, '', batch['sentence']).lower() + ' ')
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=['sentence'])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=['sentence'])

    def extract_all_chars(batch):
        # Collect the set of characters seen in the batch's transcripts.
        all_text = ' '.join(batch['text'])
        vocab = list(set(all_text))
        return {'vocab': [vocab], 'all_text': [all_text]}

    vocab_train = train_dataset.map(extract_all_chars, batched=True, batch_size=(- 1), keep_in_memory=True, remove_columns=train_dataset.column_names)
    # FIX: the test-side vocabulary was previously extracted from
    # train_dataset (while removing eval_dataset's columns), so characters
    # occurring only in the evaluation split were lost; it must come from
    # eval_dataset.
    vocab_test = eval_dataset.map(extract_all_chars, batched=True, batch_size=(- 1), keep_in_memory=True, remove_columns=eval_dataset.column_names)
    vocab_list = list((set(vocab_train['vocab'][0]) | set(vocab_test['vocab'][0])))
    vocab_dict = {v: k for (k, v) in enumerate(vocab_list)}
    # CTC convention: '|' replaces the space as the word-delimiter token.
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[' ']
    vocab_dict['[UNK]'] = len(vocab_dict)
    vocab_dict['[PAD]'] = len(vocab_dict)
    with open('vocab.json', 'w') as vocab_file:
        json.dump(vocab_dict, vocab_file)
    tokenizer = Wav2Vec2CTCTokenizer('vocab.json', unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|')
    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction='mean', pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer))
    # Optional subsampling for quick runs.
    if (data_args.max_train_samples is not None):
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if (data_args.max_val_samples is not None):
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    # Common Voice audio is 48 kHz; the model expects 16 kHz.
    resampler = torchaudio.transforms.Resample(48000, 16000)

    def speech_file_to_array_fn(batch):
        (speech_array, sampling_rate) = torchaudio.load(batch['path'])
        batch['speech'] = resampler(speech_array).squeeze().numpy()
        batch['sampling_rate'] = 16000
        batch['target_text'] = batch['text']
        return batch

    train_dataset = train_dataset.map(speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers)
    eval_dataset = eval_dataset.map(speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers)

    def prepare_dataset(batch):
        # The processor expects a uniform sampling rate across the batch.
        assert (len(set(batch['sampling_rate'])) == 1), f'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
        processed_batch = processor(audio=batch['speech'], text=batch['target_text'], sampling_rate=batch['sampling_rate'][0])
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers)
    eval_dataset = eval_dataset.map(prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers)
    wer_metric = datasets.load_metric('wer')

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=(- 1))
        # -100 marks padding in the labels; map it back to the pad token.
        pred.label_ids[(pred.label_ids == (- 100))] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # group_tokens=False: do not merge repeated tokens in references.
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {'wer': wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    trainer = CTCTrainer(model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=processor.feature_extractor)
    if training_args.do_train:
        if (last_checkpoint is not None):
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the processor early so the tokenizer files sit next to the
        # checkpoints.
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples if (data_args.max_val_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    return results
def _resnet(arch: str, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], pretrained: bool, progress: bool, **kwargs: Any) -> ResNet:
    """Construct a ResNet from a block type and per-stage layer counts,
    optionally loading the pretrained weights registered under `arch`."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    # strict=False tolerates missing/unexpected keys (e.g. replaced heads).
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
class MetaActionAngle(type):
    """Metaclass that pairs underscore-prefixed methods with base-class
    implementations.

    For every class attribute named `_name` ('_evaluate' maps to
    '__call__'), the first base providing a method `name` has that method
    copied, the copy's docstring replaced by the underscored attribute's
    docstring, and the copy installed on the new class under `name`.
    """

    def __new__(meta, name, bases, attrs):
        # Iterate over a copy since attrs is mutated inside the loop.
        for key in copy.copy(attrs):
            if (key[0] == '_'):
                skey = copy.copy(key[1:])
                if (skey == 'evaluate'):
                    # '_evaluate' documents the call operator.
                    skey = '__call__'
                for base in bases:
                    original = getattr(base, skey, None)
                    if (original is not None):
                        # Copy the base function so mutating __doc__ does not
                        # affect the base class's own method.
                        funccopy = copyfunc(original)
                        funccopy.__doc__ = attrs[key].__doc__
                        attrs[skey] = funccopy
                        break
        return type.__new__(meta, name, bases, attrs)
class Evaluation():
    """Loads a trained captioning model and its test set for evaluation.

    The checkpoint is looked up under the experiment directory identified
    by `experiment_id`; dataset and model architecture are selected from
    the configuration.
    """

    def __init__(self, config, logger, experiment_id):
        self.config = config
        self.logger = logger
        self.device = torch.device(self.config.training.device)
        self.experiment_id = experiment_id
        self.path_to_model = os.path.join(self.config.env.experiments_dir, self.experiment_id, 'best_model.pth.tar')
        self.load_dataset()
        self.build_model()

    def load_dataset(self):
        """Build the test DataLoader and the vocabulary saved at training time."""
        self.logger.write('Loading dataset')
        dataset_name = self.config.dataset_config.dataset_name
        if (dataset_name == 'audiocaption'):
            test_dataset = AudioCaptionDataset(self.config.dataset_config, dataset_type='test')
        else:
            raise ValueError('{} dataset is not supported.'.format(dataset_name))
        # Restore the vocabulary from the token-frequency file written by
        # the logger during training.
        token_freq_dict = json.load(open(self.logger.vocab_path, 'r'))
        self.vocab = Vocabulary(tokens=None, token_freq=token_freq_dict)
        OmegaConf.update(self.config, 'model_config.vocab_size', self.vocab.size)
        self.test_loader = DataLoader(dataset=test_dataset, batch_size=1, collate_fn=custom_collate_fn)

    def build_model(self):
        """Instantiate the configured model, load the checkpoint, set eval mode."""
        self.logger.write('Building model')
        model_name = self.config.model_config.model_name
        if (model_name == 'cnn_lstm_caption'):
            self.model = CNNLSTMCaption(self.config.model_config, self.vocab, self.device, teacher_forcing=False)
        elif (model_name == 'cnn_attention_lstm'):
            self.model = AttentionModel(self.config.model_config, self.vocab, self.device, teacher_forcing=False)
        else:
            raise ValueError('{} model is not supported.'.format(model_name))
        self.checkpoint = torch.load(self.path_to_model)
        self.model.load_state_dict(self.checkpoint['state_dict'])
        self.model.to(self.device)
        self.model.eval()

    def obtain_predictions(self):
        """Return (predictions, true_captions, audio_paths) from the cached
        predictions file, or empty lists when the file does not exist.

        FIX: previously, when the file was missing, the method printed a
        message and then returned names that were never bound, raising
        UnboundLocalError.
        NOTE(review): `self.predictions_path` is not assigned in __init__
        within this chunk — confirm it is set elsewhere before this runs.
        """
        predictions, true_captions, audio_paths = [], [], []
        if os.path.exists(self.predictions_path):
            # Relies on the JSON object preserving insertion order:
            # predictions, true_captions, audio_paths.
            (predictions, true_captions, audio_paths) = json.load(open(self.predictions_path)).values()
        else:
            print('No captions found.')
        return (predictions, true_captions, audio_paths)
class FilterResponseNormNd(nn.Module):
    """Filter Response Normalization with a Thresholded Linear Unit (TLU).

    Each channel is normalized by the mean of its squared activations (no mean
    subtraction), then affinely transformed and thresholded:
    ``max(gamma * x_hat + beta, tau)``.
    """

    def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
        assert (ndim in [3, 4, 5]), 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
        super(FilterResponseNormNd, self).__init__()
        # Parameter shape broadcasts over batch and spatial dims: (1, C, 1, ...).
        param_shape = (1, num_features) + (1,) * (ndim - 2)
        self.eps = nn.Parameter(torch.ones(*param_shape) * eps)
        if not learnable_eps:
            # Freeze epsilon unless the caller explicitly made it learnable.
            self.eps.requires_grad_(False)
        self.gamma = nn.Parameter(torch.Tensor(*param_shape))
        self.beta = nn.Parameter(torch.Tensor(*param_shape))
        self.tau = nn.Parameter(torch.Tensor(*param_shape))
        self.reset_parameters()

    def forward(self, x):
        # Mean of squares over every spatial dimension (dims 2..end).
        spatial_dims = tuple(range(2, x.dim()))
        mean_sq = x.pow(2).mean(dim=spatial_dims, keepdim=True)
        normed = x * torch.rsqrt(mean_sq + torch.abs(self.eps))
        # Thresholded linear unit: element-wise max against the learned tau.
        return torch.max(self.gamma * normed + self.beta, self.tau)

    def reset_parameters(self):
        """Identity affine transform (gamma=1, beta=0) and zero threshold."""
        nn.init.ones_(self.gamma)
        nn.init.zeros_(self.beta)
        nn.init.zeros_(self.tau)
class TVMType(ctypes.Structure):
    """C-compatible TVM data-type descriptor: (type_code, bits, lanes).

    Parses strings such as 'int32', 'float32x4', 'uint1', 'bool', 'handle'
    or 'custom[name]8' into the packed ctypes representation.
    """
    _fields_ = [('type_code', ctypes.c_uint8), ('bits', ctypes.c_uint8), ('lanes', ctypes.c_uint16)]
    CODE2STR = {0: 'int', 1: 'uint', 2: 'float', 4: 'handle'}

    def __init__(self, type_str):
        super(TVMType, self).__init__()
        if isinstance(type_str, np.dtype):
            type_str = str(type_str)
        # 'bool' is represented as a 1-bit unsigned integer.
        if (type_str == 'bool'):
            self.bits = 1
            self.type_code = 1
            self.lanes = 1
            return
        # Vector types use an 'x<lanes>' suffix, e.g. 'float32x4'.
        arr = type_str.split('x')
        head = arr[0]
        self.lanes = (int(arr[1]) if (len(arr) > 1) else 1)
        bits = 32
        if head.startswith('int'):
            self.type_code = 0
            head = head[3:]
        elif head.startswith('uint'):
            self.type_code = 1
            head = head[4:]
        elif head.startswith('float'):
            self.type_code = 2
            head = head[5:]
        elif head.startswith('handle'):
            self.type_code = 4
            bits = 64  # handles are pointer-sized
            head = ''
        elif head.startswith('custom'):
            # Custom types look like 'custom[name]<bits>'; extract the name
            # between the brackets and resolve its registered type code.
            (low, high) = (head.find('['), head.find(']'))
            if ((not low) or (not high) or (low >= high)):
                raise ValueError(('Badly formatted custom type string %s' % type_str))
            type_name = head[(low + 1):high]
            self.type_code = _api_internal._datatype_get_type_code(type_name)
            head = head[(high + 1):]
        else:
            raise ValueError(('Do not know how to handle type %s' % type_str))
        # Whatever remains of `head` is the explicit bit width, if any.
        bits = (int(head) if head else bits)
        self.bits = bits

    def __repr__(self):
        if ((self.bits == 1) and (self.lanes == 1)):
            return 'bool'
        if (self.type_code in TVMType.CODE2STR):
            type_name = TVMType.CODE2STR[self.type_code]
        else:
            type_name = ('custom[%s]' % _api_internal._datatype_get_type_name(self.type_code))
        x = ('%s%d' % (type_name, self.bits))
        if (self.lanes != 1):
            x += ('x%d' % self.lanes)
        return x

    def __eq__(self, other):
        # BUG FIX: comparing against a non-TVMType previously raised
        # AttributeError; return NotImplemented so Python falls back to the
        # reflected comparison (yielding False for unrelated types).
        if not isinstance(other, TVMType):
            return NotImplemented
        return ((self.bits == other.bits) and (self.type_code == other.type_code) and (self.lanes == other.lanes))

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return (not result)
class priorityDictionary(dict):
    """Dictionary whose keys can be retrieved in order of increasing value.

    A binary heap of (value, key) pairs is maintained alongside the dict;
    entries for deleted keys or outdated priorities are discarded lazily
    when the heap top is inspected.
    """

    def __init__(self):
        """Start with an empty dictionary and an empty heap."""
        self.__heap = []
        dict.__init__(self)

    def smallest(self):
        """Return the key with the smallest value, discarding stale heap entries."""
        if len(self) == 0:
            raise IndexError('smallest of empty priorityDictionary')
        heap = self.__heap
        # Sift out stale roots: keys no longer present, or whose priority
        # has changed since the pair was pushed.
        while heap[0][1] not in self or self[heap[0][1]] != heap[0][0]:
            tail = heap.pop()
            pos = 0
            while True:
                child = 2 * pos + 1
                # Prefer the smaller of the two children when both exist.
                if child + 1 < len(heap) and heap[child] > heap[child + 1]:
                    child += 1
                if child >= len(heap) or tail <= heap[child]:
                    heap[pos] = tail
                    break
                heap[pos] = heap[child]
                pos = child
        return heap[0][1]

    def __iter__(self):
        """Iterate keys in increasing-value order, consuming the dictionary."""
        def consume():
            while len(self) > 0:
                key = self.smallest()
                yield key
                del self[key]
        return consume()

    def __setitem__(self, key, val):
        """Set d[key] = val and record the pair in the heap.

        When the heap holds more than twice as many entries as the dict
        (too much stale garbage), rebuild it from scratch; otherwise bubble
        the new pair up from the bottom.
        """
        dict.__setitem__(self, key, val)
        heap = self.__heap
        if len(heap) > 2 * len(self):
            self.__heap = sorted((v, k) for (k, v) in self.items())
        else:
            entry = (val, key)
            pos = len(heap)
            heap.append(None)
            while pos > 0 and entry < heap[(pos - 1) // 2]:
                heap[pos] = heap[(pos - 1) // 2]
                pos = (pos - 1) // 2
            heap[pos] = entry

    def setdefault(self, key, val):
        """Reimplement dict.setdefault so the heap stays in sync."""
        if key not in self:
            self[key] = val
        return self[key]
class AtariHeadDataloader():
    """Batch loader for the Atari-HEAD human gameplay dataset.

    A trajectory is a ``<name>.txt`` metadata file (one header line, then one
    CSV line per frame) plus a directory of frame PNGs named by frame id.
    Samples are stacks of ``stack`` consecutive frames with one-hot actions;
    in DQN mode next states, rewards and done flags are returned as well.

    NOTE(review): percentile, top_n, augment, preload and print_stats are
    accepted for interface compatibility but unused here.
    """

    def __init__(self, directory, batch_size=32, stack=3, controls=18, size=(84, 84), percentile=None, top_n=None, augment=False, preload=False, merge=False, dqn=False, action_delay=0, print_stats=False):
        self.batch_size = batch_size
        self.stack = stack              # frames per stacked observation
        self.controls = controls        # size of the one-hot action space
        self.size = size                # (width, height) frames are resized to
        self.merge = merge              # merge the stack into one image vs channel concat
        self.dqn = dqn                  # also produce (next_state, reward, done)
        self.action_delay = action_delay  # shift actions by this many frames
        self.directory = directory
        self.all_trajs = self._get_trajectory_list()
        self.n_traj = len(self.all_trajs)
        # Per-trajectory sample counts; used to map a flat index to (traj, ix).
        self.traj_len = []
        for traj in range(len(self.all_trajs)):
            self.traj_len.append(self._get_samples_in_trajectory(traj))
        self.total_len = sum(self.traj_len)

    def _get_trajectory_list(self):
        """Sorted trajectory names: every '*.txt' in the directory, stem only."""
        names = [n[:-4] for n in os.listdir(self.directory) if n.endswith('.txt')]
        return sorted(names)

    def _get_samples_in_trajectory(self, traj):
        # The first line is a header, so samples = lines - 1.
        lines = self._get_data_lines(traj)
        return (len(lines) - 1)

    def _get_index_traj_and_sample(self, index):
        """Map a flat sample index to (trajectory index, sample index)."""
        total = 0
        for (i, t_len) in enumerate(self.traj_len):
            if (index < (total + t_len)):
                return (i, (index - total))
            total += t_len

    def _get_frame_id(self, traj, index):
        # Column 0 of the data line (skipping the header) is the frame id.
        lines = self._get_data_lines(traj)
        return lines[(index + 1)].split(',')[0]

    def _get_image_stacked(self, traj, id):
        """Return `stack` consecutive frames ending at `id`, zero-padded before the start."""
        stack = []
        shape = None
        for i in range(self.stack):
            ix = (id - i)
            if (ix >= 0):
                stack.insert(0, self._get_image(traj, ix))
                if (shape is None):
                    shape = stack[0].shape
            else:
                # Before the trajectory start: pad with black frames.
                stack.insert(0, np.zeros(shape, dtype=np.uint8))
        if self.merge:
            # Collapse the stack into a single image via per-pixel lighter (max).
            stack = map(Image.fromarray, stack)
            img = reduce(ImageChops.lighter, stack)
            return np.asarray(img, dtype=np.uint8)
        else:
            return np.concatenate(stack, axis=2)

    def _get_image(self, traj, index):
        """Load and resize the PNG frame for the given sample index."""
        traj_name = self.all_trajs[traj]
        frame_id = self._get_frame_id(traj, index)
        filename = '{}.png'.format(frame_id)
        path = os.path.join(self.directory, traj_name, filename)
        img = Image.open(path)
        img.load()
        img = img.resize(self.size, Image.BILINEAR)
        img = np.asarray(img, dtype=np.uint8)
        return img

    # BUG FIX: this was written as a bare `_cache(maxsize=128)` statement,
    # which called the cache factory and discarded the wrapper, leaving the
    # method uncached (the file was re-read on every access). It is clearly
    # intended as a decorator.
    @_cache(maxsize=128)
    def _get_data_lines(self, traj):
        """Read the trajectory's metadata file as a list of lines (cached)."""
        traj_name = '{}.txt'.format(self.all_trajs[traj])
        with open(os.path.join(self.directory, traj_name)) as f:
            return f.read().splitlines()

    def _get_data(self, traj, id):
        """Parse one metadata line into [id, col4, col2, False, col5, last].

        Non-integer fields fall back to -1 (column 2) or 0 (columns 4 and 5);
        `last` flags the final sample of the trajectory. Column semantics come
        from the Atari-HEAD CSV layout — verify against the dataset schema.
        """
        lines = self._get_data_lines(traj)
        num_frames = (len(lines) - 1)
        data = lines[(id + 1)].split(',')[:6]
        data = [s.strip() for s in data]
        try:
            data[2] = int(data[2])
        except ValueError:
            data[2] = (- 1)
        try:
            data[4] = int(data[4])
        except ValueError:
            data[4] = 0
        try:
            data[5] = int(data[5])
        except ValueError:
            data[5] = 0
        if (id >= (num_frames - 1)):
            last = 1
        else:
            last = 0
        data.append(last)
        return [id, data[4], data[2], False, data[5], last]

    def __len__(self):
        # Number of whole batches available.
        return int((self.total_len / self.batch_size))

    def get_batch(self, samples):
        """Assemble a batch from flat sample indices.

        Returns (x, y), or (x, y, x_next, reward, done) in DQN mode.
        """
        batch_x = []
        batch_y = []
        if self.dqn:
            batch_x_next = []
            batch_reward = []
            batch_done = []
        for sample in samples:
            (traj, ix) = self._get_index_traj_and_sample(sample)
            data = self._get_data(traj, ix)
            if (self.action_delay != 0):
                # Take the action from a clamped, shifted frame index.
                action_ix = (ix + self.action_delay)
                if (action_ix >= self.traj_len[traj]):
                    action_ix = (self.traj_len[traj] - 1)
                if (action_ix < 0):
                    action_ix = 0
                delayed_data = self._get_data(traj, action_ix)
                data[4] = delayed_data[4]
            if (self.dqn and (data[5] == 1) and (data[3] == 0)):
                # data[3] is always False here, so in DQN mode this skips the
                # final frame of every trajectory (it has no successor state).
                continue
            batch_x.append(self._get_image_stacked(traj, ix))
            batch_y.append(np.eye(self.controls)[data[4]])
            if self.dqn:
                batch_reward.append(data[1])
                batch_done.append(data[5])
                if (not data[5]):
                    batch_x_next.append(self._get_image_stacked(traj, (ix + 1)))
                else:
                    # No successor for terminal states: use a black frame stack.
                    batch_x_next.append(np.zeros(batch_x[0].shape, dtype=np.uint8))
        if not self.dqn:
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8))
        else:
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8), np.array(batch_x_next, dtype=np.uint8), np.array(batch_reward), np.array(batch_done, dtype=np.uint8))
def get_teacher_name(model_path):
    """Parse the teacher architecture name from a checkpoint path.

    The architecture is encoded in the checkpoint's parent directory name;
    WRN models keep their depth/width suffix (e.g. 'wrn_40_2'), all other
    architectures use only the first underscore-separated token.
    """
    parts = model_path.split('/')[-2].split('_')
    if parts[0] == 'wrn':
        return '_'.join(parts[:3])
    return parts[0]
class BertSplade(BertForMaskedLM):
    """BERT MLM head with SPLADE pooling producing sparse vocab-sized vectors.

    Token-level MLM logits are pooled into one vocabulary-dimension embedding
    via a max over the sequence of log(1 + relu(logits)), with padding
    positions zeroed out by the attention mask.
    """

    def forward(self, input_ids, attention_mask, token_type_ids=None, position_ids=None, return_dict=False):
        mlm_out = super().forward(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, return_dict=True)
        # Saturating activation keeps the representation positive and sparse;
        # masked positions contribute zero before the max-pool over tokens.
        activated = torch.log(1 + torch.relu(mlm_out.logits)) * attention_mask.unsqueeze(-1)
        (vocab_emb, _) = torch.max(activated, dim=1)
        if not return_dict:
            return vocab_emb
        mlm_out.logits = vocab_emb
        return mlm_out
def total_params():
    """Print the total number of trainable parameters in the default TF graph."""
    grand_total = 0
    for var in tf.trainable_variables():
        # Multiply out the static dimensions of the variable's shape.
        var_params = 1
        for dim in var.get_shape():
            var_params *= dim.value
        grand_total += var_params
    print('Total number of trainable parameters: {}'.format(grand_total))
class img_dataset(Dataset):
    """Dataset pairing each model's image (path or URL) with its text annotation.

    Args:
        get_visual_path: maps a model id to ``{'format': ..., 'path': ...}``.
        get_text_annotation: maps a model id to ``{'text': ...}``.
        get_all_model_ids: returns ``{'ids': [...], 'format': ...}``.
    """

    def __init__(self, get_visual_path: Callable[[str], dict], get_text_annotation: Callable[[str], dict], get_all_model_ids: Callable[[], dict]):
        super().__init__()
        self.get_visual_path = get_visual_path
        self.get_text_annotation = get_text_annotation
        self.get_all_model_ids = get_all_model_ids
        id_info = self.get_all_model_ids()
        self.model_ids = id_info['ids']
        self.data_format = id_info['format']

    def __len__(self):
        return len(self.model_ids)

    def __getitem__(self, idx):
        """Return {'id', 'img', 'text', 'format'} for the idx-th model.

        For '-url' formats the image is downloaded (None on failure); for
        other formats 'img' stays the dict returned by get_visual_path —
        presumably resolved downstream; verify callers.
        """
        model_id = self.model_ids[idx]
        img = self.get_visual_path(model_id)
        img_format = img['format']
        img_file = img['path']
        text_annot = self.get_text_annotation(model_id)['text']
        if img_format.endswith('-url'):
            assert (file_location_type(img_file) == 'url')
            try:
                img = read_img(img_file)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; keep best-effort loading but
                # only for ordinary errors.
                logger.info(f'error loading {img_file}')
                img = None
        return {'id': model_id, 'img': img, 'text': text_annot, 'format': self.data_format}
class conv2D(Layer):
    """2D convolution layer (TF1-style) with optional bias and weight norm.

    The kernel/bias may be supplied through ``values=(kernel, bias)`` or the
    ``kernel_data``/``bias_data`` kwargs; ``size`` is an int (square kernel)
    or an [h, w] list.
    """

    def __init__(self, size, outchn, x=None, name=None, stride=1, pad='SAME', usebias=True, values=None, kernel_data=None, bias_data=None, dilation_rate=1, weight_norm=False):
        self.x = x
        self.size = size
        self.outchn = outchn
        self.name = name
        self.stride = stride
        self.pad = pad
        self.usebias = usebias
        if (values is None):
            self.kernel_data = None
            self.bias_data = None
        else:
            # `values` takes precedence over the kernel_data/bias_data kwargs.
            self.kernel_data = values[0]
            self.bias_data = values[1]
        self.dilation_rate = dilation_rate
        self.weight_norm = weight_norm
        super().__init__(name)

    def _parse_args(self):
        """Normalize size/stride/dilation into full NHWC 4-element specs."""
        inchannel = self.x.get_shape().as_list()[(- 1)]
        if isinstance(self.size, list):
            self.size = [self.size[0], self.size[1], inchannel, self.outchn]
        else:
            self.size = [self.size, self.size, inchannel, self.outchn]
        if isinstance(self.stride, list):
            self.stride = [1, self.stride[0], self.stride[1], 1]
        else:
            self.stride = [1, self.stride, self.stride, 1]
        if isinstance(self.dilation_rate, list):
            self.dilation_rate = [1, self.dilation_rate[0], self.dilation_rate[1], 1]
        else:
            self.dilation_rate = [1, self.dilation_rate, self.dilation_rate, 1]

    def _initialize(self):
        """Create the kernel, optional weight-norm scale, and bias variables."""
        if (self.kernel_data is not None):
            self.W = weight_conv(self.kernel_data.shape, self.kernel_data)
        else:
            self.W = weight_conv(self.size)
        if self.weight_norm:
            print('Enable weight norm')
            # Data-dependent initialization: normalize the kernel, run a
            # forward pass, and scale by the inverse std of the activations.
            self.W = self.W.initialized_value()
            self.W = tf.nn.l2_normalize(self.W, [0, 1, 2])
            print('Initialize weight norm')
            # BUG FIX: `stride`, `pad` and `dilation_rate` were referenced as
            # bare names here (NameError at runtime); use the parsed attributes.
            x_init = tf.nn.conv2d(self.x, self.W, self.stride, self.pad, dilations=self.dilation_rate)
            (m_init, v_init) = tf.nn.moments(x_init, [0, 1, 2])
            s_init = (1.0 / tf.sqrt((v_init + 1e-08)))
            s = tf.get_variable('weight_scale', dtype=tf.float32, initializer=s_init)
            self.S = s.initialized_value()
            # BUG FIX: `outchn` was likewise a bare name; it is an attribute.
            self.S = tf.reshape(self.S, [1, 1, 1, self.outchn])
            self.W = (self.S * self.W)
            self._add_variable(self.S)
        self._add_variable(self.W)
        if self.usebias:
            if (self.bias_data is not None):
                self.b = bias([self.outchn], value=self.bias_data)
            else:
                self.b = bias([self.outchn])
            self._add_variable(self.b)

    def _deploy(self):
        """Apply the convolution (and bias, if enabled) to self.x."""
        out = tf.nn.conv2d(self.x, self.W, self.stride, self.pad, dilations=self.dilation_rate)
        if self.usebias:
            out = tf.nn.bias_add(out, self.b)
        return out
# BUG FIX: the registration call below was a bare statement whose result was
# discarded; `_searchspace('discrete')` is a registry decorator factory and
# must be applied to the class with '@' for the class to be registered.
@_searchspace('discrete')
class DiscreteSearchSpace(BaseSearchSpace):
    """Search space over a discrete set of values.

    Either an explicit ``value`` list or a numeric ``bound`` with an
    ``interval`` step defines the candidates. For integer bounds the default
    interval is 1, otherwise 0.01.
    """

    def __init__(self, bound=None, interval=None, value=None, type=None):
        if (bound and (interval is None)):
            # Pick a sensible default step from the bound's numeric type.
            if (isinstance(bound[0], int) and isinstance(bound[1], int)):
                interval = 1
            else:
                interval = 0.01
        super().__init__(bound=bound, interval=interval, value=value, type='discrete')

    def get_random_value(self):
        """Uniformly sample one of the candidate values."""
        idx = random.randint(0, (self.total_num - 1))
        return self.get_nth_value(idx)

    def get_nth_value(self, idx):
        """Return the idx-th candidate (bound+interval grid, or value list)."""
        if self.bound:
            # Round to 10 decimals to suppress float accumulation error.
            return round((self.bound[0] + (idx * self.interval)), 10)
        else:
            return self.value[idx]

    def get_all(self):
        """All candidate values, in index order."""
        return [self.get_nth_value(i) for i in range(self.total_num)]

    def get_value(self, idx=None):
        """Return the idx-th value, all values (idx < 0), or a random one (idx=None)."""
        if (idx is not None):
            if (not isinstance(idx, int)):
                raise TypeError('The type of idx should be int, not {}'.format(type(idx)))
            if (idx < 0):
                return self.get_all()
            value = self.get_nth_value(idx)
        else:
            value = self.get_random_value()
        return value

    def index(self, value):
        """Inverse of get_nth_value: the index of a given candidate value."""
        if self.value:
            return self.value.index(value)
        else:
            return int(((value - self.bound[0]) / self.interval))
class NONLocalBlock2D(_NonLocalBlockND):
    """2D specialization of the generic non-local block (dimension=2)."""

    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
        super().__init__(in_channels, inter_channels=inter_channels, dimension=2, sub_sample=sub_sample, bn_layer=bn_layer)
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
    """ONNX export configuration for encoder-decoder models with past key/value
    caching, mirroring `transformers.onnx.OnnxSeq2SeqConfigWithPast`."""

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the model outputs.

        BUG FIX: the @property decorator was missing; without it, code that
        unpacks `self.outputs` (and the super() chain) received the bound
        method instead of the mapping.
        """
        common_outputs = super(OnnxConfigWithPast, self).outputs
        # Rename generic 'sequence' axes to encoder/decoder-specific names.
        for (name, axes_names) in common_outputs.items():
            sequence_name = ('encoder_sequence' if ('encoder' in name) else 'decoder_sequence')
            for (axis_idx, axis_name) in axes_names.items():
                if ('sequence' in axis_name):
                    axes_names[axis_idx] = sequence_name
                else:
                    # Reassign unchanged entries to preserve mapping order.
                    axes_names[axis_idx] = axis_name
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction='outputs')
        return common_outputs

    @property
    def num_layers(self) -> Tuple[int]:
        """(encoder_layers, decoder_layers), derived from the model config.

        BUG FIX: restored the missing @property decorator (tuple unpacking of
        `self.num_layers` below depends on it).
        """
        try:
            num_layers = super().num_layers
            num_layers = (num_layers, num_layers)
        except AttributeError:
            if (hasattr(self._config, 'encoder_layers') and hasattr(self._config, 'decoder_layers')):
                num_layers = (self._config.encoder_layers, self._config.decoder_layers)
            else:
                raise AttributeError('could not find the number of encoder and decoder layers attributes in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
        return num_layers

    @property
    def num_attention_heads(self) -> Tuple[int]:
        """(encoder_heads, decoder_heads), derived from the model config.

        BUG FIX: restored the missing @property decorator.
        """
        try:
            num_attention_heads = super().num_attention_heads
            num_attention_heads = (num_attention_heads, num_attention_heads)
        except AttributeError:
            if (hasattr(self._config, 'encoder_attention_heads') and hasattr(self._config, 'decoder_attention_heads')):
                num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
            else:
                raise AttributeError('could not find the number of attention heads for the encoder and the decoder attributes in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
        return num_attention_heads

    def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        """Build dummy encoder/decoder inputs (and past key/values) for export."""
        encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # With a past cache, the decoder only receives the new token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch = common_inputs['input_ids'].shape[0]
            encoder_seq_length = common_inputs['input_ids'].shape[1]
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            # The decoder past is padded (+3) so it is longer than the input.
            decoder_shape = (batch, num_decoder_attention_heads, (decoder_seq_length + 3), (self._config.hidden_size // num_decoder_attention_heads))
            common_inputs['past_key_values'] = []
            (num_encoder_layers, num_decoder_layers) = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
            remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
            # Shared layers get 4 tensors each (decoder k/v + encoder k/v).
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
            # Layers present on only one side get 2 tensors each.
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        """Declare dynamic axes for every past/present key-value tensor."""
        if (direction not in ['inputs', 'outputs']):
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = ('past_key_values' if (direction == 'inputs') else 'present')
        (num_encoder_layers, num_decoder_layers) = self.num_layers
        min_num_layers = min(num_encoder_layers, num_decoder_layers)
        max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
        remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
        encoder_sequence = 'past_encoder_sequence'
        decoder_sequence = ('past_decoder_sequence' if (direction == 'inputs') else 'past_decoder_sequence + sequence')
        for i in range(min_num_layers):
            inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
        for i in range(min_num_layers, max_num_layers):
            if (remaining_side_name == 'encoder'):
                axes_info = {0: 'batch', 2: encoder_sequence}
            else:
                axes_info = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.key'] = axes_info
            # BUG FIX: only the `.key` axes were declared for the remaining
            # layers; the matching `.value` tensors need dynamic axes too.
            inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.value'] = axes_info

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Each layer entry is (decoder.key, decoder.value, encoder.key, encoder.value).
        flattened_output[f'{name}.{idx}.decoder.key'] = t[0]
        flattened_output[f'{name}.{idx}.decoder.value'] = t[1]
        flattened_output[f'{name}.{idx}.encoder.key'] = t[2]
        flattened_output[f'{name}.{idx}.encoder.value'] = t[3]
def main():
    """Run detection inference on the test set and save bbox visualizations."""
    args = parse_args()
    if not os.path.exists(args.result_root):
        os.mkdir(args.result_root)
    cfg = Config.fromfile(args.config)
    print('load config.')
    dataset = build_dataset(cfg.data.test)
    print(f'Dataset: {len(dataset)}')
    print('cfg.data.test', cfg.data.test)
    # Distributed mode only when an explicit launcher was requested.
    distributed = args.launcher != 'none'
    if distributed:
        init_dist(args.launcher, backend='nccl')
    data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=1, dist=distributed, shuffle=False)
    # Collect image file paths (up to args.limit) from the loader metadata.
    image_paths = []
    for (idx, batch) in enumerate(tqdm(data_loader)):
        image_paths.append(batch['img_metas'][0].data[0][0]['filename'])
        if idx >= args.limit:
            break
    infer_model = init_detector(args.config, args.pre_train, device='cuda')
    print('save attack bbox results')
    # Run the detector on each collected image and dump the visualization.
    for (idx, at_img) in enumerate(tqdm(image_paths)):
        result = inference_detector(infer_model, at_img)
        infer_model.show_result(at_img, result, score_thr=0.5, out_file=f'{args.result_root}/bbox_attack_{idx}.png')
def map_roberta(mapping, vocab):
    """Build a RoBERTa-style vocabulary from an id-position mapping.

    Layout: the four special tokens first, then every mapped word shifted by
    the number of specials, then '<mask>', then any vocab word that was
    neither mapped nor special.
    """
    id_to_word = {str(v): k for (k, v) in vocab.items()}
    special = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
    shift = len(special)
    result = special
    for (word_id, position) in mapping.items():
        if word_id in id_to_word:
            result[id_to_word[word_id]] = position + shift
        else:
            # Fall back to the raw id string when it is not in the vocab.
            print('not found: {}'.format(word_id))
            result[word_id] = position + shift
    result['<mask>'] = len(result)
    # Append every vocab word that was neither mapped nor a special token.
    leftover_ids = (set(str(vocab[k]) for k in vocab) - set(mapping)) - set(special.keys())
    for word in [id_to_word[x] for x in leftover_ids]:
        result[word] = len(result)
    return result
def read_into_df(fileName, delimiter=';', header='infer'):
    """Read a delimited text file into a pandas DataFrame.

    Defaults to semicolon-separated values with an inferred header row.
    """
    frame = pd.read_csv(fileName, delimiter=delimiter, header=header)
    return frame
def rand_brightness(x):
    """Shift each sample's brightness by a random offset in [-0.5, 0.5).

    One scalar is drawn per batch element (shape (N, 1, 1, 1)) so the same
    shift applies across all channels and spatial positions of that sample.
    """
    offset = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5
    return x + offset
def run_xacro_in_file(filename):
    """Run xacro on tests/<filename> and assert it produces non-empty output.

    Relies on the module-level `path` as the working directory.
    """
    assert (filename != '')
    cmd = ['xacro', '--inorder', 'tests/{}'.format(filename)]
    # check_output raises CalledProcessError on a non-zero exit status; the
    # assert additionally requires non-empty stdout.
    assert subprocess.check_output(cmd, cwd=path)
def test_decoder():
    """Smoke-test MasterModel decoding on a random image batch."""
    raw_cfg = anyconfig.load('/home/luning/dev/projects/master-tf/configs/master.yaml')
    cfg = easydict.EasyDict(raw_cfg)
    # Batch of 10 random 48x160 RGB images.
    batch = tf.random.normal([10, 48, 160, 3])
    model = MasterModel(cfg.model, 10, (48, 160))
    ys = model.decode(batch, padding=tf.constant(True))
    print(data_utils.LabelTransformer.decode_tensor(ys))
class MLMAccuracyWVC(EvalMetric):
    """Masked-LM accuracy over weakly-supervised visual-concept (WVC) positions.

    Positions labelled -1 are ignored; correct-prediction counts and instance
    counts are accumulated across `update` calls.
    """

    def __init__(self, allreduce=False, num_replicas=1):
        super(MLMAccuracyWVC, self).__init__('MLMAccWVC', allreduce, num_replicas)

    def update(self, outputs):
        with torch.no_grad():
            preds = outputs['mlm_logits_wvc']
            targets = outputs['mlm_label_wvc']
            # -1 marks unlabelled positions, which are excluded.
            valid = targets != -1
            n_valid = valid.sum()
            if n_valid > 0:
                correct = (preds[valid].argmax(dim=1) == targets[valid]).sum()
                self.sum_metric += float(correct.item())
                self.num_inst += n_valid.item()
class LambdaLayer(nn.Module):
    """Wraps an arbitrary callable as an nn.Module (e.g. for use in Sequential)."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate directly to the wrapped callable.
        return self.lambd(x)
def run(method, x_unvec, y, idx_feat_dict, num_feature, max_num_feature, num_class, max_num_sample, feature_selection, k_idx, k, num_search, perm_indices):
    """Randomized hyperparameter search on the validation split of partition k_idx.

    Depending on `method`, tunes either the RIDDLE MLP (via
    tuning.random_search) or a classical estimator (scikit-learn / XGBoost via
    RandomizedSearchCV) and returns the best hyperparameter dict found.

    NOTE(review): relies on module-level names defined elsewhere in this file
    (emr, tuning, MLP, select_features, subset_reencode_features,
    vectorize_features, loss_scorer, TUNING_K).
    """
    print(('-' * 72))
    print('Partition k = {}'.format(k_idx))
    # Only the validation split of this partition is used for tuning.
    (x_train_unvec, y_train, x_val_unvec, y_val, _, _) = emr.get_k_fold_partition(x_unvec, y, k_idx=k_idx, k=k, perm_indices=perm_indices)
    if (max_num_feature > 0):
        # Fit feature selection on the training split, then re-encode the
        # validation data to the selected feature subset.
        (feat_encoding_dict, _) = select_features(x_train_unvec, y_train, idx_feat_dict, method=feature_selection, num_feature=num_feature, max_num_feature=max_num_feature)
        x_val_unvec = subset_reencode_features(x_val_unvec, feat_encoding_dict)
        num_feature = max_num_feature
    # Optionally cap the number of tuning samples for speed.
    if ((max_num_sample != None) and (len(x_val_unvec) > max_num_sample)):
        x_val_unvec = x_val_unvec[0:max_num_sample]
        y_val = y_val[0:max_num_sample]
    start = time.time()
    if (method == 'riddle'):
        # RIDDLE's own random-search utility over the MLP hyperparameters.
        model_class = MLP
        init_args = {'num_feature': num_feature, 'num_class': num_class}
        param_dist = {'num_hidden_layer': 2, 'num_hidden_node': 512, 'activation': ['prelu', 'relu'], 'dropout': tuning.Uniform(lo=0.2, hi=0.8), 'learning_rate': tuning.UniformLogSpace(10, lo=(- 6), hi=(- 1))}
        best_param = tuning.random_search(model_class, init_args, param_dist, x_val_unvec, y_val, num_class=num_class, k=TUNING_K, num_search=num_search)
    else:
        # Classical estimators need vectorized (one-hot style) features.
        x_val = vectorize_features(x_val_unvec, num_feature)
        if (method == 'logit'):
            from sklearn.linear_model import LogisticRegression
            estimator = LogisticRegression(multi_class='multinomial', solver='lbfgs')
            param_dist = {'C': tuning.UniformLogSpace(base=10, lo=(- 3), hi=3)}
        elif (method == 'random_forest'):
            from sklearn.ensemble import RandomForestClassifier
            estimator = RandomForestClassifier()
            param_dist = {'max_features': ['sqrt', 'log2', None], 'max_depth': tuning.UniformIntegerLogSpace(base=2, lo=0, hi=7), 'n_estimators': tuning.UniformIntegerLogSpace(base=2, lo=4, hi=8)}
        elif (method == 'linear_svm'):
            from sklearn.svm import SVC
            # SVC with a degree-1 polynomial kernel acts as a linear SVM while
            # still supporting probability estimates.
            estimator = SVC(kernel='poly', degree=1, coef0=0.0, gamma=1.0, probability=True, cache_size=1000)
            param_dist = {'C': tuning.UniformLogSpace(base=10, lo=(- 2), hi=1)}
        elif (method == 'poly_svm'):
            from sklearn.svm import SVC
            estimator = SVC(kernel='poly', probability=True, cache_size=1000)
            param_dist = {'C': tuning.UniformLogSpace(base=10, lo=(- 2), hi=1), 'degree': [2, 3, 4], 'gamma': tuning.UniformLogSpace(base=10, lo=(- 5), hi=1)}
        elif (method == 'rbf_svm'):
            from sklearn.svm import SVC
            estimator = SVC(kernel='rbf', probability=True, cache_size=1000)
            param_dist = {'C': tuning.UniformLogSpace(base=10, lo=(- 2), hi=1), 'gamma': tuning.UniformLogSpace(base=10, lo=(- 5), hi=1)}
        elif (method == 'gbdt'):
            from xgboost import XGBClassifier
            estimator = XGBClassifier(objective='multi:softprob')
            param_dist = {'max_depth': tuning.UniformIntegerLogSpace(base=2, lo=0, hi=5), 'n_estimators': tuning.UniformIntegerLogSpace(base=2, lo=4, hi=8), 'learning_rate': tuning.UniformLogSpace(base=10, lo=(- 3), hi=0)}
        else:
            raise ValueError('unknown method: {}'.format(method))
        param_search = RandomizedSearchCV(estimator, param_dist, refit=False, n_iter=num_search, scoring=loss_scorer)
        param_search.fit(x_val, y_val)
        best_param = param_search.best_params_
    print('Best parameters for {} for k_idx={}: {} found in {:.3f} s'.format(method, k_idx, best_param, (time.time() - start)))
    return best_param
class LatentLayersKLLoss(_Loss):
    """KL regularizer for per-language latent layer-selection distributions."""

    def __init__(self, args):
        super().__init__()
        self.args = args

    def forward(self, layer_samples, lang_idx, update_num, sample_size):
        """Compute the annealed KL between the sampled distribution and a prior.

        Supported priors: 'uniform' and 'agged_posterior' (the aggregated,
        detached posterior over all languages).
        """
        prior = self.args.prior
        q = layer_samples[lang_idx]
        eps = 1e-07
        if prior == 'uniform':
            # KL(q || U) with log U = log(0.5) per binary layer selection.
            kl = (q * (torch.log(q + eps) - math.log(0.5))).sum(-1)
        elif prior == 'agged_posterior':
            # Aggregate detached posteriors across languages, row-normalize,
            # and use the result as the prior.
            stacked = torch.stack([s.detach() for s in layer_samples], dim=0)
            agg = stacked.sum(dim=0)
            agg = agg / agg.sum(-1)
            kl = (q * (torch.log(q + eps) - torch.log(agg + eps))).sum(-1)
        else:
            raise NotImplementedError('The specified prior is not implemented.')
        kl = kl / layer_samples[0].size()[0]
        # Linearly anneal the KL weight after `soft_update` steps, capped at
        # `sparsity_weight`.
        weight = min(self.args.sparsity_weight, ((update_num - self.args.soft_update) * self.args.sparsity_weight) / self.args.anneal_updates)
        return kl * (weight * sample_size)
def generateLine2(data):
    """Render one 5-field result row into the global `tframe` grid.

    Rows alternate background colors. Columns: index label, two clickable
    candidate buttons, a reference label, a correct/incorrect icon (col 4),
    and a d100k-membership icon (col 6). Increments the global `linenumber`.
    """
    global linenumber, tframe
    bgcolor = '#e5e5e5' if linenumber % 2 == 1 else '#ffffff'
    frame = Frame(tframe, bg=bgcolor)
    assert (len(data) == 5)
    Label(frame, text=data[0], font=(None, 10), bg=bgcolor, width=15, anchor=CENTER).grid(row=0, column=0)
    Button(frame, text=data[1], font=(None, 10), bg=bgcolor, bd=0, width=30, command=(lambda: callback(data[1]))).grid(row=0, column=1)
    Button(frame, text=data[2], font=(None, 10), bg=bgcolor, bd=0, width=30, command=(lambda: callback(data[2]))).grid(row=0, column=2)
    Label(frame, text=data[3], font=(None, 10), bg=bgcolor, width=30, anchor=CENTER).grid(row=0, column=3)
    # Correctness indicator.
    correct_icon = rightimg if data[4] == 1 else wrongimg
    Label(frame, font=(None, 10), image=correct_icon, bg=bgcolor, width=30, anchor=CENTER).grid(row=0, column=4)
    # Membership-in-d100k indicator.
    member_icon = rightimg if data[1] in d100k else wrongimg
    Label(frame, font=(None, 10), image=member_icon, bg=bgcolor, width=30, anchor=CENTER).grid(row=0, column=6)
    frame.grid(row=linenumber, column=0, padx=2, pady=2)
    linenumber += 1
def get_kenlm_processor(model_path, path_lm=None):
    """Load a Wav2Vec2 processor/model, optionally with a KenLM CTC decoder.

    Returns:
        (processor, None, None, model) when ``path_lm`` is None, otherwise
        (processor, processor_with_lm, model).
        NOTE(review): the two branches return tuples of different arity
        (4 vs 3); callers must be aware which path they hit.

    Raises:
        FileNotFoundError: if ``model_path`` is not a directory.
    """
    path_tokenizer = model_path
    if not Path(model_path).is_dir():
        # BUG FIX: previously this only printed the error and fell through,
        # crashing later with NameError on the undefined `processor`; fail
        # fast with an explicit exception instead.
        raise FileNotFoundError(f'Error. Models were not found in {model_path}')
    processor = AutoProcessor.from_pretrained(path_tokenizer)
    model = AutoModelForCTC.from_pretrained(model_path)
    if (path_lm is None):
        return (processor, None, None, model)
    # Label set for the CTC decoder: the tokenizer vocabulary plus BOS/EOS.
    vocab = processor.tokenizer.convert_ids_to_tokens(range(0, processor.tokenizer.vocab_size))
    vocab.append('<s>')
    vocab.append('</s>')
    tokenizer = Wav2Vec2CTCTokenizer((path_tokenizer + '/vocab.json'), unk_token='[UNK]', pad_token='[PAD]', word_delimiter_token='|')
    ctcdecoder_kenlm = build_ctcdecoder(labels=vocab, kenlm_model_path=path_lm)
    processor_ctc_kenlm = Wav2Vec2ProcessorWithLM(feature_extractor=processor.feature_extractor, tokenizer=tokenizer, decoder=ctcdecoder_kenlm)
    return (processor, processor_ctc_kenlm, model)
def make_summary(tag, value):
    """Build a single-scalar TF Summary proto, e.g. for manual event logging."""
    summary_value = tf.Summary.Value(tag=tag, simple_value=value)
    return tf.Summary(value=[summary_value])
class Data():
    """Knowledge-graph triple container for FB15k-style datasets.

    Loads train/valid/test triples, collects the entity set, and builds a
    relation list ordered as: train relations first, then valid/test relations
    unseen in train.
    """

    def __init__(self, data_dir='data/FB15k-237', reverse=False):
        self.train_data = self.load_data(data_dir, 'train', reverse=reverse)
        self.valid_data = self.load_data(data_dir, 'valid', reverse=reverse)
        self.test_data = self.load_data(data_dir, 'test', reverse=reverse)
        self.data = ((self.train_data + self.valid_data) + self.test_data)
        self.entities = self.get_entities(self.data)
        self.train_relations = self.get_relations(self.train_data)
        self.valid_relations = self.get_relations(self.valid_data)
        self.test_relations = self.get_relations(self.test_data)
        # Train relations first, then unseen valid/test relations (dedup'd).
        self.relations = ((self.train_relations + [i for i in self.valid_relations if (i not in self.train_relations)]) + [i for i in self.test_relations if (i not in self.train_relations)])

    def load_data(self, data_dir, data_type='train', reverse=False):
        """Read whitespace-separated (head, relation, tail) triples.

        With reverse=True, also appends inverse triples (t, r_reverse, h).
        """
        # BUG FIX: the path was built as '%s%s.txt' % (data_dir, data_type),
        # which silently produced e.g. 'data/FB15k-237train.txt' unless the
        # caller passed a trailing slash; os.path.join handles both forms,
        # and the file is closed via the context manager.
        with open(os.path.join(data_dir, (data_type + '.txt')), 'r') as f:
            data = f.read().strip().split('\n')
            data = [i.split() for i in data]
            if reverse:
                data += [[i[2], (i[1] + '_reverse'), i[0]] for i in data]
        return data

    def get_relations(self, data):
        """Sorted unique relations appearing in the triples."""
        relations = sorted(list(set([d[1] for d in data])))
        return relations

    def get_entities(self, data):
        """Sorted unique head/tail entities appearing in the triples."""
        entities = sorted(list(set(([d[0] for d in data] + [d[2] for d in data]))))
        return entities
def load_model_result(model, data_dir):
    """Load per-city results for a model from ``<data_dir><model>/result/``.

    Each file in the result directory is named '<city>_...'; returns a dict
    mapping city name to the loaded city result.
    """
    result_dir = ((data_dir + model) + '/result/')
    per_city = {}
    for fname in os.listdir(result_dir):
        city = fname.split('_')[0].strip()
        per_city[city] = load_city_result(city, model, data_dir)
    return per_city
def random_partial_box(random_state):
    """Sample a square box [x, y, side, side] via rejection sampling.

    x1 is drawn from [0, 0.5), (x2, y2) from [0.5, 1); a candidate is accepted
    only when the side length x2 - x1 lies strictly between 0.5 and y2, so the
    square fits with its top at y2 - side.
    """
    def sample_candidate():
        x1 = random_state.uniform(0, 0.5)
        (x2, y2) = random_state.uniform(0.5, 1, size=2)
        side = x2 - x1
        if 0.5 < side < y2:
            return np.array([x1, y2 - side, side, side])
        return None

    # Rejection-sample until a candidate satisfies the constraint.
    while True:
        candidate = sample_candidate()
        if candidate is not None:
            return candidate
def is_compatible_episode(s, t, sim, near_dist, far_dist, geodesic_to_euclid_ratio):
    """Decide whether start/target positions (s, t) form a valid episode.

    Returns (ok, geodesic_distance). Rejects: pairs on different floors,
    unreachable targets, geodesic distances outside [near_dist, far_dist],
    near-straight-line pairs (subsampled stochastically by their
    geodesic/euclidean ratio), and starts on small navigation islands.
    """
    euclid_dist = np.power(np.power((np.array(s) - np.array(t)), 2).sum(0), 0.5)
    # Different floors: vertical coordinates more than 0.5 apart.
    if np.abs(s[1] - t[1]) > 0.5:
        return (False, 0)
    d_separation = sim.geodesic_distance(s, [t])
    if d_separation == np.inf:
        # Target is unreachable from the start.
        return (False, 0)
    if not (near_dist <= d_separation <= far_dist):
        return (False, 0)
    distances_ratio = d_separation / euclid_dist
    # Subsample "easy" near-straight episodes to keep hard ones frequent.
    if distances_ratio < geodesic_to_euclid_ratio:
        if np.random.rand() > _ratio_sample_rate(distances_ratio, geodesic_to_euclid_ratio):
            return (False, 0)
    if sim.island_radius(s) < ISLAND_RADIUS_LIMIT:
        return (False, 0)
    return (True, d_separation)
def main(args):
    """Evaluate a token-classification (NER) model on one or more ATC test sets.

    For each (file, name) pair from the args, runs prediction with a
    Trainer and writes metrics to '<output_folder>/<name>_metrics'.
    """
    token_classification_model = args.input_model
    path_to_files = args.input_files.rstrip().split(' ')
    test_set_names = args.test_names.rstrip().split(' ')
    output_folder = args.output_folder
    assert (len(path_to_files) == len(test_set_names)), 'number of test files and their names differ'
    os.makedirs((os.path.dirname(output_folder) + '/evaluations'), exist_ok=True)
    print('\nLoading the TOKEN classification recognition model (NER)\n')
    model = AutoModelForTokenClassification.from_pretrained(token_classification_model)
    tokenizer = AutoTokenizer.from_pretrained(token_classification_model, use_fast=True, do_lower_case=True)
    # Label <-> id mappings come from the fine-tuned model's config.
    tag2id = model.config.label2id
    id2tag = model.config.id2label
    trainer = Trainer(model=model, data_collator=DataCollatorWithPadding(tokenizer))
    for (path_to_file, dataset_name) in zip(path_to_files, test_set_names):
        print(f'****** NAMED-ENTITY RECOGNITION (for ATC) ******')
        print(f'---- Evaluating dataset: --> {dataset_name} -----')
        (texts, tags) = read_atc_ner_data(path_to_file)
        encodings = tokenizer(texts, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True)
        labels = encode_tags(tag2id, tags, encodings)
        # The offsets were only needed to align tags with sub-tokens.
        encodings.pop('offset_mapping')
        dataset = ATCDataset_for_ner(encodings, labels)
        (raw_pred, raw_labels, _) = trainer.predict(dataset)
        path_to_output_file = f'{output_folder}/{dataset_name}_metrics'
        metrics = compute_metrics(raw_pred, raw_labels, label_list=id2tag, log_folder=path_to_output_file)
class BlenderbotForConditionalGeneration(metaclass=DummyObject):
    # Import-time placeholder: stands in for the real class when torch is
    # not installed, failing with a helpful message only on instantiation.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError if the torch backend is missing.
        requires_backends(self, ['torch'])
class LogFileWriter(ContextDecorator, ContextMethodDecorator):
    # Context manager / decorator that intercepts TensorFlow FileWriter
    # construction and records every logdir into the experiment's info dict.
    def __init__(self, experiment):
        # experiment: object exposing an ``info`` dict (sacred-style run info).
        self.experiment = experiment
        def log_writer_decorator(instance, original_method, original_args, original_kwargs):
            # Run the real FileWriter.__init__ first, then capture the logdir
            # it was given (keyword form, else the first positional argument).
            result = original_method(instance, *original_args, **original_kwargs)
            if ('logdir' in original_kwargs):
                logdir = original_kwargs['logdir']
            else:
                logdir = original_args[0]
            self.experiment.info.setdefault('tensorflow', {}).setdefault('logdirs', []).append(logdir)
            return result
        # Patch tensorflow.summary.FileWriter.__init__ with the wrapper above
        # for the lifetime of this context.
        ContextMethodDecorator.__init__(self, tensorflow.summary.FileWriter, '__init__', log_writer_decorator)
class ResidualLayer(torch.nn.Module):
    """Residual MLP block: out = (x + MLP(x)) / sqrt(2).

    The 1/sqrt(2) rescaling keeps the output variance roughly constant
    when summing the input with the MLP branch.
    """

    def __init__(self, units: int, nLayers: int=2, activation=None, name=None):
        super().__init__()
        blocks = [Dense(units, units, activation=activation, bias=False) for _ in range(nLayers)]
        self.dense_mlp = torch.nn.Sequential(*blocks)
        self.inv_sqrt_2 = 1 / (2.0 ** 0.5)

    def forward(self, inputs):
        """Apply the MLP branch, add the skip connection, and rescale."""
        branch = self.dense_mlp(inputs)
        return (inputs + branch) * self.inv_sqrt_2
def download_url(url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar):
    """Download *url* to *dst_file_path* in chunks, reporting progress.

    Args:
        url: source URL passed to ``urlopen``.
        dst_file_path: local path the payload is written to ('wb').
        chunk_size: bytes read per iteration.
        progress_hook: callable ``(bytes_so_far, total_size)`` or falsy to
            disable progress reporting.

    Returns:
        Total number of bytes downloaded.
    """
    response = urlopen(url)
    # BUGFIX: ``.info().getheader`` is Python 2-only; in Python 3 headers are
    # an email.message.Message, so use ``.get``. Also tolerate a missing
    # Content-Length header (total reported as 0) instead of crashing.
    content_length = response.info().get('Content-Length')
    total_size = int(content_length.strip()) if content_length else 0
    bytes_so_far = 0
    with open(dst_file_path, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            bytes_so_far += len(chunk)
            if progress_hook:
                progress_hook(bytes_so_far, total_size)
            f.write(chunk)
    return bytes_so_far
def main(params):
    # Entry point: validates params, sets up the result store and stdout/stderr
    # logging, trains the agent for params['train_steps'] iterations, saving
    # checkpoints periodically and a final-results summary row at the end.
    for (k, v) in zip(params.keys(), params.values()):
        assert (v is not None), f'Value for {k} is None'
    # The metadata schema mirrors the params dict so every run is recorded.
    metadata_schema = schema_from_dict(params)
    base_directory = params['out_dir']
    store = Store(base_directory)
    def make_err_redirector(stream_name):
        # Tee the named stream into <store.path>/<stream_name>.txt while
        # still forwarding it to the console.
        tee = Tee(os.path.join(store.path, (stream_name + '.txt')), stream_name)
        return tee
    stderr_tee = make_err_redirector('stderr')
    stdout_tee = make_err_redirector('stdout')
    metadata_schema.update({'store_path': str, 'git_commit': str})
    # Record the exact code version the run was launched from.
    repo = git.Repo(path=os.path.dirname(os.path.realpath(__file__)), search_parent_directories=True)
    metadata_table = store.add_table('metadata', metadata_schema)
    metadata_table.update_row(params)
    metadata_table.update_row({'store_path': store.path, 'git_commit': repo.head.object.hexsha})
    metadata_table.flush_row()
    if (params['save_iters'] > 0):
        store.add_table('checkpoints', {'val_model': store.PYTORCH_STATE, 'policy_model': store.PYTORCH_STATE, 'envs': store.PICKLE, 'policy_opt': store.PYTORCH_STATE, 'val_opt': store.PYTORCH_STATE, 'iteration': int})
    p = Trainer.agent_from_params(params, store=store)
    rewards = []
    final_table = store.add_table('final_results', {'iteration': int, '5_rewards': float, 'terminated_early': bool})
    def finalize_table(iteration, terminated_early, rewards):
        # Summary statistic: mean of the last five per-step mean rewards.
        final_5_rewards = np.array(rewards)[(- 5):].mean()
        final_table.append_row({'iteration': iteration, '5_rewards': final_5_rewards, 'terminated_early': terminated_early})
    try:
        for i in range(params['train_steps']):
            print(('Step %d' % (i,)))
            # Checkpoint model/optimizer/env state every save_iters steps.
            if ((params['save_iters'] > 0) and ((i % params['save_iters']) == 0)):
                store['checkpoints'].append_row({'iteration': i, 'val_model': p.val_model.state_dict(), 'policy_model': p.policy_model.state_dict(), 'policy_opt': p.POLICY_ADAM.state_dict(), 'val_opt': p.val_opt.state_dict(), 'envs': p.envs})
            mean_reward = p.train_step()
            rewards.append(mean_reward)
        finalize_table(i, False, rewards)
    except KeyboardInterrupt:
        # On Ctrl-C, snapshot the expert networks before summarizing.
        torch.save(p.val_model, ('saved_experts/%s-expert-vf' % (params['game'],)))
        torch.save(p.policy_model, ('saved_experts/%s-expert-pol' % (params['game'],)))
        # NOTE(review): 'i' is unbound here if the interrupt fires before the
        # first loop iteration completes its header -- confirm and guard.
        finalize_table(i, True, rewards)
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI (matched) data set, GLUE TSV layout."""

    def get_train_examples(self, data_dir):
        """Build examples from ``train.tsv``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Build examples from the matched dev split."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev_matched.tsv')), 'dev_matched')

    def get_test_examples(self, data_dir):
        """Build examples from the matched test split."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test_matched.tsv')), 'test')

    def get_labels(self):
        """Return the three NLI class labels."""
        return ['contradiction', 'entailment', 'neutral']

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows (header included) into InputExamples."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # skip the header row
            guid = '%s-%s' % (set_type, tokenization.convert_to_unicode(row[0]))
            text_a = tokenization.convert_to_unicode(row[8])
            text_b = tokenization.convert_to_unicode(row[9])
            # Test rows carry no gold label, so a fixed placeholder is used.
            if set_type == 'test':
                label = 'contradiction'
            else:
                label = tokenization.convert_to_unicode(row[-1])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
def parse_args_and_arch(parser, input_args=None, parse_known=False):
    """Two-phase argument parsing for fairseq-style component registries.

    A first permissive pass discovers which components (arch, criterion,
    optimizer, schedulers, task) were selected; each selected component then
    registers its own arguments before the final parse.
    """
    args, _ = parser.parse_known_args(input_args)
    if hasattr(args, 'arch'):
        # Architecture args go into their own group; SUPPRESS keeps unset
        # options out of the namespace so arch defaults can apply later.
        model_specific_group = parser.add_argument_group('Model-specific configuration', argument_default=argparse.SUPPRESS)
        ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
    # Each remaining component type registers extra args on the main parser.
    component_registries = (
        ('criterion', CRITERION_REGISTRY),
        ('optimizer', OPTIMIZER_REGISTRY),
        ('lr_scheduler', LR_SCHEDULER_REGISTRY),
        ('gs_scheduler', GS_SCHEDULER_REGISTRY),
        ('task', TASK_REGISTRY),
    )
    for attr, registry in component_registries:
        if hasattr(args, attr):
            registry[getattr(args, attr)].add_args(parser)
    # Second, authoritative pass now that all component args are known.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
        args.max_sentences_valid = args.max_sentences
    if getattr(args, 'memory_efficient_fp16', False):
        args.fp16 = True
    # Let the architecture fill in its remaining defaults.
    if hasattr(args, 'arch'):
        ARCH_CONFIG_REGISTRY[args.arch](args)
    return (args, extra) if parse_known else args
def start(fn_name, use_stack=True):
    """Begin timing *fn_name*.

    With ``use_stack`` the currently running timer (if any) is paused and
    pushed so a later stop() can resume it; without it only the start time
    is recorded.
    """
    global _running_timer
    if not use_stack:
        _start_times[fn_name] = time.perf_counter()
        return
    if _running_timer is not None:
        # Pause whatever is currently being timed and remember it.
        stop(_running_timer, use_stack=False)
        _timer_stack.append(_running_timer)
    start(fn_name, use_stack=False)
    _running_timer = fn_name
def cifarnet(nfilters, avgpool=None, nclasses=10, nmasks=32, level=0.1, filter_size=3, first_filter_size=0, pool_type=None, input_size=None, scale_noise=1, act='relu', use_act=True, dropout=0.5, unique_masks=False, debug=False, noise_type='uniform', train_masks=False, mix_maps=None):
    """Factory wrapper that builds a CifarNet with the given hyper-parameters.

    NOTE(review): ``avgpool`` is accepted but not forwarded to CifarNet --
    presumably unused; confirm against the CifarNet constructor.
    """
    return CifarNet(
        nfilters=nfilters,
        nclasses=nclasses,
        nmasks=nmasks,
        level=level,
        filter_size=filter_size,
        pool_type=pool_type,
        scale_noise=scale_noise,
        act=act,
        use_act=use_act,
        first_filter_size=first_filter_size,
        input_size=input_size,
        dropout=dropout,
        unique_masks=unique_masks,
        debug=debug,
        noise_type=noise_type,
        train_masks=train_masks,
        mix_maps=mix_maps,
    )
class MYNET(Net):
    # Few-shot classification head: class prototypes are refined jointly with
    # each query via self-attention, then scored by cosine similarity.
    def __init__(self, args, mode=None):
        super().__init__(args, mode)
        hdim = self.num_features
        # Single-head self-attention over the [prototypes; query] sequence.
        self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
    def forward(self, input):
        # 'encoder' mode just embeds the images; otherwise ``input`` is a
        # (support_idx, query_idx) pair to classify.
        if (self.mode == 'encoder'):
            input = self.encode(input)
            return input
        else:
            (support_idx, query_idx) = input
            logits = self._forward(support_idx, query_idx)
            return logits
    def _forward(self, support, query):
        # assumes support is (batch, shot, way, emb_dim) and query is
        # (batch, n_per_class, way, emb_dim) -- TODO confirm against caller.
        emb_dim = support.size((- 1))
        # Prototype = mean embedding over the shot dimension.
        proto = support.mean(dim=1)
        num_batch = proto.shape[0]
        num_proto = proto.shape[1]
        num_query = (query.shape[1] * query.shape[2])
        query = query.view((- 1), emb_dim).unsqueeze(1)
        # Replicate the prototype set once per query so attention can adapt
        # the prototypes to each individual query.
        proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
        proto = proto.view((num_batch * num_query), num_proto, emb_dim)
        combined = torch.cat([proto, query], 1)
        combined = self.slf_attn(combined, combined, combined)
        (proto, query) = combined.split(num_proto, 1)
        logits = F.cosine_similarity(query, proto, dim=(- 1))
        # Temperature scaling sharpens/softens the cosine logits.
        logits = (logits * self.args.temperature)
        return logits
class DatasetPASCAL(Dataset):
    # Few-shot segmentation dataset over PASCAL VOC-2012 split into 4 folds
    # of 5 classes each (PASCAL-5i style): each item is an episode of one
    # query image plus ``shot`` support images of the same class.
    def __init__(self, datapath, fold, transform, split, shot, use_original_imgsize):
        # 'test' is served from the validation split; anything else trains.
        self.split = ('val' if (split in ['val', 'test']) else 'trn')
        self.fold = fold
        self.nfolds = 4
        self.nclass = 20
        self.benchmark = 'pascal'
        self.shot = shot  # number of support images per episode
        self.use_original_imgsize = use_original_imgsize
        self.img_path = os.path.join(datapath, 'VOC2012/JPEGImages/')
        self.ann_path = os.path.join(datapath, 'VOC2012/SegmentationClassAug/')
        self.transform = transform
        self.class_ids = self.build_class_ids()
        self.img_metadata = self.build_img_metadata()
        self.img_metadata_classwise = self.build_img_metadata_classwise()
    def __len__(self):
        # Validation is fixed at 1000 episodes regardless of metadata size
        # (__getitem__ wraps the index with a modulo).
        return (len(self.img_metadata) if (self.split == 'trn') else 1000)
    def __getitem__(self, idx):
        idx %= len(self.img_metadata)
        (query_name, support_names, class_sample) = self.sample_episode(idx)
        (query_img, query_cmask, support_imgs, support_cmasks, org_qry_imsize) = self.load_frame(query_name, support_names)
        query_img = self.transform(query_img)
        if (not self.use_original_imgsize):
            # Resize the class mask to the transformed image resolution.
            query_cmask = F.interpolate(query_cmask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[(- 2):], mode='nearest').squeeze()
        (query_mask, query_ignore_idx) = self.extract_ignore_idx(query_cmask.float(), class_sample)
        if self.shot:
            support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])
            support_masks = []
            support_ignore_idxs = []
            for scmask in support_cmasks:
                scmask = F.interpolate(scmask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[(- 2):], mode='nearest').squeeze()
                (support_mask, support_ignore_idx) = self.extract_ignore_idx(scmask, class_sample)
                support_masks.append(support_mask)
                support_ignore_idxs.append(support_ignore_idx)
            support_masks = torch.stack(support_masks)
            support_ignore_idxs = torch.stack(support_ignore_idxs)
        else:
            # Zero-shot: no support supervision at all.
            support_masks = []
            support_ignore_idxs = []
        batch = {'query_img': query_img, 'query_mask': query_mask, 'query_name': query_name, 'query_ignore_idx': query_ignore_idx, 'org_query_imsize': org_qry_imsize, 'support_imgs': support_imgs, 'support_masks': support_masks, 'support_names': support_names, 'support_ignore_idxs': support_ignore_idxs, 'class_id': torch.tensor(class_sample)}
        return batch
    def extract_ignore_idx(self, mask, class_id):
        # Pixels valued 255 mark the VOC 'ignore' boundary region; the class
        # mask is binarized to {0, 1} for the sampled class in place.
        boundary = (mask / 255).floor()
        mask[(mask != (class_id + 1))] = 0
        mask[(mask == (class_id + 1))] = 1
        return (mask, boundary)
    def load_frame(self, query_name, support_names):
        query_img = self.read_img(query_name)
        query_mask = self.read_mask(query_name)
        support_imgs = [self.read_img(name) for name in support_names]
        support_masks = [self.read_mask(name) for name in support_names]
        # Original (W, H) size is kept for evaluation at native resolution.
        org_qry_imsize = query_img.size
        return (query_img, query_mask, support_imgs, support_masks, org_qry_imsize)
    def read_mask(self, img_name):
        # Segmentation masks are stored as PNG label images.
        mask = torch.tensor(np.array(Image.open((os.path.join(self.ann_path, img_name) + '.png'))))
        return mask
    def read_img(self, img_name):
        return Image.open((os.path.join(self.img_path, img_name) + '.jpg'))
    def sample_episode(self, idx):
        (query_name, class_sample) = self.img_metadata[idx]
        support_names = []
        if self.shot:
            # Rejection-sample distinct support images for the same class.
            # NOTE(review): loops forever if the class has no image other
            # than the query -- presumably never happens on PASCAL; confirm.
            while True:
                support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
                if (query_name != support_name):
                    support_names.append(support_name)
                if (len(support_names) == self.shot):
                    break
        return (query_name, support_names, class_sample)
    def build_class_ids(self):
        # Fold k holds out classes [k*5, k*5+5) for validation.
        nclass_trn = (self.nclass // self.nfolds)
        class_ids_val = [((self.fold * nclass_trn) + i) for i in range(nclass_trn)]
        class_ids_trn = [x for x in range(self.nclass) if (x not in class_ids_val)]
        if (self.split == 'trn'):
            return class_ids_trn
        else:
            return class_ids_val
    def build_img_metadata(self):
        def read_metadata(split, fold_id):
            # Split files contain '<image>__<1-based class id>' per line.
            fold_n_metadata = os.path.join(('fewshot_data/data/splits/pascal/%s/fold%d.txt' % (split, fold_id)))
            with open(fold_n_metadata, 'r') as f:
                fold_n_metadata = f.read().split('\n')[:(- 1)]
            fold_n_metadata = [[data.split('__')[0], (int(data.split('__')[1]) - 1)] for data in fold_n_metadata]
            return fold_n_metadata
        img_metadata = []
        if (self.split == 'trn'):
            # Training uses every fold except the held-out one.
            for fold_id in range(self.nfolds):
                if (fold_id == self.fold):
                    continue
                img_metadata += read_metadata(self.split, fold_id)
        elif (self.split == 'val'):
            img_metadata = read_metadata(self.split, self.fold)
        else:
            raise Exception(('Undefined split %s: ' % self.split))
        print(('Total (%s) images are : %d' % (self.split, len(img_metadata))))
        return img_metadata
    def build_img_metadata_classwise(self):
        # Invert img_metadata into {class_id: [image names]} for sampling.
        img_metadata_classwise = {}
        for class_id in range(self.nclass):
            img_metadata_classwise[class_id] = []
        for (img_name, img_class) in self.img_metadata:
            img_metadata_classwise[img_class] += [img_name]
        return img_metadata_classwise
def random_crop():
    """Smoke-test RandomCrop on a 3x5 ramp array, printing every step."""
    data = np.arange(3 * 5).reshape(3, 5)
    print(data)
    cropper = RandomCrop(size=(3, 3), p=1.0)
    print(cropper)
    cropped = cropper(data)
    print(cropped)
def copy_parameter_from_resnet(model, resnet_dict):
    """Copy matching entries from *resnet_dict* into *model*'s state, in place.

    Entries whose name is absent from the model's state dict, or whose
    shape/dtype is incompatible with the target tensor, are skipped silently
    (best-effort weight transfer).

    Args:
        model: torch.nn.Module receiving the weights.
        resnet_dict: mapping of parameter name -> tensor/Parameter to copy.
    """
    cur_state_dict = model.state_dict()
    # The original ``list(...)[0:None]`` slice was a no-op; iterate directly.
    for name, param in resnet_dict.items():
        if name not in cur_state_dict:
            continue
        if isinstance(param, Parameter):
            param = param.data  # unwrap to a plain tensor before copy_
        try:
            cur_state_dict[name].copy_(param)
        except Exception:
            # BUGFIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; keep best-effort semantics but
            # only for ordinary errors (shape/dtype mismatch).
            continue
def run(model, data_iter, data_iter2, data_iter3, data_iter4, train_mode):
    """Run one epoch over the four batch iterators, optionally training.

    The four iterators feed the affinity, docking, screening and second
    screening objectives respectively. Relies on module-level ``args``,
    ``device``, ``loss_fn`` and ``optimizer``.

    Returns a tuple of per-objective mean losses, the per-key prediction /
    ground-truth dicts, and the summed total loss.
    """
    (model.train() if train_mode else model.eval())
    losses = []
    losses_der1 = []
    losses_der2 = []
    losses_docking = []
    losses_screening = []
    # BUGFIX: losses_var was previously created only when
    # args.with_uncertainty was set, yet it is read unconditionally when
    # averaging below -- raising NameError for non-uncertainty runs.
    losses_var = []
    save_pred = {}
    save_true = {}
    save_pred_docking = {}
    save_true_docking = {}
    save_pred_screening = {}
    save_true_screening = {}
    i_batch = 0
    while True:
        model.zero_grad()
        sample = next(data_iter, None)
        if (sample is None):
            break  # primary iterator exhausted: epoch done
        sample = utils.dic_to_device(sample, device)
        (keys, affinity) = (sample['key'], sample['affinity'])
        loss_all = 0.0
        # Derivative losses are only computed when their weights are nonzero.
        cal_der_loss = False
        if ((args.loss_der1_ratio > 0) or (args.loss_der2_ratio > 0.0)):
            cal_der_loss = True
        data = model(sample, cal_der_loss=cal_der_loss)
        if args.with_uncertainty:
            (pred, loss_der1, loss_der2, var) = data
        else:
            (pred, loss_der1, loss_der2) = data
        total_pred = pred.sum((- 1))
        loss = loss_fn(total_pred, affinity)
        loss_der2 = loss_der2.clamp(min=args.min_loss_der2)
        loss_all += loss
        loss_all += (loss_der1.sum() * args.loss_der1_ratio)
        loss_all += (loss_der2.sum() * args.loss_der2_ratio)
        if args.with_uncertainty:
            loss_var = utils.loss_var(var, total_pred, affinity, log=args.var_log)
            loss_all += (loss_var * args.loss_var_ratio)
            losses_var.append(loss_var.data.cpu().numpy())
        # Docking objective: hinge on (true - predicted) affinity.
        # NOTE(review): the secondary iterators are assumed to yield at
        # least as many batches as data_iter (next() returning None would
        # crash in dic_to_device) -- confirm loader sizing.
        loss_docking = torch.zeros((1,))
        keys_docking = []
        if (args.loss_docking_ratio > 0.0):
            sample_docking = next(data_iter2, None)
            sample_docking = utils.dic_to_device(sample_docking, device)
            (keys_docking, affinity_docking) = (sample_docking['key'], sample_docking['affinity'])
            pred_docking = model(sample_docking)[0]
            loss_docking = (affinity_docking - pred_docking.sum((- 1)))
            loss_docking = loss_docking.clamp(args.min_loss_docking).mean()
            loss_all += (loss_docking * args.loss_docking_ratio)
        loss_screening = torch.zeros((1,))
        keys_screening = []
        if (args.loss_screening_ratio > 0.0):
            sample_screening = next(data_iter3, None)
            sample_screening = utils.dic_to_device(sample_screening, device)
            (keys_screening, affinity_screening) = (sample_screening['key'], sample_screening['affinity'])
            pred_screening = model(sample_screening)[0]
            loss_screening = (affinity_screening - pred_screening.sum((- 1)))
            loss_screening = loss_screening.clamp(min=0.0).mean()
            loss_all += (loss_screening * args.loss_screening_ratio)
        loss_screening2 = torch.zeros((1,))
        keys_screening2 = []
        if (args.loss_screening2_ratio > 0.0):
            sample_screening2 = next(data_iter4, None)
            sample_screening2 = utils.dic_to_device(sample_screening2, device)
            (keys_screening2, affinity_screening2) = (sample_screening2['key'], sample_screening2['affinity'])
            pred_screening2 = model(sample_screening2)[0]
            loss_screening2 = (affinity_screening2 - pred_screening2.sum((- 1)))
            loss_screening2 = loss_screening2.clamp(min=0.0).mean()
            loss_all += (loss_screening2 * args.loss_screening2_ratio)
        if train_mode:
            loss_all.backward(retain_graph=True)
            optimizer.step()
        losses.append(loss.data.cpu().numpy())
        losses_der1.append(loss_der1.data.cpu().numpy())
        losses_der2.append(loss_der2.data.cpu().numpy())
        losses_docking.append(loss_docking.data.cpu().numpy())
        # Both screening objectives are pooled into one running list.
        losses_screening.append(loss_screening.data.cpu().numpy())
        losses_screening.append(loss_screening2.data.cpu().numpy())
        affinity = affinity.data.cpu().numpy()
        pred = pred.data.cpu().numpy()
        for i in range(len(keys)):
            save_pred[keys[i]] = pred[i]
            save_true[keys[i]] = affinity[i]
        if (len(keys_docking) > 0):
            pred_docking = pred_docking.data.cpu().numpy()
            for i in range(len(keys_docking)):
                save_pred_docking[keys_docking[i]] = pred_docking[i]
                save_true_docking[keys_docking[i]] = affinity_docking[i]
        if (len(keys_screening) > 0):
            pred_screening = pred_screening.data.cpu().numpy()
            for i in range(len(keys_screening)):
                save_pred_screening[keys_screening[i]] = pred_screening[i]
                save_true_screening[keys_screening[i]] = affinity_screening[i]
        if (len(keys_screening2) > 0):
            pred_screening2 = pred_screening2.data.cpu().numpy()
            for i in range(len(keys_screening2)):
                save_pred_screening[keys_screening2[i]] = pred_screening2[i]
                save_true_screening[keys_screening2[i]] = affinity_screening2[i]
        i_batch += 1
    losses = np.mean(np.array(losses))
    losses_der1 = np.mean(np.array(losses_der1))
    losses_der2 = np.mean(np.array(losses_der2))
    # Empty when uncertainty is disabled; report 0.0 instead of NaN/crash.
    losses_var = (np.mean(np.array(losses_var)) if losses_var else 0.0)
    losses_docking = np.mean(np.array(losses_docking))
    losses_screening = np.mean(np.array(losses_screening))
    total_losses = (((((losses + losses_der1) + losses_der2) + losses_var) + losses_docking) + losses_screening)
    return (losses, losses_der1, losses_der2, losses_docking, losses_screening, losses_var, save_pred, save_true, save_pred_docking, save_true_docking, save_pred_screening, save_true_screening, total_losses)
def sol_norm(summary_pdf, name_string, abundances, cube, elements_to_trace, element_names, sol_table, number_of_models_overplotted, produce_mock_data, use_mock_data, error_inflation):
    # Compare model element abundances at the Sun's birth time against solar
    # reference values; returns per-element log-likelihoods, the model
    # abundances, and display labels. Optionally plots a summary figure.
    elements_to_trace = element_names
    if ('C+N' in element_names):
        # Combined C+N: add the linear abundances, convert back to log10.
        new_array = np.log10((np.power(10, abundances['C']) + np.power(10, abundances['N'])))
        abundances = append_fields(abundances, 'C+N', new_array)
    # The Sun formed 4.5 Gyr before the end of the simulated time axis.
    time_sun = (cube['time'][(- 1)] - 4.5)
    # Index of the time step closest to the solar birth time.
    cut = [np.where((np.abs((cube['time'] - time_sun)) == np.min(np.abs((cube['time'] - time_sun)))))]
    if (len(cut[0][0]) != 1):
        cut = cut[0][0][0]
    time_model = cube['time'][cut]
    probabilities = []
    abundance_list = []
    error_list = []
    sun_list = []
    for (i, item) in enumerate(elements_to_trace):
        abundance_list.append(float(abundances[item][cut]))
        # Tabulated solar error with a small 0.01 dex floor added.
        error = (sol_table['error'][np.where((sol_table['Symbol'] == item))] + 0.01)
        error_list.append(error)
        if (item != 'C+N'):
            if (item == 'He'):
                sun_list.append(0.05)
            else:
                sun_list.append(0.04)
        else:
            sun_list.append(np.log10(2.0))
    if produce_mock_data:
        # Perturb the model abundances by the (inflated) errors and cache
        # them for a later mock-data run.
        mock_abundance_list = list(np.random.normal(loc=list(np.hstack(abundance_list)), scale=list((error_inflation * np.hstack(error_list)))))
        np.save('mock_data_temp/solar_abundances', mock_abundance_list)
    if use_mock_data:
        error_list = list((np.hstack(error_list) * error_inflation))
        sun_list = np.load('mock_data_temp/solar_abundances.npy')
    # Per-element Gaussian log-likelihood of the model matching the Sun.
    for (i, item) in enumerate(elements_to_trace):
        probabilities.append(np.log(float(gaussian(abundance_list[i], sun_list[i], error_list[i]))))
    probability = np.sum(probabilities)
    if (number_of_models_overplotted > 1):
        # Accumulate abundances/likelihoods across runs for comparison plots.
        if os.path.isfile('output/comparison/sun.npy'):
            old = np.load('output/comparison/sun.npy')
            old = list(old)
        else:
            old = []
        old.append(np.array(abundance_list))
        np.save('output/comparison/sun', old)
        if os.path.isfile('output/comparison/sun_likelihood.npy'):
            old_likelihood = np.load('output/comparison/sun_likelihood.npy')
            old_likelihood = list(old_likelihood)
        else:
            old_likelihood = []
        old_likelihood.append(np.array(probabilities))
        np.save('output/comparison/sun_likelihood', old_likelihood)
    if summary_pdf:
        # Diagnostic figure: solar values with error bars vs model curve.
        text_size = 12
        plt.rc('font', family='serif', size=text_size)
        plt.rc('xtick', labelsize=text_size)
        plt.rc('ytick', labelsize=text_size)
        plt.rc('axes', labelsize=text_size, lw=2.0)
        plt.rc('lines', linewidth=2)
        plt.rcParams['ytick.major.pad'] = '8'
        plt.clf()
        fig = plt.figure(figsize=(30.69, 8.27), dpi=100)
        ax = fig.add_subplot(111)
        plt.errorbar(np.arange(len(elements_to_trace)), sun_list, xerr=None, yerr=error_list, linestyle='', mew=3, marker='x', capthick=3, capsize=20, ms=10, elinewidth=3, label='solar')
        plt.plot(np.arange(len(elements_to_trace)), np.array(abundance_list), label=('model after %.2f Gyr' % time_model), linestyle='-')
        if (number_of_models_overplotted > 1):
            for item in old:
                plt.plot(np.arange(len(elements_to_trace)), np.array(item), linestyle='-', color='g', alpha=0.2)
        for i in range(len(elements_to_trace)):
            # NOTE(review): the 's=' keyword of annotate() was removed in
            # Matplotlib 3.3 (renamed 'text='); fails on recent versions.
            plt.annotate(xy=(i, (- 0.4)), s=('%.2f' % probabilities[i]))
        plt.grid('on')
        plt.ylim(((- 0.5), 0.5))
        elements_to_trace = [('[%s/H]' % item) for item in elements_to_trace]
        plt.xticks(np.arange(len(elements_to_trace)), elements_to_trace)
        plt.ylabel('abundance relative to solar in dex')
        plt.xlabel('Element')
        plt.title(('joint probability of agreeing with the sun (normed to pmax) = %.2f' % probability))
        plt.legend(loc='best', numpoints=1).get_frame().set_alpha(0.5)
        plt.savefig(('sol_norm_%s.png' % name_string))
    return (probabilities, abundance_list, elements_to_trace)
# NOTE(review): this bare call looks like a truncated registration decorator
# (e.g. ``@registry.register_builder('vg_vqa')``) mangled during extraction
# -- confirm against the upstream source.
_builder('vg_vqa')
class VGVQABuilder(BaseDatasetBuilder):
    # Dataset builder for Visual Genome VQA (train split only).
    train_dataset_cls = VGVQADataset
    # Maps the config variant name to its dataset yaml file.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/vg/defaults_vqa.yaml'}
def split_by_ratio(num_v: int, v_label: Union[(list, torch.Tensor, np.ndarray)], train_ratio: float, val_ratio: Optional[float]=None, test_ratio: Optional[float]=None):
    """Stratified train/val/test split of node indices by per-class ratios.

    Labels may arrive as a list, torch tensor or numpy array; they are
    normalized to a flat 0-based int numpy vector before splitting. The
    resulting index lists are converted to boolean masks via ``_idx2mask``.
    """
    if isinstance(v_label, list):
        v_label = np.array(v_label)
    if isinstance(v_label, torch.Tensor):
        v_label = v_label.detach().cpu().numpy()
    assert isinstance(v_label, np.ndarray)
    v_label = v_label.squeeze().astype(int)
    assert (v_label.ndim == 1)
    # Shift 1-based labels down to 0-based.
    if (v_label.min() == 1):
        v_label -= 1
    num_classes = np.unique(v_label).shape[0]
    val_ratio = 0 if val_ratio is None else val_ratio
    if test_ratio is not None:
        assert (train_ratio + val_ratio + test_ratio) <= 1
    else:
        # Without an explicit test ratio, the remainder becomes the test set.
        assert (train_ratio + val_ratio) < 1
    train_idx: list = []
    test_idx: list = []
    val_idx = [] if val_ratio != 0 else None
    for lbl in range(num_classes):
        class_nodes = np.where((v_label == lbl))[0]
        class_size = class_nodes.shape[0]
        random.shuffle(class_nodes)
        n_train = int(class_size * train_ratio)
        n_val = int(class_size * val_ratio)
        train_idx.extend(class_nodes[:n_train])
        if val_idx is not None:
            val_idx.extend(class_nodes[n_train:(n_train + n_val)])
        if test_ratio is not None:
            n_test = int(class_size * test_ratio)
            test_idx.extend(class_nodes[(n_train + n_val):(n_train + n_val + n_test)])
        else:
            test_idx.extend(class_nodes[(n_train + n_val):])
    return _idx2mask(num_v, train_idx, test_idx, val_idx)
def _add_to_tfrecord(dataset_dir, name, tfrecord_writer):
    # Load one image with its annotations, convert it to a tf.train.Example
    # and append the serialized record to the open TFRecord writer.
    (image_data, shape, bboxes, labels, labels_text, difficult, truncated) = _process_image(dataset_dir, name)
    example = _convert_to_example(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
    tfrecord_writer.write(example.SerializeToString())
class TestCluster(unittest.TestCase):
    """Tests for Cluster socket reservation and release bookkeeping."""

    def setUp(self):
        # Two 2-socket nodes -> four reservable sockets in total.
        node_lst = [Node('node1', 'localhost', 2, 4), Node('node2', 'localhost', 2, 4)]
        self.cluster = Cluster(node_lst, db_path=db_path)
        # BUGFIX: the original ``script_url`` literal was corrupted in the
        # source ("script_url=' optimized=True, ...") and did not parse; an
        # empty URL keeps the Task well-formed. NOTE(review): restore the
        # real URL from version control.
        self.task = Task(task_id='1', arguments=['arg1', 'arg2'], workers=2, status='pending', script_url='', optimized=True, approach='static', requirement=['req1', 'req2'], result='', q_model_path='q_model_path')

    @classmethod
    def tearDownClass(cls):
        # BUGFIX: unittest invokes tearDownClass on the class, so it must be
        # a classmethod; as a plain method the call raised TypeError.
        shutil.rmtree('ns_workspace')

    def test_reserve_resource(self):
        # Reserving 2 workers should consume node1's sockets first.
        task = self.task
        reserved_resource_lst = self.cluster.reserve_resource(task)
        self.assertEqual(len(reserved_resource_lst), 2)
        self.assertEqual(self.cluster.socket_queue, ['2 node2', '2 node2'])

    def test_free_resource(self):
        # Freed sockets are appended back to the end of the queue.
        task = self.task
        reserved_resource_lst = self.cluster.reserve_resource(task)
        self.cluster.free_resource(reserved_resource_lst)
        self.assertEqual(self.cluster.socket_queue, ['2 node2', '2 node2', '1 node1', '1 node1'])

    def test_get_free_socket(self):
        free_socket_lst = self.cluster.get_free_socket(4)
        self.assertEqual(len(free_socket_lst), 4)
        self.assertEqual(free_socket_lst, ['1 node1', '1 node1', '2 node2', '2 node2'])
        self.assertEqual(self.cluster.socket_queue, [])
        # Requesting more sockets than exist reports failure as 0.
        free_socket_lst = self.cluster.get_free_socket(10)
        self.assertEqual(free_socket_lst, 0)
def concat_hunks(file_patches: list[AvgFilePatch], delim: str='') -> str:
    """Join the result-hunk text of every hunk in *file_patches* with *delim*."""
    hunk_texts = [cast(str, hunk.result.hunk) for fp in file_patches for hunk in fp.hunks]
    return delim.join(hunk_texts)
class SubPolicy(object):
    """One AutoAugment sub-policy: two (probability, operation, magnitude)
    stages applied independently to a PIL image.

    Args:
        p1, p2: probabilities of applying each stage.
        operation1, operation2: operation names (keys of ``ranges``/``func``).
        magnitude_idx1, magnitude_idx2: discrete magnitude levels (0-9).
        fillcolor: fill for the affine transforms.
        magnitude_factor: scales the magnitude indices (clamped to 0-9).
    """

    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128), magnitude_factor=1):
        # Lookup tables mapping each operation to its 10 magnitude levels.
        ranges = {
            'shearX': np.linspace(0, 0.3, 10),
            'shearY': np.linspace(0, 0.3, 10),
            'translateX': np.linspace(0, (150 / 331), 10),
            'translateY': np.linspace(0, (150 / 331), 10),
            'rotate': np.linspace(0, 30, 10),
            'color': np.linspace(0.0, 0.9, 10),
            # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the documented replacement.
            'posterize': np.round(np.linspace(8, 4, 10), 0).astype(int),
            'solarize': np.linspace(256, 0, 10),
            'contrast': np.linspace(0.0, 0.9, 10),
            'sharpness': np.linspace(0.0, 0.9, 10),
            'brightness': np.linspace(0.0, 0.9, 10),
            'autocontrast': ([0] * 10),
            'equalize': ([0] * 10),
            'invert': ([0] * 10),
        }

        def rotate_with_fill(img, magnitude):
            # Rotate with a neutral-grey background instead of black corners.
            rot = img.convert('RGBA').rotate(magnitude)
            return Image.composite(rot, Image.new('RGBA', rot.size, ((128,) * 4)), rot).convert(img.mode)

        # Each op randomly flips the sign of its magnitude where applicable.
        func = {
            'shearX': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, (magnitude * random.choice([(- 1), 1])), 0, 0, 1, 0), Image.BICUBIC, fillcolor=fillcolor)),
            'shearY': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, 0, (magnitude * random.choice([(- 1), 1])), 1, 0), Image.BICUBIC, fillcolor=fillcolor)),
            'translateX': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, ((magnitude * img.size[0]) * random.choice([(- 1), 1])), 0, 1, 0), fillcolor=fillcolor)),
            'translateY': (lambda img, magnitude: img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, ((magnitude * img.size[1]) * random.choice([(- 1), 1]))), fillcolor=fillcolor)),
            'rotate': (lambda img, magnitude: rotate_with_fill(img, magnitude)),
            'color': (lambda img, magnitude: ImageEnhance.Color(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))),
            'posterize': (lambda img, magnitude: ImageOps.posterize(img, magnitude)),
            'solarize': (lambda img, magnitude: ImageOps.solarize(img, magnitude)),
            'contrast': (lambda img, magnitude: ImageEnhance.Contrast(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))),
            'sharpness': (lambda img, magnitude: ImageEnhance.Sharpness(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))),
            'brightness': (lambda img, magnitude: ImageEnhance.Brightness(img).enhance((1 + (magnitude * random.choice([(- 1), 1]))))),
            'autocontrast': (lambda img, magnitude: ImageOps.autocontrast(img)),
            'equalize': (lambda img, magnitude: ImageOps.equalize(img)),
            'invert': (lambda img, magnitude: ImageOps.invert(img)),
        }
        # Scale the requested magnitude indices, clamped to the valid range.
        magnitude_1 = max(0, min(9, int((magnitude_factor * magnitude_idx1))))
        magnitude_2 = max(0, min(9, int((magnitude_factor * magnitude_idx2))))
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_2]

    def __call__(self, img):
        """Apply each stage independently with its configured probability."""
        if (random.random() < self.p1):
            img = self.operation1(img, self.magnitude1)
        if (random.random() < self.p2):
            img = self.operation2(img, self.magnitude2)
        return img
class ChatCompletionRequest(BaseModel):
    """OpenAI-compatible /v1/chat/completions request schema (pydantic)."""
    # Identifier of the model to serve the request with.
    model: str
    # Either a raw prompt string or a list of {'role': ..., 'content': ...} dicts.
    messages: Union[(str, List[Dict[(str, str)]])]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    # Number of completions to generate per request.
    n: Optional[int] = 1
    max_tokens: Optional[int] = None
    # Stop string(s); default_factory avoids a shared mutable default.
    stop: Optional[Union[(str, List[str])]] = Field(default_factory=list)
    stream: Optional[bool] = False
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    logit_bias: Optional[Dict[(str, float)]] = None
    user: Optional[str] = None
    # NOTE(review): the fields below look like engine-specific sampling
    # extensions beyond the OpenAI schema -- confirm against the server docs.
    best_of: Optional[int] = None
    # -1 conventionally disables top-k filtering.
    top_k: Optional[int] = (- 1)
    ignore_eos: Optional[bool] = False
    use_beam_search: Optional[bool] = False
    stop_token_ids: Optional[List[int]] = Field(default_factory=list)
    skip_special_tokens: Optional[bool] = True
def read_annotation_file(config, filename, doc):
    """Parse one annotation file into a list of Items.

    Each line has the form ``<spans>-<labels...>``; the part before the first
    '-' is parsed as spans, the rest as labels. Returns an empty list when
    *filename* does not resolve to exactly one existing file.
    """
    items = []
    if (len(glob.glob(filename)) == 1):
        # BUGFIX: use a context manager so the file handle is closed (the
        # bare ``open`` leaked it); also dropped the unused ``fields`` local.
        with open(filename) as annotation_file:
            for line in annotation_file:
                spans = get_spans(line.split('-')[0], doc, config)
                labels = get_labels('-'.join(line.split('-')[1:]), config)
                items.append(Item(doc, spans, labels))
        logging.info('Read {}'.format(filename))
    return items
class WebKB(InMemoryDataset):
    """WebKB web-page graph datasets (cornell/texas/washington/wisconsin).

    Nodes are web pages with bag-of-words features, edges are hyperlinks
    (made undirected), and labels are page categories.
    """

    # NOTE(review): the URL literal was truncated in the source; this is the
    # geom-gcn mirror used by torch_geometric's WebKB dataset -- confirm.
    url = 'https://raw.githubusercontent.com/graphdml-uiuc-jlu/geom-gcn/master/new_data'

    def __init__(self, root, name, transform=None, pre_transform=None):
        self.name = name.lower()
        assert (self.name in ['cornell', 'texas', 'washington', 'wisconsin'])
        super(WebKB, self).__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    # BUGFIX: these four accessors must be properties -- InMemoryDataset
    # reads them as attributes, and download() below iterates
    # self.raw_file_names; without @property it would iterate a bound method.
    @property
    def raw_dir(self):
        return osp.join(self.root, self.name, 'raw')

    @property
    def processed_dir(self):
        return osp.join(self.root, self.name, 'processed')

    @property
    def raw_file_names(self):
        return ['out1_node_feature_label.txt', 'out1_graph_edges.txt']

    @property
    def processed_file_names(self):
        return 'data.pt'

    def download(self):
        for name in self.raw_file_names:
            download_url(f'{self.url}/{self.name}/{name}', self.raw_dir)

    def process(self):
        # Node file: tab-separated "<id>\t<comma-separated features>\t<label>",
        # skipping the header line and the trailing blank line.
        with open(self.raw_paths[0], 'r') as f:
            data = f.read().split('\n')[1:(- 1)]
        x = [[float(v) for v in r.split('\t')[1].split(',')] for r in data]
        x = torch.tensor(x, dtype=torch.float)
        y = [int(r.split('\t')[2]) for r in data]
        y = torch.tensor(y, dtype=torch.long)
        # Edge file: one "src\tdst" pair per line after the header.
        with open(self.raw_paths[1], 'r') as f:
            data = f.read().split('\n')[1:(- 1)]
        data = [[int(v) for v in r.split('\t')] for r in data]
        edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
        edge_index = to_undirected(edge_index)
        # Remove duplicate edges introduced by symmetrization.
        (edge_index, _) = coalesce(edge_index, None, x.size(0), x.size(0))
        data = Data(x=x, edge_index=edge_index, y=y)
        data = (data if (self.pre_transform is None) else self.pre_transform(data))
        torch.save(self.collate([data]), self.processed_paths[0])

    def __repr__(self):
        return '{}()'.format(self.name)
def get_filenames(dir, cifar_classnum):
    """Return the CIFAR pickle-batch paths under *dir*.

    Args:
        dir: dataset root directory.
        cifar_classnum: 10 (five train batches + test batch) or 100
            (single train and test files).

    Raises:
        ValueError: for any other class count. (The original ``assert``
        vanished under ``python -O`` and then fell through to an
        UnboundLocalError on ``filenames``.)
    """
    if cifar_classnum == 10:
        filenames = [os.path.join(dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]
        filenames.append(os.path.join(dir, 'cifar-10-batches-py', 'test_batch'))
    elif cifar_classnum == 100:
        filenames = [os.path.join(dir, 'cifar-100-python', 'train'), os.path.join(dir, 'cifar-100-python', 'test')]
    else:
        raise ValueError('cifar_classnum must be 10 or 100, got %r' % (cifar_classnum,))
    return filenames
# NOTE(review): the bare '_model' below looks like a truncated registration
# decorator (e.g. ``@register_model``) mangled during extraction -- confirm
# against the upstream source.
_model
def regnety_160(pretrained=False, **kwargs):
    # RegNetY-16GF variant: thin factory delegating to the shared builder.
    return _regnet('regnety_160', pretrained, **kwargs)
class LZ09_F6(LZ09):
    """Tri-objective LZ09 benchmark problem F6 (dtype=1, ltype=32, ptype=31)."""

    def __init__(self, number_of_variables=10):
        super(LZ09_F6, self).__init__(number_of_variables, dtype=1, ltype=32, ptype=31)
        # All three objectives are minimized.
        self.obj_directions = [self.MINIMIZE, self.MINIMIZE, self.MINIMIZE]
        self.obj_labels = ['f(x)', 'f(y)', 'f(z)']

    def number_of_objectives(self) -> int:
        """Number of objectives, derived from the direction list."""
        return len(self.obj_directions)

    def name(self):
        """Human-readable problem identifier."""
        return 'LZ09_F6'
def time_tensorflow_run(session, target, info_string):
    """Benchmark ``session.run(target)`` over FLAGS.num_batches steps.

    The first ten iterations are warm-up and excluded from the statistics;
    the mean and standard deviation per batch are printed at the end.
    """
    num_steps_burn_in = 10
    total_duration = 0.0
    total_duration_squared = 0.0
    for step in range(FLAGS.num_batches + num_steps_burn_in):
        start_time = time.time()
        _ = session.run(target)
        duration = time.time() - start_time
        if step < num_steps_burn_in:
            continue  # warm-up steps are not measured
        if step % 10 == 0:
            print('%s: step %d, duration = %.3f' % (datetime.now(), step - num_steps_burn_in, duration))
        total_duration += duration
        total_duration_squared += duration * duration
    mn = total_duration / FLAGS.num_batches
    vr = total_duration_squared / FLAGS.num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' % (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
def read_cameras_binary(path_to_model_file):
    """Parse a COLMAP ``cameras.bin`` file into {camera_id: Camera}.

    Layout: a uint64 camera count, then per camera two int32s (id, model id),
    two uint64s (width, height) and ``num_params`` float64 intrinsics.
    """
    cameras = {}
    with open(path_to_model_file, 'rb') as fid:
        num_cameras = read_next_bytes(fid, 8, 'Q')[0]
        for _ in range(num_cameras):
            props = read_next_bytes(fid, num_bytes=24, format_char_sequence='iiQQ')
            cam_id, model_id, width, height = props[0], props[1], props[2], props[3]
            model_name = CAMERA_MODEL_IDS[model_id].model_name
            # The parameter count depends on the camera model.
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence='d' * num_params)
            cameras[cam_id] = Camera(id=cam_id, model=model_name, width=width, height=height, params=np.array(params))
    assert (len(cameras) == num_cameras)
    return cameras
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.