def main():
import argparse
    parser = argparse.ArgumentParser(description='The MSD provides data as 4D Niftis with the modality being the first dimension. We think this may be cumbersome for some users and therefore expect 3D niftis instead, with one file per modality. This utility will convert 4D MSD data into the format nnU-Net expects')
parser.add_argument('-i', help='Input folder. Must point to a TaskXX_TASKNAME folder as downloaded from the MSD website', required=True)
parser.add_argument('-p', required=False, default=default_num_threads, type=int, help=('Use this to specify how many processes are used to run the script. Default is %d' % default_num_threads))
parser.add_argument('-output_task_id', required=False, default=None, type=int, help='If specified, this will overwrite the task id in the output folder. If unspecified, the task id of the input folder will be used.')
args = parser.parse_args()
crawl_and_remove_hidden_from_decathlon(args.i)
    split_4d(args.i, args.p, args.output_task_id)

@require_torch
class SelectiveCommonTest(unittest.TestCase):
all_model_classes = ((MBartForConditionalGeneration,) if is_torch_available() else ())
test_save_load__keys_to_ignore_on_save = ModelTesterMixin.test_save_load__keys_to_ignore_on_save
def setUp(self):
        self.model_tester = ModelTester(self)

def train(epoch, model, optimizer, train_loader, grapher, prefix='train'):
    return execute_graph(epoch, model, train_loader, grapher, optimizer, prefix=prefix)

class Radiopaedia(Repository):
case_class = RadiopaediaCase
cache_class = RadiopaediaMetadataCache
external_search_limiter = 'site:radiopaedia.org/cases/'
    @classmethod
    def is_end_of_results(cls, browser):
return ('No results were found with these refinement filters applied.' in browser.page_source)
    @classmethod
    def format_internal_search_url(cls, search_terms):
def format_radiopaedia_search_url(search_terms, scope):
            # The URL literal was lost in extraction; this is a plausible
            # reconstruction of the Radiopaedia search endpoint.
            base_url = 'https://radiopaedia.org/search?q={}&scope={}'
            return base_url.format('+'.join(search_terms), scope)
def format_search_url(search_terms):
return format_radiopaedia_search_url(search_terms, 'cases')
return format_search_url(search_terms)
    @classmethod
    def extract_results_from_search_page(cls, browser):
out = []
        for a in browser.find_elements_by_xpath("//a[contains(@href,'/cases/') and @class = 'search-result search-result-case']"):
out.append(a.get_attribute('href'))
return list(set(out))
    @classmethod
    def get_next_search_page(cls, browser):
try:
            next_ref = browser.find_element_by_xpath("//a[@class='next_page']")
return next_ref.get_attribute('href')
        except Exception:  # element lookup failed: there is no next page
            return None

def configure_metadata(metadata_root):
metadata = mch()
metadata.image_ids = join(metadata_root, 'image_ids.txt')
metadata.image_ids_proxy = join(metadata_root, 'image_ids_proxy.txt')
metadata.class_labels = join(metadata_root, 'class_labels.txt')
metadata.image_sizes = join(metadata_root, 'image_sizes.txt')
metadata.localization = join(metadata_root, 'localization.txt')
    return metadata

def advtest_fast(model, loader, adversary, args):
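    # Evaluates a model on a pre-generated adversarial dataset. Returns clean
    # top-1 accuracy, adversarial top-1 accuracy, and the attack success rate,
    # i.e. the fraction of correctly classified inputs whose adversarial
    # counterpart changes the prediction. `loader` and `adversary` are unused.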
advDataset = torch.load(args.adv_data_dir)
test_loader = torch.utils.data.DataLoader(advDataset, batch_size=4, shuffle=False, num_workers=0, pin_memory=False)
model.eval()
    total = 0
top1_clean = 0
top1_adv = 0
adv_success = 0
adv_trial = 0
for (i, (batch, label, adv_batch, adv_label)) in enumerate(test_loader):
(batch, label) = (batch.to('cuda'), label.to('cuda'))
adv_batch = adv_batch.to('cuda')
total += batch.size(0)
out_clean = model(batch)
out_adv = model(adv_batch)
(_, pred_clean) = out_clean.max(dim=1)
(_, pred_adv) = out_adv.max(dim=1)
clean_correct = pred_clean.eq(label)
adv_trial += int(clean_correct.sum().item())
adv_success += int(pred_adv[clean_correct].eq(label[clean_correct]).sum().detach().item())
top1_clean += int(pred_clean.eq(label).sum().detach().item())
top1_adv += int(pred_adv.eq(label).sum().detach().item())
    print('Finished fast adversarial test')
del test_loader
del advDataset
    return (((float(top1_clean) / total) * 100), ((float(top1_adv) / total) * 100), ((float((adv_trial - adv_success)) / adv_trial) * 100))

def parse_to_prune_tf(config, model):
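    # Collects the prunable layers of a Keras model: the final linear
    # (classifier) head is always excluded, layers are matched by class name
    # against config['pruning_op_types'], and anything matching an
    # 'excluded_op_names' regex is dropped from the result.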
modules = {}
classifier_head_name = parse_last_linear_tf(model)
if (classifier_head_name is not None):
config['excluded_op_names'].append(classifier_head_name)
if ((config['op_names'] is None) or (config['op_names'] == [])):
config['op_names'] = ['.*']
for layer in model.layers:
for layer_type in config['pruning_op_types']:
if ((layer_type in layer.__class__.__name__) and bool(layer.weights)):
modules[layer.name] = layer
    # Drop layers excluded from pruning.
exclude_names = config['excluded_op_names']
patterns = [re.compile(s) for s in exclude_names]
if (len(patterns) <= 0):
return modules
new_modules = {}
for name in modules.keys():
if any([p.search(name) for p in patterns]):
continue
new_modules[name] = modules[name]
    return new_modules

class TestSCEModel(unittest.TestCase):
def setUp(self) -> None:
self.trunk_cfg = DictConfig({'_target_': 'eztorch.models.trunks.create_resnet', 'name': 'resnet18', 'num_classes': 0, 'small_input': True})
self.projector_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
self.predictor_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
self.queue_cfg = DictConfig({'_target_': 'eztorch.models.queues.FIFOQueue', 'size': 8, 'feature_dim': 2})
self.temp = 0.1
self.temp_m = 0.05
self.coeff = 0.5
def test_sce_init(self):
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=1, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=2, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=None, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=self.queue_cfg, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
def test_sce_sym_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=False, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_sym_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_sym_with_queue_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=DictConfig({'size': 128, 'feature_dim': 512}), num_devices=1, simulate_n_devices=1, sym=True, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
def test_sce_sym_mutual_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff, mutual_pass=True)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
assert (model.current_momentum == 0.98)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
assert (model.current_momentum == 0.98)
def test_sce_mutual_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m, coeff=self.coeff, mutual_pass=True)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
assert (model.current_momentum == 0.98)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
        assert (model.current_momentum == 0.98)

def _assign_device_option(predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]):
def _get_device_type(torch_tensor):
assert (torch_tensor.device.type in ['cpu', 'cuda'])
assert (torch_tensor.device.index == 0)
return torch_tensor.device.type
def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
for (op, ssa_i) in zip(net_proto.op, net_ssa):
if (op.type in ['CopyCPUToGPU', 'CopyGPUToCPU']):
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
else:
devices = [blob_device_types[b] for b in (ssa_i[0] + ssa_i[1])]
assert all(((d == devices[0]) for d in devices))
if (devices[0] == 'cuda'):
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
predict_net_input_device_types = {(name, 0): _get_device_type(tensor) for (name, tensor) in zip(predict_net.external_input, tensor_inputs)}
predict_net_device_types = infer_device_type(predict_net, known_status=predict_net_input_device_types, device_name_style='pytorch')
(predict_net_ssa, _) = core.get_ssa(predict_net)
_assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
(init_net_ssa, versions) = core.get_ssa(init_net)
init_net_output_device_types = {(name, versions[name]): predict_net_device_types[(name, 0)] for name in init_net.external_output}
init_net_device_types = infer_device_type(init_net, known_status=init_net_output_device_types, device_name_style='pytorch')
    _assign_op_device_option(init_net, init_net_ssa, init_net_device_types)

def rw_update(new_observation, new_response, current_belief, current_action_probability, learning_rate):
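    # Rescorla-Wagner-style update on the logit scale (`pt` is assumed to be a
    # PyTensor/Theano-like backend): the action probability is
    # sigmoid(current_belief), `error` is the Bernoulli log-likelihood of the
    # observed response, and the belief moves toward the new observation in
    # proportion to `learning_rate`.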
action_probability = (1 / (1 + pt.exp((- current_belief))))
error = ((new_response * pt.log(action_probability)) + ((1 - new_response) * pt.log((1 - action_probability))))
    transformed_old_value = action_probability  # same sigmoid as computed above
new_belief = (current_belief + (learning_rate * (new_observation - transformed_old_value)))
    return (new_belief, error)

class ToTensor(object):
def __init__(self):
self.worker = (lambda x: (F.to_tensor(x) * 255))
def __call__(self, img_group):
img_group = [self.worker(img) for img in img_group]
        return torch.stack(img_group, 0)

class RingBuffer(object):
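    # Fixed-capacity circular buffer over a preallocated numpy array. Once
    # `maxlen` entries are stored, append() advances `start` so the oldest
    # entry is overwritten; indexing is always relative to the oldest entry.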
def __init__(self, maxlen, shape, dtype='float32'):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = np.zeros(((maxlen,) + shape)).astype(dtype)
def __len__(self):
return self.length
def __getitem__(self, idx):
if ((idx < 0) or (idx >= self.length)):
raise KeyError()
return self.data[((self.start + idx) % self.maxlen)]
def get_batch(self, idxs):
return self.data[((self.start + idxs) % self.maxlen)]
def append(self, v):
if (self.length < self.maxlen):
self.length += 1
elif (self.length == self.maxlen):
self.start = ((self.start + 1) % self.maxlen)
else:
raise RuntimeError()
        self.data[(((self.start + self.length) - 1) % self.maxlen)] = v
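# A minimal usage sketch (hypothetical values, assuming numpy is imported as np):
#   buf = RingBuffer(maxlen=3, shape=(2,))
#   for i in range(5):
#       buf.append(np.full(2, i))
#   buf[0]  # -> array([2., 2.]), the oldest entry still in the buffer
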
def create_emitter_tests(out):
out = Writer(out)
includes = ['handler_test.h', 'yaml-cpp-0.7.0/yaml.h', 'gmock/gmock.h', 'gtest/gtest.h']
for include in includes:
out.writeln(('#include "%s"' % include))
out.writeln('')
usings = ['::testing::_']
for using in usings:
out.writeln(('using %s;' % using))
out.writeln('')
with Scope(out, 'namespace YAML', 0) as _:
with Scope(out, 'namespace', 0) as _:
out.writeln('')
out.writeln('typedef HandlerTest GenEmitterTest;')
out.writeln('')
tests = list(gen_tests())
for test in tests:
with Scope(out, ('TEST_F(%s, %s)' % ('GenEmitterTest', test['name'])), 2) as _:
out.writeln('Emitter out;')
for event in test['events']:
emit = event['emit']
if isinstance(emit, list):
for e in emit:
out.writeln(('out << %s;' % e))
elif emit:
out.writeln(('out << %s;' % emit))
out.writeln('')
for event in test['events']:
handle = event['handle']
if isinstance(handle, list):
for e in handle:
out.writeln(('EXPECT_CALL(handler, %s);' % e))
elif handle:
out.writeln(('EXPECT_CALL(handler, %s);' % handle))
out.writeln('Parse(out.c_str());')
                out.writeln('')

class DSpritesTestXPositionGrouped(data_testing_lib.BaseVTABDataTest):
def setUp(self):
        super(DSpritesTestXPositionGrouped, self).setUp(data_wrapper=dsprites.DSpritesData('label_x_position', 15), num_classes=15, expected_num_samples=dict(train=589824, val=73728, trainval=663552, test=73728, train800val200=1000, train800=800, val200=200), required_tensors_shapes={'image': (64, 64, 3), 'label': ()}, tfds_label_key_map={})

class TorchDataloader(Dataset):
def __init__(self, dataset=None, preprocess=None, transform=None, sampler=None, use_cache=True, steps_per_epoch=None, **kwargs):
self.dataset = dataset
self.preprocess = preprocess
self.steps_per_epoch = steps_per_epoch
if ((preprocess is not None) and use_cache):
cache_dir = getattr(dataset.cfg, 'cache_dir')
assert (cache_dir is not None), 'cache directory is not given'
self.cache_convert = Cache(preprocess, cache_dir=cache_dir, cache_key=get_hash(repr(preprocess)))
uncached = [idx for idx in range(len(dataset)) if (dataset.get_attr(idx)['name'] not in self.cache_convert.cached_ids)]
if (len(uncached) > 0):
for idx in tqdm(range(len(dataset)), desc='preprocess'):
attr = dataset.get_attr(idx)
name = attr['name']
if (name in self.cache_convert.cached_ids):
continue
data = dataset.get_data(idx)
self.cache_convert(name, data, attr)
else:
self.cache_convert = None
self.transform = transform
if (sampler is not None):
sampler.initialize_with_dataloader(self)
def __getitem__(self, index):
dataset = self.dataset
index = (index % len(dataset))
attr = dataset.get_attr(index)
if self.cache_convert:
data = self.cache_convert(attr['name'])
elif self.preprocess:
data = self.preprocess(dataset.get_data(index), attr)
else:
data = dataset.get_data(index)
if (self.transform is not None):
data = self.transform(data, attr)
inputs = {'data': data, 'attr': attr}
return inputs
def __len__(self):
if (self.steps_per_epoch is not None):
steps_per_epoch = self.steps_per_epoch
else:
steps_per_epoch = len(self.dataset)
        return steps_per_epoch

def PrintModelBase(mbIndi):
(m, n) = mbIndi.shape
    for i in range(m):
        for j in range(n):
if (not np.isnan(mbIndi[(i, j)])):
print(('[%d, %d]\t%f' % (i, j, mbIndi[(i, j)])))
        print('\n')

class ClusterInfo():
def ip_addr(self):
return ray._private.services.get_node_ip_address()
def set_cpu_affinity(self, core_list):
proclist_str = f"[{','.join([str(i) for i in core_list])}]"
os.environ['OMP_NUM_THREADS'] = str(len(core_list))
os.environ['OMP_SCHEDULE'] = 'STATIC'
os.environ['OMP_PROC_BIND'] = 'CLOSE'
os.environ['KMP_AFFINITY'] = f'verbose,granularity=fine,proclist={proclist_str},explicit'
os.environ['GOMP_CPU_AFFINITY'] = proclist_str
os.sched_setaffinity(0, set(core_list))
def disable_cpu_affinity(self, num_cores):
os.environ['OMP_NUM_THREADS'] = str(num_cores)
os.environ['KMP_AFFINITY'] = 'disabled'
os.environ['OMP_PROC_BIND'] = 'FALSE'
def run(self, func, *args, **kwargs):
        return func(*args, **kwargs)

class TestMXNetSymbol(TestCase):
def test_symbol(self):
config = create_config(log_interval=2, seed=42)
estimator = Estimator.from_mxnet(config=config, model_creator=get_model, validation_metrics_creator=get_metrics, eval_metrics_creator=get_metrics)
estimator.fit(get_train_data_iter, validation_data=get_test_data_iter, epochs=2, batch_size=16)
        estimator.shutdown()

def validate_minibatch_size_str(minibatch_size_str):
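    # Accepts either a bare size/range (delegated to validate_range_str) or a
    # '/'-separated list of 'count=range' pairs with positive integer counts,
    # as used for Kaldi nnet3-style minibatch-size specifications.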
if (not isinstance(minibatch_size_str, str)):
return False
a = minibatch_size_str.split('/')
assert (len(a) != 0)
for elem in a:
b = elem.split('=')
if (len(b) != 2):
if ((len(a) == 1) and (len(b) == 1)):
return validate_range_str(elem)
else:
return False
try:
if (int(b[0]) <= 0):
return False
        except ValueError:
return False
if (not validate_range_str(b[1])):
return False
    return True

class HighResolutionNet(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d):
super(HighResolutionNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = norm_layer(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = norm_layer(64)
self.relu = nn.ReLU(inplace=True)
self.stage1_cfg = cfg.MODEL.HRNET.STAGE1
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks, norm_layer=norm_layer)
stage1_out_channel = (block.expansion * num_channels)
self.stage2_cfg = cfg.MODEL.HRNET.STAGE2
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels, norm_layer=norm_layer)
(self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = cfg.MODEL.HRNET.STAGE3
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels, norm_layer=norm_layer)
(self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = cfg.MODEL.HRNET.STAGE4
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [(num_channels[i] * block.expansion) for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels, norm_layer=norm_layer)
(self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
        self.last_inp_channels = int(np.sum(pre_stage_channels))
def _make_head(self, pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
incre_modules = []
for (i, channels) in enumerate(pre_stage_channels):
incre_module = self._make_layer(head_block, channels, head_channels[i], 1, stride=1)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
downsamp_modules = []
for i in range((len(pre_stage_channels) - 1)):
in_channels = (head_channels[i] * head_block.expansion)
out_channels = (head_channels[(i + 1)] * head_block.expansion)
downsamp_module = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(nn.Conv2d(in_channels=(head_channels[3] * head_block.expansion), out_channels=2048, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(2048), nn.ReLU(inplace=True))
return (incre_modules, downsamp_modules, final_layer)
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer, norm_layer=nn.BatchNorm2d):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if (i < num_branches_pre):
if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
transition_layers.append(nn.Sequential(nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), norm_layer(num_channels_cur_layer[i]), nn.ReLU(inplace=True)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(((i + 1) - num_branches_pre)):
inchannels = num_channels_pre_layer[(- 1)]
outchannels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else inchannels)
conv3x3s.append(nn.Sequential(nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), norm_layer(outchannels), nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d):
downsample = None
if ((stride != 1) or (inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
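        # Stacks `num_modules` HighResolutionModules; multi-scale output is
        # only switched off for the final module when multi_scale_output is
        # False, so intermediate modules always exchange features across
        # branches.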
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
if ((not multi_scale_output) and (i == (num_modules - 1))):
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output))
num_inchannels = modules[(- 1)].get_num_inchannels()
return (nn.Sequential(*modules), num_inchannels)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if (self.transition1[i] is not None):
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if (self.transition2[i] is not None):
x_list.append(self.transition2[i](y_list[(- 1)]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if (self.transition3[i] is not None):
x_list.append(self.transition3[i](y_list[(- 1)]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
return tuple(y_list)
def init_weights(self, pretrained=''):
logging.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained)
logging.info('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict.keys())}
for (k, _) in pretrained_dict.items():
logging.info('=> loading {} pretrained model {}'.format(k, pretrained))
model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)

class Grayscale(FeatureBase):
def dim(self):
return 1
def stride(self):
return self.pool_stride
def extract(self, im: torch.Tensor):
        return torch.mean(((im / 255) - 0.5), 1, keepdim=True)

class AdaboostRegressor(AutotabularRegressionAlgorithm):
def __init__(self, n_estimators, learning_rate, loss, max_depth, random_state=None):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.random_state = random_state
self.max_depth = max_depth
self.estimator = None
def fit(self, X, Y):
import sklearn.ensemble
import sklearn.tree
self.n_estimators = int(self.n_estimators)
self.learning_rate = float(self.learning_rate)
self.max_depth = int(self.max_depth)
base_estimator = sklearn.tree.DecisionTreeRegressor(max_depth=self.max_depth)
self.estimator = sklearn.ensemble.AdaBoostRegressor(base_estimator=base_estimator, n_estimators=self.n_estimators, learning_rate=self.learning_rate, loss=self.loss, random_state=self.random_state)
self.estimator.fit(X, Y)
return self
def predict(self, X):
if (self.estimator is None):
raise NotImplementedError
return self.estimator.predict(X)
    @staticmethod
    def get_properties(dataset_properties=None):
return {'shortname': 'AB', 'name': 'AdaBoost Regressor', 'handles_regression': True, 'handles_classification': False, 'handles_multiclass': False, 'handles_multilabel': False, 'handles_multioutput': False, 'is_deterministic': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (PREDICTIONS,)}
    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_estimators = UniformIntegerHyperparameter(name='n_estimators', lower=50, upper=500, default_value=50, log=False)
learning_rate = UniformFloatHyperparameter(name='learning_rate', lower=0.01, upper=2, default_value=0.1, log=True)
loss = CategoricalHyperparameter(name='loss', choices=['linear', 'square', 'exponential'], default_value='linear')
max_depth = UniformIntegerHyperparameter(name='max_depth', lower=1, upper=10, default_value=1, log=False)
cs.add_hyperparameters([n_estimators, learning_rate, loss, max_depth])
        return cs

class ConvNorm2D(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'):
        super(ConvNorm2D, self).__init__()
        if padding is None:
            # Default to 'same' padding for odd kernels (mirrors the usual 1D ConvNorm).
            assert (kernel_size % 2) == 1
            padding = int((dilation * (kernel_size - 1)) / 2)
        self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=1, bias=bias)
torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
        return conv_signal

def softmax(logits, temp=1.0):
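    # Temperature-scaled softmax over a numpy array. exp() is clamped at 1e4
    # to avoid overflow; note that `temp` multiplies the logits, so it acts as
    # an inverse temperature (larger values sharpen the distribution).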
y = np.minimum(np.exp((logits * temp)), 10000.0)
st = (y / np.sum(y))
    return st

def cosine_similarity_loss(pred, target, cos_func):
assert isinstance(cos_func, nn.Module)
assert ((pred.size() == target.size()) and (target.numel() > 0))
loss = cos_func(pred, target, torch.tensor(1, device=pred.device))
    return loss

def genCOCOJson(movie_list, img_root, json_path, min_json_path):
movie_ids = {}
with open(movie_list) as movief:
for (idx, line) in enumerate(movief):
name = line[:line.find('.')]
movie_ids[name] = idx
movie_infos = {}
for movie_name in tqdm(movie_ids):
movie_infos[movie_name] = {}
movie_infos[movie_name]['img_infos'] = {}
img_path = os.path.join(movie_name, '902.jpg')
movie_infos[movie_name]['size'] = Image.open(os.path.join(img_root, img_path)).size
movie_info = movie_infos[movie_name]
img_infos = movie_info['img_infos']
(width, height) = movie_info['size']
movie_id = (movie_ids[movie_name] * 10000)
for tid in range(902, 1799):
img_id = (movie_id + tid)
img_path = os.path.join(movie_name, '{}.jpg'.format(tid))
video_path = os.path.join(movie_name, '{}.mp4'.format(tid))
img_infos[tid] = {'id': img_id, 'img_path': img_path, 'video_path': video_path, 'height': height, 'width': width, 'movie': movie_name, 'timestamp': tid, 'annotations': {}}
tic = time.time()
print('Writing into json file...')
jsondata = {}
jsondata['categories'] = [{'supercategory': 'person', 'id': 1, 'name': 'person'}]
anns = [img_info.pop('annotations').values() for movie_info in movie_infos.values() for img_info in movie_info['img_infos'].values()]
anns = list(itertools.chain.from_iterable(anns))
jsondata['annotations'] = anns
imgs = [movie_info['img_infos'].values() for movie_info in movie_infos.values()]
imgs = list(itertools.chain.from_iterable(imgs))
jsondata['images'] = imgs
with open(json_path, 'w') as jsonf:
json.dump(jsondata, jsonf, indent=4)
print('Write json dataset into json file {} successfully.'.format(json_path))
with open(min_json_path, 'w') as jsonminf:
json.dump(jsondata, jsonminf)
print('Write json dataset with no indent into json file {} successfully.'.format(min_json_path))
    print('Done (t={:0.2f}s)'.format((time.time() - tic)))

def gen_load_func(parser, func):
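    # Returns a loader that parses the options registered on `parser` from the
    # remaining command line, merges them into the shared `args` namespace,
    # and calls `func` with them, handing back any unconsumed argv tokens.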
def load(args, cmdline):
(sub_args, cmdline) = parser.parse_known_args(cmdline)
for (k, v) in sub_args.__dict__.items():
args.__dict__[k] = v
return (func(**sub_args.__dict__), cmdline)
    return load

def get_seg_model(cfg, imgnet_pretrained):
if ('s' in cfg.MODEL.NAME):
model = PIDNet(m=2, n=3, num_classes=cfg.DATASET.NUM_CLASSES, planes=32, ppm_planes=96, head_planes=128, augment=True)
elif ('m' in cfg.MODEL.NAME):
model = PIDNet(m=2, n=3, num_classes=cfg.DATASET.NUM_CLASSES, planes=64, ppm_planes=96, head_planes=128, augment=True)
else:
model = PIDNet(m=3, n=4, num_classes=cfg.DATASET.NUM_CLASSES, planes=64, ppm_planes=112, head_planes=256, augment=True)
if imgnet_pretrained:
pretrained_state = torch.load(cfg.MODEL.PRETRAINED, map_location='cpu')['state_dict']
model_dict = model.state_dict()
pretrained_state = {k: v for (k, v) in pretrained_state.items() if ((k in model_dict) and (v.shape == model_dict[k].shape))}
model_dict.update(pretrained_state)
msg = 'Loaded {} parameters!'.format(len(pretrained_state))
logging.info('Attention!!!')
logging.info(msg)
logging.info('Over!!!')
model.load_state_dict(model_dict, strict=False)
    return model

def save_checkpoint(args, epoch, model, optimizer, scheduler):
logger.info('==> Saving...')
state = {'opt': args, 'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'epoch': epoch}
torch.save(state, os.path.join(args.output_dir, 'current.pth'))
if ((epoch % args.save_freq) == 0):
        torch.save(state, os.path.join(args.output_dir, 'ckpt_epoch_{}.pth'.format(epoch)))

def check_accuracy(true_mean, true_cov, var_param, approx):
(approx_mean, approx_cov) = approx.mean_and_cov(var_param)
true_std = np.sqrt(np.diag(true_cov))
approx_std = np.sqrt(np.diag(approx_cov))
mean_error = np.linalg.norm((true_mean - approx_mean))
cov_error_2 = np.linalg.norm((true_cov - approx_cov), ord=2)
cov_norm_2 = np.linalg.norm(true_cov, ord=2)
std_error = np.linalg.norm((true_std - approx_std))
print('mean error = {:.3g}'.format(mean_error))
print('stdev error = {:.3g}'.format(std_error))
print('||cov error||_2^{{1/2}} = {:.3g}'.format(np.sqrt(cov_error_2)))
    print('||true cov||_2^{{1/2}} = {:.3g}'.format(np.sqrt(cov_norm_2)))

def str2list(v):
if (('[' in v) and (']' in v)):
return list(map(int, v.strip('[]').split(',')))
else:
        raise argparse.ArgumentTypeError('Input expected in the form [b1,b2,b3,...]')

def pnasnet_large_arg_scope(weight_decay=4e-05, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    return nasnet.nasnet_large_arg_scope(weight_decay, batch_norm_decay, batch_norm_epsilon)

class TruncationStrategy(ExplicitEnum):
ONLY_FIRST = 'only_first'
ONLY_SECOND = 'only_second'
LONGEST_FIRST = 'longest_first'
    DO_NOT_TRUNCATE = 'do_not_truncate'

def main():
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
parser.add_argument('--word-dim', required=True, type=int)
parser.add_argument('--hidden-dim', required=True, type=int)
parser.add_argument('--clf-hidden-dim', required=True, type=int)
parser.add_argument('--clf-num-layers', required=True, type=int)
parser.add_argument('--leaf-rnn', default=False, action='store_true')
parser.add_argument('--bidirectional', default=False, action='store_true')
parser.add_argument('--intra-attention', default=False, action='store_true')
parser.add_argument('--batchnorm', default=False, action='store_true')
parser.add_argument('--dropout', default=0.0, type=float)
parser.add_argument('--l2reg', default=0.0, type=float)
parser.add_argument('--pretrained', default=None)
parser.add_argument('--fix-word-embedding', default=False, action='store_true')
parser.add_argument('--device', default='cpu')
parser.add_argument('--batch-size', required=True, type=int)
parser.add_argument('--max-epoch', required=True, type=int)
parser.add_argument('--save-dir', required=True)
parser.add_argument('--omit-prob', default=0.0, type=float)
parser.add_argument('--optimizer', default='adam')
parser.add_argument('--fine-grained', default=False, action='store_true')
parser.add_argument('--halve-lr-every', default=2, type=int)
parser.add_argument('--lower', default=False, action='store_true')
args = parser.parse_args()
    train(args)

class IdObsFilter(ObsFilter):
def sense(self, scenario: DgScenario, full_obs: SimObservations, pov: PlayerName) -> SimObservations:
        return full_obs

def get_answer_text(example, feature, pred, args):
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = ' '.join(tok_tokens)
tok_text = tok_text.replace(' ##', '')
tok_text = tok_text.replace('##', '')
tok_text = tok_text.strip()
tok_text = ' '.join(tok_text.split())
orig_text = ' '.join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging)
    return final_text

def make_image(map, ns: str):
img = Image.fromarray((((map - 1) ** 2) * 255).astype('uint8'))
imgrgb = img.convert('RGB')
map_name = 'random_map'
dir_path = os.path.dirname(os.path.realpath(__file__))
    os.makedirs(((dir_path + '/') + map_name), exist_ok=True)
imgrgb.save((dir_path + '/{0}/{1}_{0}.png'.format(map_name, ns)))
create_yaml_files(map_name, dir_path, ns)
    create_empty_map(map.shape[0], map.shape[1], map_name, dir_path, ns)

def main():
args = docopt(__doc__, version='Wikt2Dict - Find anomalies 1.0')
if args['unigram']:
read_unigrams(args['<unigram_file>'])
    scan_stdin(args)

def _create_fake_setuptools_pkg_info(placeholder):
if ((not placeholder) or (not os.path.exists(placeholder))):
log.warn('Could not find the install location')
return
pyver = ('%s.%s' % (sys.version_info[0], sys.version_info[1]))
setuptools_file = ('setuptools-%s-py%s.egg-info' % (SETUPTOOLS_FAKED_VERSION, pyver))
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
    if (not os.access(placeholder, os.W_OK)):
        log.warn("Don't have permissions to write %s, skipping", pkg_info)
        return
    log.warn('Creating %s', pkg_info)
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
        f.close()

def syntax_fix_remove_last_line(original):
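    # Repeatedly drops the last line of a snippet until it parses: each pass
    # round-trips the code through ast/astunparse to normalise it, and on any
    # parse failure strips the trailing line and retries (worst case: the
    # empty string is returned).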
code = original
while (len(code) > 0):
syntax_error = False
try:
o_ast = ast.parse(code)
node = ast.fix_missing_locations(o_ast)
code = astunparse.unparse(node).strip()
node = ast.parse(code)
        except Exception:
syntax_error = True
if syntax_error:
code = '\n'.join(code.splitlines()[:(- 1)])
else:
return code
    return code

class NNServiceServicer(object):
def train(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def evaluate(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def predict(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def upload_meta(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def upload_file(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def save_server_model(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def load_server_model(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

def deconv_bn(batchNorm, in_planes, out_planes, kernel_size=4, stride=2, padding=1, output_padding=0, bias=True):
if batchNorm:
return nn.Sequential(nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=bias), nn.BatchNorm2d(out_planes))
else:
        return nn.Sequential(nn.ConvTranspose2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding, bias=bias))

class NNClassifierModel(NNModel, HasThreshold):
def __init__(self, model, feature_preprocessing=None, jvalue=None, bigdl_type='float'):
super(NNClassifierModel, self).__init__(model, feature_preprocessing, jvalue, bigdl_type)
    @staticmethod
    def load(path):
jvalue = callZooFunc('float', 'loadNNClassifierModel', path)
        return NNClassifierModel(model=None, feature_preprocessing=None, jvalue=jvalue)

def save_pred(pred, root):
with open(os.path.join(root, 'prediction.pkl'), 'wb') as f:
        pickle.dump(pred, f)

class TransposeLast(nn.Module):
    def __init__(self, deconstruct_idx=None, transpose_dim=(- 2)):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx
        self.transpose_dim = transpose_dim
def forward(self, x):
if (self.deconstruct_idx is not None):
x = x[self.deconstruct_idx]
        return x.transpose(self.transpose_dim, (- 1))

def get_auto_reg_predictions(model, row, window, teacher_forcing=True, exponentiate=False, predict_deaths=True):
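    # Sliding-window one-step-ahead predictions with teacher forcing: each
    # prediction is made from the last `window` ground-truth values of the
    # selected series ('deaths' or 'cases'), and the output is seeded with a
    # leading 0. The free-running (non-teacher-forced) mode is not implemented.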
if predict_deaths:
key = 'deaths'
else:
key = 'cases'
deaths = row[key]
predictions = [0]
if teacher_forcing:
for i in range((len(deaths) - window)):
x = deaths[i:(i + window)]
cur_prediction = model.predict([x])
if exponentiate:
cur_prediction = np.exp(cur_prediction)
predictions.append(cur_prediction)
else:
raise NotImplementedError
    return predictions

def load_dataset(dataset):
base_path = join(realpath(dirname(__file__)), 'data', dataset)
if (not exists(base_path)):
print('Please download OTB dataset into `data` folder!')
exit()
json_path = join(realpath(dirname(__file__)), 'data', (dataset + '.json'))
    with open(json_path, 'r') as f:
        info = json.load(f)
for v in info.keys():
path_name = info[v]['name']
info[v]['image_files'] = [join(base_path, path_name, 'img', im_f) for im_f in info[v]['image_files']]
info[v]['gt'] = (np.array(info[v]['gt_rect']) - [1, 1, 0, 0])
info[v]['name'] = v
    return info

class StandardScaler:
def __init__(self, means: np.ndarray=None, stds: np.ndarray=None, replace_nan_token: Any=None):
self.means = means
self.stds = stds
self.replace_nan_token = replace_nan_token
def fit(self, X: List[List[float]]) -> 'StandardScaler':
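        # NaN-aware fit: per-column mean/std ignoring NaNs, with NaN means
        # replaced by 0 and NaN or zero stds replaced by 1, so transform()
        # never divides by zero.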
X = np.array(X).astype(float)
self.means = np.nanmean(X, axis=0)
self.stds = np.nanstd(X, axis=0)
self.means = np.where(np.isnan(self.means), np.zeros(self.means.shape), self.means)
self.stds = np.where(np.isnan(self.stds), np.ones(self.stds.shape), self.stds)
self.stds = np.where((self.stds == 0), np.ones(self.stds.shape), self.stds)
return self
def transform(self, X: List[List[float]]):
X = np.array(X).astype(float)
transformed_with_nan = ((X - self.means) / self.stds)
transformed_with_none = np.where(np.isnan(transformed_with_nan), self.replace_nan_token, transformed_with_nan)
return transformed_with_none
def inverse_transform(self, X: List[List[float]]):
X = np.array(X).astype(float)
transformed_with_nan = ((X * self.stds) + self.means)
transformed_with_none = np.where(np.isnan(transformed_with_nan), self.replace_nan_token, transformed_with_nan)
        return transformed_with_none

def parse_args():
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str)
parser.add_argument('--weight', dest='weight', help='initialize with pretrained model weights', type=str)
parser.add_argument('--imdb', dest='imdb_name', help='dataset to train on', default='voc_2007_trainval', type=str)
parser.add_argument('--imdbval', dest='imdbval_name', help='dataset to validate on', default='voc_2007_test', type=str)
parser.add_argument('--iters', dest='max_iters', help='number of iterations to train', default=70000, type=int)
parser.add_argument('--tag', dest='tag', help='tag of the model', default=None, type=str)
parser.add_argument('--net', dest='net', help='vgg16, res50, res101, res152', default='res50', type=str)
parser.add_argument('--set', dest='set_cfgs', help='set config keys', default=None, nargs=argparse.REMAINDER)
if (len(sys.argv) == 1):
parser.print_help()
sys.exit(1)
args = parser.parse_args()
    return args

class DocSceneGraphTrainer(ModifiedDefaultTrainer):
def __init__(self, cfg):
super(DocSceneGraphTrainer, self).__init__(cfg)
    @classmethod
    def build_train_loader(cls, cfg):
return build_detection_train_loader(cfg, mapper=SceneGraphDatasetMapper(cfg, True))
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
return build_detection_test_loader(cfg, dataset_name, mapper=SceneGraphDatasetMapper(cfg, False))
def build_hooks(self):
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0
ret = [hooks.IterationTimer(), hooks.LRScheduler(self.optimizer, self.scheduler), (hooks.PreciseBN(cfg.TEST.EVAL_PERIOD, self.model, self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER) if (cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)) else None)]
if comm.is_main_process():
ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_to_keep=100))
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
    @classmethod
    def test(cls, cfg, model, evaluators=None):
logger = logging.getLogger(__name__)
results = OrderedDict()
for (idx, dataset_name) in enumerate(cfg.DATASETS.TEST):
data_loader = cls.build_test_loader(cfg, dataset_name)
output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
eval_metrics = set(['SGRecall', 'SGExactMatches'])
evaluator = DocSceneGraphEvaluator(dataset_name, cfg, True, output_folder, metrics=eval_metrics)
results_i = scenegraph_inference_on_dataset(cfg, model, data_loader, evaluator)
results[dataset_name] = results_i
if comm.is_main_process():
assert isinstance(results_i, dict), 'Evaluator must return a dict on the main process. Got {} instead.'.format(results_i)
logger.info('Evaluation results for {} in csv format:'.format(dataset_name))
print_csv_format(results_i)
comm.synchronize()
if (len(results) == 1):
results = list(results.values())[0]
        return results

def _remove_flat_installation(placeholder):
if (not os.path.isdir(placeholder)):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
    if (not found):
        log.warn('Could not locate setuptools*.egg-info')
        return False
log.warn('Removing elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if (not patched):
log.warn('%s already patched.', pkg_info)
return False
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the Setuptools distribution', element)
    return True

def test_CRN_encoder(pickle_map, models_dir, encoder_model_name, encoder_hyperparams_file, b_encoder_hyperparm_tuning):
training_data = pickle_map['training_data']
validation_data = pickle_map['validation_data']
test_data = pickle_map['test_data']
scaling_data = pickle_map['scaling_data']
training_processed = get_processed_data(training_data, scaling_data)
validation_processed = get_processed_data(validation_data, scaling_data)
test_processed = get_processed_data(test_data, scaling_data)
fit_CRN_encoder(dataset_train=training_processed, dataset_val=validation_processed, model_name=encoder_model_name, model_dir=models_dir, hyperparams_file=encoder_hyperparams_file, b_hyperparam_opt=b_encoder_hyperparm_tuning)
CRN_encoder = load_trained_model(validation_processed, encoder_hyperparams_file, encoder_model_name, models_dir)
(mean_mse, mse) = CRN_encoder.evaluate_predictions(test_processed)
    rmse = ((np.sqrt(np.mean(mse)) / 1150) * 100)  # normalise by the maximum output value (1150) and report as a percentage
    return rmse

class KeyPoint2DAnnotationList(Annotation):
def __init__(self, ontology, pointlist):
super().__init__(ontology)
assert isinstance(self._ontology, KeyPointOntology), 'Trying to load annotation with wrong type of ontology!'
for point in pointlist:
            assert isinstance(point, KeyPoint2D), f'Can only instantiate an annotation from a list of KeyPoint2D, not {type(point)}'
self.pointlist = pointlist
    @classmethod
    def load(cls, annotation_file, ontology):
_annotation_pb2 = parse_pbobject(annotation_file, KeyPoint2DAnnotations)
pointlist = [KeyPoint2D(point=np.float32([ann.point.x, ann.point.y]), class_id=ontology.class_id_to_contiguous_id[ann.class_id], color=ontology.colormap[ann.class_id], attributes=getattr(ann, 'attributes', {})) for ann in _annotation_pb2.annotations]
return cls(ontology, pointlist)
def to_proto(self):
return KeyPoint2DAnnotations(annotations=[KeyPoint2DAnnotation(class_id=self._ontology.contiguous_id_to_class_id[point.class_id], point=point.to_proto(), attributes=point.attributes) for point in self.pointlist])
def save(self, save_dir):
return save_pbobject_as_json(self.to_proto(), save_path=save_dir)
def __len__(self):
return len(self.pointlist)
def __getitem__(self, index):
return self.pointlist[index]
def render(self):
raise NotImplementedError
    @property
    def xy(self):
return np.array([point.xy for point in self.pointlist], dtype=np.float32)
    @property
    def class_ids(self):
return np.array([point.class_id for point in self.pointlist], dtype=np.int64)
    @property
    def attributes(self):
return [point.attributes for point in self.pointlist]
    @property
    def instance_ids(self):
return np.array([point.instance_id for point in self.pointlist], dtype=np.int64)
    @property
    def hexdigest(self):
        return generate_uid_from_pbobject(self.to_proto())

class ModelTest(tf.test.TestCase):
def assertLen(self, container, expected_len):
self.assertEqual(expected_len, len(container))
def testDNN(self):
predictor = _build_model()
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
predictor(z)
expected_shapes = {'predictor/conv2d_0/w:0': (2, 2, 1, 4), 'predictor/conv2d_0/b:0': (4,), 'predictor/linear_0/w:0': (4, 10), 'predictor/linear_0/b:0': (10,), 'predictor/linear_1/w:0': (10, 3), 'predictor/linear_1/b:0': (3,)}
for v in predictor.get_variables():
self.assertEqual(expected_shapes[v.name], v.shape)
def _propagation_test(self, wrapper, inputs, outputs):
input_bounds = ibp.IntervalBounds(inputs, inputs)
output_bounds = wrapper.propagate_bounds(input_bounds)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
(o, l, u) = sess.run([outputs, output_bounds.lower, output_bounds.upper])
self.assertAlmostEqual(o.tolist(), l.tolist())
self.assertAlmostEqual(o.tolist(), u.tolist())
def testVerifiableModelWrapperDNN(self):
predictor = _build_model()
z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2, 1])
wrapper = ibp.VerifiableModelWrapper(predictor)
wrapper(z)
self.assertEqual(predictor, wrapper.wrapped_network)
self.assertEqual(3, wrapper.output_size)
self.assertEqual((1, 3), tuple(wrapper.logits.shape.as_list()))
self.assertEqual(z, wrapper.inputs)
z2 = tf.constant([1, 2, 3, 4], dtype=tf.float32)
        z2 = tf.reshape(z2, [1, 2, 2, 1])
logits = wrapper(z2, passthrough=True)
self.assertEqual(z, wrapper.inputs)
self.assertNotEqual(z2, wrapper.inputs)
self.assertLen(wrapper.modules, 6)
self.assertTrue(isinstance(wrapper.modules[0].module, snt.Conv2D))
self.assertEqual(wrapper.modules[1].module, tf.nn.relu)
self.assertTrue(isinstance(wrapper.modules[2].module, snt.BatchFlatten))
self.assertTrue(isinstance(wrapper.modules[3].module, snt.Linear))
self.assertEqual(wrapper.modules[4].module, tf.nn.relu)
self.assertTrue(isinstance(wrapper.modules[5].module, snt.Linear))
self._propagation_test(wrapper, z2, logits)
def testVerifiableModelWrapperResnet(self):
def _build(z0, is_training=False):
input_size = np.prod(z0.shape[1:])
z = snt.Linear(input_size)(z0)
z_left = tf.nn.relu(z)
z_left = snt.Linear(input_size)(z_left)
z = (z_left + z0)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 5)
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperPool(self):
def _build(z0, is_training=False):
z = tf.reduce_mean(z0, axis=1, keep_dims=True)
z = tf.reduce_max(z, axis=2, keep_dims=False)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
z = tf.reshape(z, [1, 2, 2])
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 3)
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperConcat(self):
def _build(z0, is_training=False):
z = snt.Linear(10)(z0)
z = tf.concat([z, z0], axis=1)
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 3)
self._propagation_test(wrapper, z, logits)
def testVerifiableModelWrapperExpandAndSqueeze(self):
def _build(z0, is_training=False):
z = snt.Linear(10)(z0)
z = tf.expand_dims(z, axis=(- 1))
z = tf.squeeze(z, axis=(- 1))
return snt.Linear(2)(z)
z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
wrapper = ibp.VerifiableModelWrapper(_build)
logits = wrapper(z)
self.assertLen(wrapper.modules, 4)
        self._propagation_test(wrapper, z, logits)

class InjectorGenerator(Generator):
def __init__(self, solutions: List[Solution]):
super(InjectorGenerator, self).__init__()
self.population = copy.deepcopy(solutions)
def new(self, problem: Problem):
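        # Hand out the injected (deep-copied) solutions first; once the pool
        # is exhausted, fall back to the problem's own solution factory.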
if (len(self.population) > 0):
return self.population.pop()
else:
solution = problem.create_solution()
            return solution

def main(args):
path = os.path.join('../tcgnn-ae-graphs', (args.dataset + '.npz'))
data = TCGNN_dataset(path, args.dim, args.num_classes, load_from_txt=False)
g = data.g.int().to(args.gpu)
features = data.x
labels = data.y
in_feats = features.size(1)
n_classes = data.num_classes
degs = g.in_degrees().float()
norm = torch.pow(degs, (- 0.5)).cuda()
g.ndata['norm'] = norm.unsqueeze(1)
    if (args.model == 'gcn'):
        model = GCN(g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu)
    elif (args.model == 'agnn'):
        model = AGNN(g, in_feats, args.n_hidden, n_classes, args.n_layers)
    else:
        raise ValueError('unsupported model: {}'.format(args.model))
loss_fcn = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=0.0005)
model = model.cuda()
model.train()
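    # Warm-up forward passes before timing the training loop.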
for _ in range(3):
model(features)
torch.cuda.synchronize()
t0 = time.perf_counter()
for _ in tqdm(range(1, (args.n_epochs + 1))):
logits = model(features)
loss = loss_fcn(logits[:], labels[:])
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.cuda.synchronize()
dur = (time.perf_counter() - t0)
    print('Train (ms): {:.3f}'.format(((dur * 1000.0) / args.n_epochs)))

def test_snapshotKeplerPotential_eval_naz():
s = pynbody.new(star=1)
s['mass'] = 1.0
s['eps'] = 0.0
sp = potential.SnapshotRZPotential(s, num_threads=1)
spaz = potential.SnapshotRZPotential(s, num_threads=1, nazimuths=12)
assert (numpy.fabs((sp(1.0, 0.0) - spaz(1.0, 0.0))) < (10.0 ** (- 8.0))), 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
assert (numpy.fabs((sp(0.5, 0.0) - spaz(0.5, 0.0))) < (10.0 ** (- 8.0))), 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
assert (numpy.fabs((sp(1.0, 0.5) - spaz(1.0, 0.5))) < (10.0 ** (- 8.0))), 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
assert (numpy.fabs((sp(1.0, (- 0.5)) - spaz(1.0, (- 0.5)))) < (10.0 ** (- 8.0))), 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
    return None

@registry.register_measure
class EpisodeInfoExample(habitat.Measure):
def __init__(self, sim, config, **kwargs: Any):
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return 'episode_info'
def reset_metric(self, *args: Any, episode, **kwargs: Any):
self._metric = vars(episode).copy()
self._metric['my_value'] = self._config.VALUE
def update_metric(self, *args: Any, episode, action, **kwargs: Any):
        self._metric = vars(episode).copy()

class TimeDistributed(Layer):
def __init__(self, model, bigdl_type='float'):
        super(TimeDistributed, self).__init__(None, bigdl_type, model)

def test_iter_path_splits():
    assert (list(iter_path_splits('foo.bar.baz')) == [('', 'foo.bar.baz'), ('foo', 'bar.baz'), ('foo.bar', 'baz')])

def train_input_fn(data_dir, batch_size, epochs, **kargs):
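    # ImageNet-style input pipeline: checks that all 1024 training shards
    # exist, shuffles the file list, reads the TFRecords, fuses shuffle and
    # repeat across `epochs`, then parses and batches with _parse_one_record.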
filenames = [os.path.join(data_dir, ('train/train-%05d-of-01024' % i)) for i in range(1024)]
for path in filenames:
if (not os.path.exists(path)):
raise ValueError((path + ' not found'))
dataset = tf.data.Dataset.list_files(filenames, shuffle=True)
dataset = tf.data.TFRecordDataset(dataset)
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat((100 * batch_size), epochs))
dataset = dataset.apply(tf.data.experimental.map_and_batch((lambda record: _parse_one_record(record, True, kargs)), batch_size))
    return dataset

def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
send_example_telemetry('run_translation', model_args, data_args)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
logger.warning("You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with `--source_prefix 'translate English to German: ' `")
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
embedding_size = model.get_input_embeddings().weight.shape[0]
if (len(tokenizer) > embedding_size):
model.resize_token_embeddings(len(tokenizer))
if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if (model.config.decoder_start_token_id is None):
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
if training_args.do_train:
column_names = raw_datasets['train'].column_names
elif training_args.do_eval:
column_names = raw_datasets['validation'].column_names
elif training_args.do_predict:
column_names = raw_datasets['test'].column_names
else:
logger.info('There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.')
return
if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
assert ((data_args.target_lang is not None) and (data_args.source_lang is not None)), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and --target_lang arguments.'
tokenizer.src_lang = data_args.source_lang
tokenizer.tgt_lang = data_args.target_lang
forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
model.config.forced_bos_token_id = forced_bos_token_id
source_lang = data_args.source_lang.split('_')[0]
target_lang = data_args.target_lang.split('_')[0]
max_target_length = data_args.max_target_length
padding = ('max_length' if data_args.pad_to_max_length else False)
if ((training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels'))):
        logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for `{model.__class__.__name__}`. This will lead to the loss being calculated twice and will take up more memory')
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples['translation']]
targets = [ex[target_lang] for ex in examples['translation']]
inputs = [(prefix + inp) for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
model_inputs['labels'] = labels['input_ids']
return model_inputs
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if ('validation' not in raw_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = raw_datasets['validation']
if (data_args.max_eval_samples is not None):
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if ('test' not in raw_datasets):
raise ValueError('--do_predict requires a test dataset')
predict_dataset = raw_datasets['test']
if (data_args.max_predict_samples is not None):
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
metric = evaluate.load('sacrebleu')
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return (preds, labels)
def compute_metrics(eval_preds):
(preds, labels) = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
(decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {'bleu': result['score']}
prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
result['gen_len'] = np.mean(prediction_lens)
result = {k: round(v, 4) for (k, v) in result.items()}
return result
trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
results = {}
max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict', max_length=max_length, num_beams=num_beams)
metrics = predict_results.metrics
max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics('predict', metrics)
trainer.save_metrics('predict', metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
predictions = [pred.strip() for pred in predictions]
output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
with open(output_prediction_file, 'w', encoding='utf-8') as writer:
writer.write('\n'.join(predictions))
kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'translation'}
if (data_args.dataset_name is not None):
kwargs['dataset_tags'] = data_args.dataset_name
if (data_args.dataset_config_name is not None):
kwargs['dataset_args'] = data_args.dataset_config_name
kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
else:
kwargs['dataset'] = data_args.dataset_name
languages = [l for l in [data_args.source_lang, data_args.target_lang] if (l is not None)]
if (len(languages) > 0):
kwargs['language'] = languages
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
    return results
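
# Entry-point boilerplate restored from the upstream transformers
# run_translation example:
def _mp_fn(index):
    # For TPU launchers (xla_spawn).
    main()

if __name__ == '__main__':
    main() |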
def conv1d(in_, filter_size, height, padding, is_train=None, keep_prob=1.0, scope=None):
with tf.variable_scope((scope or 'conv1d')):
num_channels = in_.get_shape()[(- 1)]
filter_ = tf.get_variable('filter', shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable('bias', shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = dropout(in_, keep_prob, is_train)
xxc = (tf.nn.conv2d(in_, filter_, strides, padding) + bias)
out = tf.reduce_max(tf.nn.relu(xxc), 2)
return out |
def add_capacity_constraints(routing, data, prize_evaluator):
capacity = 'Capacity'
routing.AddDimension(prize_evaluator, 0, data.vehicle.capacity, True, capacity) |
def process_in_parallel(tag, total_range_size, binary, output_dir, load_ckpt, load_detectron, opts=''):
cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
with open(cfg_file, 'w') as f:
yaml.dump(cfg, stream=f)
subprocess_env = os.environ.copy()
processes = []
NUM_GPUS = torch.cuda.device_count()
subinds = np.array_split(range(total_range_size), NUM_GPUS)
cuda_visible_devices = os.environ.get('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices:
gpu_inds = list(map(int, cuda_visible_devices.split(',')))
assert ((- 1) not in gpu_inds), "Hiding GPU indices using the '-1' index is not supported"
else:
        gpu_inds = range(NUM_GPUS)  # match the locally detected device count used for np.array_split above
gpu_inds = list(gpu_inds)
for (i, gpu_ind) in enumerate(gpu_inds):
start = subinds[i][0]
end = (subinds[i][(- 1)] + 1)
subprocess_env['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)
cmd = 'python {binary} --range {start} {end} --cfg {cfg_file} --set {opts} --output_dir {output_dir}'
if (load_ckpt is not None):
cmd += ' --load_ckpt {load_ckpt}'
elif (load_detectron is not None):
cmd += ' --load_detectron {load_detectron}'
cmd = cmd.format(binary=shlex_quote(binary), start=int(start), end=int(end), cfg_file=shlex_quote(cfg_file), output_dir=output_dir, load_ckpt=load_ckpt, load_detectron=load_detectron, opts=' '.join([shlex_quote(opt) for opt in opts]))
logger.info('{} range command {}: {}'.format(tag, i, cmd))
if (i == 0):
subprocess_stdout = subprocess.PIPE
else:
filename = os.path.join(output_dir, ('%s_range_%s_%s.stdout' % (tag, start, end)))
subprocess_stdout = open(filename, 'w')
p = subprocess.Popen(cmd, shell=True, env=subprocess_env, stdout=subprocess_stdout, stderr=subprocess.STDOUT, bufsize=1)
processes.append((i, p, start, end, subprocess_stdout))
outputs = []
for (i, p, start, end, subprocess_stdout) in processes:
log_subprocess_output(i, p, output_dir, tag, start, end)
if isinstance(subprocess_stdout, IOBase):
subprocess_stdout.close()
range_file = os.path.join(output_dir, ('%s_range_%s_%s.pkl' % (tag, start, end)))
range_data = pickle.load(open(range_file, 'rb'))
outputs.append(range_data)
return outputs |
class Element(Param):
def __init__(self, xml_var, value_type, required=True, default=None, var=None, is_raw=False):
Param.__init__(self, xml_var, value_type, required, default, var)
self.type = 'element'
self.is_raw = is_raw
def set_from_xml(self, obj, node, path):
value = self.value_type.from_xml(node, path)
setattr(obj, self.var, value)
def add_to_xml(self, obj, parent):
        value = getattr(obj, self.var)
if (value is None):
if self.required:
raise Exception('Required element not defined in object: {}'.format(self.var))
            elif (not skip_default):  # `skip_default` is a module-level flag (False by default) in the original source
value = self.default
if (value is not None):
self.add_scalar_to_xml(parent, value)
def add_scalar_to_xml(self, parent, value):
if self.is_raw:
node = parent
else:
node = node_add(parent, self.xml_var)
self.value_type.write_xml(node, value) |
def cache_sample(iteration, data):
    # `dirname_samples` and `cPickle` (the pickle module on Python 3) come
    # from module level in the original source.
    with open((dirname_samples + ('/%d' % iteration)), 'wb') as f:
        cPickle.dump(data, f) |
class MViT(Backbone):
def __init__(self, img_size=224, patch_kernel=(7, 7), patch_stride=(4, 4), patch_padding=(3, 3), in_chans=3, embed_dim=96, depth=16, num_heads=1, last_block_indexes=(0, 2, 11, 15), qkv_pool_kernel=(3, 3), adaptive_kv_stride=4, adaptive_window_size=56, residual_pooling=True, mlp_ratio=4.0, qkv_bias=True, drop_path_rate=0.0, norm_layer=nn.LayerNorm, act_layer=nn.GELU, use_abs_pos=False, use_rel_pos=True, rel_pos_zero_init=True, use_act_checkpoint=False, pretrain_img_size=224, pretrain_use_cls_token=True, out_features=('scale2', 'scale3', 'scale4', 'scale5')):
super().__init__()
self.pretrain_use_cls_token = pretrain_use_cls_token
self.patch_embed = PatchEmbed(kernel_size=patch_kernel, stride=patch_stride, padding=patch_padding, in_chans=in_chans, embed_dim=embed_dim)
if use_abs_pos:
num_patches = ((pretrain_img_size // patch_stride[0]) * (pretrain_img_size // patch_stride[1]))
num_positions = ((num_patches + 1) if pretrain_use_cls_token else num_patches)
self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
else:
self.pos_embed = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
dim_out = embed_dim
stride_kv = adaptive_kv_stride
window_size = adaptive_window_size
input_size = ((img_size // patch_stride[0]), (img_size // patch_stride[1]))
stage = 2
stride = patch_stride[0]
self._out_feature_strides = {}
self._out_feature_channels = {}
self.blocks = nn.ModuleList()
for i in range(depth):
if ((i == last_block_indexes[1]) or (i == last_block_indexes[2])):
stride_kv_ = (stride_kv * 2)
else:
stride_kv_ = stride_kv
window_size_ = (0 if (i in last_block_indexes[1:]) else window_size)
block = MultiScaleBlock(dim=embed_dim, dim_out=dim_out, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=dpr[i], norm_layer=norm_layer, qkv_pool_kernel=qkv_pool_kernel, stride_q=(2 if ((i - 1) in last_block_indexes) else 1), stride_kv=stride_kv_, residual_pooling=residual_pooling, window_size=window_size_, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, input_size=input_size)
if use_act_checkpoint:
block = checkpoint_wrapper(block)
self.blocks.append(block)
embed_dim = dim_out
if (i in last_block_indexes):
name = f'scale{stage}'
if (name in out_features):
self._out_feature_channels[name] = dim_out
self._out_feature_strides[name] = stride
self.add_module(f'{name}_norm', norm_layer(dim_out))
dim_out *= 2
num_heads *= 2
stride_kv = max((stride_kv // 2), 1)
stride *= 2
stage += 1
if ((i - 1) in last_block_indexes):
window_size = (window_size // 2)
input_size = [(s // 2) for s in input_size]
self._out_features = out_features
self._last_block_indexes = last_block_indexes
if (self.pos_embed is not None):
trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.patch_embed(x)
if (self.pos_embed is not None):
x = (x + get_abs_pos(self.pos_embed, self.pretrain_use_cls_token, x.shape[1:3]))
outputs = {}
stage = 2
for (i, blk) in enumerate(self.blocks):
x = blk(x)
if (i in self._last_block_indexes):
name = f'scale{stage}'
if (name in self._out_features):
x_out = getattr(self, f'{name}_norm')(x)
outputs[name] = x_out.permute(0, 3, 1, 2)
stage += 1
return outputs |
def input_fn_builder(features, seq_length, max_predictions_per_seq, tokenizer):
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_masked_lm_positions = []
all_masked_lm_ids = []
all_masked_lm_weights = []
all_next_sentence_labels = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_masked_lm_positions.append(feature.masked_lm_positions)
all_masked_lm_ids.append(feature.masked_lm_ids)
all_masked_lm_weights.append(feature.masked_lm_weights)
all_next_sentence_labels.append(feature.next_sentence_labels)
def input_fn(params):
batch_size = params['batch_size']
num_examples = len(features)
d = tf.data.Dataset.from_tensor_slices({'input_ids': tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'input_mask': tf.constant(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), 'segment_ids': tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'masked_lm_positions': tf.constant(all_masked_lm_positions, shape=[num_examples, max_predictions_per_seq], dtype=tf.int32), 'masked_lm_ids': tf.constant(all_masked_lm_ids, shape=[num_examples, max_predictions_per_seq], dtype=tf.int32), 'masked_lm_weights': tf.constant(all_masked_lm_weights, shape=[num_examples, max_predictions_per_seq], dtype=tf.float32), 'next_sentence_labels': tf.constant(all_next_sentence_labels, shape=[num_examples, 1], dtype=tf.int32)})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn |
class ResnetDownsampleBlock2D(nn.Module):
def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor: float=1.0, add_downsample: bool=True, skip_time_act: bool=False):
super().__init__()
resnets = []
for i in range(num_layers):
in_channels = (in_channels if (i == 0) else out_channels)
resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act))
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList([ResnetBlock2D(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, skip_time_act=skip_time_act, down=True)])
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, scale: float=1.0) -> Tuple[(torch.FloatTensor, Tuple[(torch.FloatTensor, ...)])]:
output_states = ()
for resnet in self.resnets:
if (self.training and self.gradient_checkpointing):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
if is_torch_version('>=', '1.11.0'):
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)
else:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
else:
hidden_states = resnet(hidden_states, temb, scale)
output_states = (output_states + (hidden_states,))
if (self.downsamplers is not None):
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states, temb, scale)
output_states = (output_states + (hidden_states,))
return (hidden_states, output_states) |
def cachedmethod(method):
key = ('_' + method.__name__)
args_name = (key + '_args')
kwargs_name = (key + '_kwargs')
def cached_method(self, *args, **kwargs):
clear_data = kwargs.pop('clear', False)
if clear_data:
clear(self)
return
recalc = kwargs.pop('recalc', False)
if recalc:
clear(self)
d = self.__dict__
reload = (key not in d)
        if ((method.__code__.co_argcount > 1) or ((method.__code__.co_flags & 12) > 0)):  # takes parameters besides self, or *args/**kwargs (flags 4 | 8)
            # Record the call signature on every call, including the first, so
            # that a repeat call with identical arguments hits the cache.
            if (d.get(args_name, None) != args):
                d[args_name] = args
                reload = True
            if (d.get(kwargs_name, None) != kwargs):
                d[kwargs_name] = kwargs
                reload = True
else:
if (not (args_name in d)):
d[args_name] = args
if (not (kwargs_name in d)):
d[kwargs_name] = kwargs
if reload:
d[key] = method(self, *args, **kwargs)
return d[key]
def clear(self):
d = self.__dict__
d.pop(key, None)
d.pop(args_name, None)
d.pop(kwargs_name, None)
cached_method.__dict__.update(method.__dict__)
cached_method.__dict__['clear'] = clear
cached_method.__dict__['method'] = method.__name__
if (method.__doc__ is not None):
cached_method.__doc__ = ((method.__doc__ + '\n') + cached_method.__doc__)
cached_method.__name__ = method.__name__
cached_method.__module__ = getattr(method, '__module__')
return cached_method |
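# Hedged usage sketch for the decorator above: the first call computes and
# caches on the instance; later calls reuse the cache until the arguments
# change or `recalc=True` / `clear=True` is passed.
class Expensive:
    @cachedmethod
    def total(self, n):
        print('computing total({})'.format(n))
        return sum(range(n))

e = Expensive()
e.total(5)               # computes and caches 10
e.total(5)               # served from cache, no print
e.total(6)               # argument changed, recomputed
e.total(6, recalc=True)  # forced recomputation
e.total(clear=True)      # drops the cached value and returns None |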
def _make_scheduler(args, optimizer):
logger.info(f'Using {args.scheduler} Scheduler ......')
if (args.scheduler == 'StepLR'):
scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=args.lr_decay)
elif (args.scheduler == 'OneCycleLR'):
scheduler = lr_scheduler.OneCycleLR(optimizer, args.lr, (args.num_iter + 100), pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
    else:
        raise ValueError(f'Unsupported scheduler: {args.scheduler}')
    return scheduler
class ConvNet(nn.Module):
def __init__(self, opt, momentum=0.1, affine=True, track_running_stats=True):
super(ConvNet, self).__init__()
self.in_planes = opt['in_planes']
self.out_planes = opt['out_planes']
self.num_stages = opt['num_stages']
if (type(self.out_planes) == int):
self.out_planes = [self.out_planes for i in range(self.num_stages)]
assert ((type(self.out_planes) == list) and (len(self.out_planes) == self.num_stages))
num_planes = ([self.in_planes] + self.out_planes)
userelu = (opt['userelu'] if ('userelu' in opt) else True)
conv_blocks = []
for i in range(self.num_stages):
if (i == (self.num_stages - 1)):
conv_blocks.append(ConvBlock(num_planes[i], num_planes[(i + 1)], userelu=userelu))
else:
conv_blocks.append(ConvBlock(num_planes[i], num_planes[(i + 1)]))
self.conv_blocks = nn.Sequential(*conv_blocks)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out = self.conv_blocks(x)
out = out.view(out.size(0), (- 1))
return out |
def test(test_loader, input_dim=1024):
model = Net(input_dim=input_dim)
criterion = nn.BCELoss()
if train_on_gpu:
model.cuda()
model.load_state_dict(torch.load('model.pt'))
test_loss = 0.0
num_correct = 0
y_true = np.array([])
y_pred = np.array([])
y_pred_proba = np.array([])
model.eval()
for (data, target) in test_loader:
if train_on_gpu:
(data, target) = (data.cuda(), target.cuda())
output = model(data)
loss = criterion(output.squeeze(), target.float())
test_loss += (loss.item() * data.size(0))
pred = torch.round(output.squeeze()).int()
correct_tensor = pred.eq(target.int().view_as(pred))
correct = (np.squeeze(correct_tensor.numpy()) if (not train_on_gpu) else np.squeeze(correct_tensor.cpu().numpy()))
num_correct += np.sum(correct)
        y_true = np.concatenate([y_true, target.int().view_as(pred).detach().cpu().numpy()])
        y_pred = np.concatenate([y_pred, pred.detach().cpu().numpy()])
        y_pred_proba = np.concatenate([y_pred_proba, output.squeeze().detach().cpu().numpy()])
test_loss = (test_loss / len(test_loader.sampler))
print('Final test Loss: {:.6f}'.format(test_loss))
return (y_true, y_pred_proba) |
def psnr(img, ref):
psnr_slices = []
ref_abs = np.abs(ref)
img_abs = np.abs(img)
for i in range(ref_abs.shape[0]):
psnr_i = compare_psnr(ref_abs[i], img_abs[i], data_range=ref_abs[i].max())
psnr_slices.append(np.mean(psnr_i))
return np.mean(psnr_slices) |
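# Hedged usage sketch. `compare_psnr` above is assumed to be scikit-image's
# PSNR (skimage.measure.compare_psnr in older releases, renamed
# skimage.metrics.peak_signal_noise_ratio since 0.16):
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

rng = np.random.default_rng(0)
ref = rng.random((4, 64, 64))                      # stack of 4 reference slices
img = ref + 0.01 * rng.standard_normal(ref.shape)  # slightly noisy reconstruction
print(psnr(img, ref))                              # mean per-slice PSNR in dB |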
def clone_and_find(nodes):
return_list = True
if (not isinstance(nodes, list)):
return_list = False
nodes = [nodes]
paths = []
for node in nodes:
paths.append([])
tree = node
while (tree.parent is not None):
prev = tree
tree = tree.parent
paths[(- 1)].append(tree.subtrees.index(prev))
ntree = nodes[0].root().clone()
ans = []
for path in paths:
tree = ntree
for index in path[::(- 1)]:
tree = tree.subtrees[index]
ans.append(tree)
if return_list:
return ans
else:
return ans[0] |
class CaptionDedupProcessor(object):
def __init__(self, pkl_file):
with open(pkl_file, 'rb') as fd:
self.data = pickle.load(fd)
self.stat = {'t_clip_len': [], 'video_len': [], 'clip_tps': [], 'video_tps': [], 'clip_len': []}
def __call__(self):
for (idx, video_id) in enumerate(tqdm(self.data)):
caption = json.loads(self.data[video_id])
caption = self._dedup(caption)
if (idx < 4096):
self.save_stat(video_id, caption)
self.data[video_id] = json.dumps(caption)
self.print_stat()
def single(self, video_id):
caption = json.loads(self.data[video_id])
for (clip_idx, (start, end, text)) in enumerate(zip(caption['start'], caption['end'], caption['text'])):
print(start, end, text)
        print(('-' * 100))
caption = self._dedup(caption)
for (clip_idx, (start, end, text)) in enumerate(zip(caption['start'], caption['end'], caption['text'])):
print(start, end, text)
print(('#' * 100))
self.save_stat(video_id, caption)
self.print_stat()
def finalize(self, tgt_fn):
with open(tgt_fn, 'wb') as fw:
pickle.dump(self.data, fw, pickle.HIGHEST_PROTOCOL)
def save_stat(self, video_id, caption):
video_fn = os.path.join('data/feat/feat_how2_s3d', (video_id + '.npy'))
if os.path.isfile(video_fn):
with open(video_fn, 'rb', 1) as fr:
version = np.lib.format.read_magic(fr)
(shape, fortran, dtype) = np.lib.format._read_array_header(fr, version)
video_len = shape[0]
t_clip_len = 0.0
t_tokens = 0
for (idx, (start, end, text)) in enumerate(zip(caption['start'], caption['end'], caption['text'])):
clip_len = ((end - max(caption['end'][(idx - 1)], start)) if (idx > 0) else (end - start))
t_clip_len += clip_len
t_tokens += len(text.split(' '))
self.stat['clip_len'].append(clip_len)
self.stat['t_clip_len'].append(t_clip_len)
self.stat['video_len'].append(video_len)
self.stat['clip_tps'].append((t_tokens / t_clip_len))
self.stat['video_tps'].append((t_tokens / video_len))
def print_stat(self):
result = {'t_clip_len': np.mean(self.stat['t_clip_len']), 'video_len': np.mean(self.stat['video_len']), 'clip_tps': np.mean(self.stat['clip_tps']), 'video_tps': np.mean(self.stat['video_tps']), 'min_clip_len': min(self.stat['clip_len']), 'max_clip_len': max(self.stat['clip_len']), 'mean_clip_len': np.mean(self.stat['clip_len']), 'num_clip': (len(self.stat['clip_len']) / len(self.stat['video_tps']))}
print(result)
def _dedup(self, caption):
def random_merge(end_idx, start, end, text, starts, ends, texts):
if (random.random() > 0.5):
ends[(- 1)] = max(ends[(- 1)], start)
rest_text = text[end_idx:].strip()
if rest_text:
starts.append(max(ends[(- 1)], start))
ends.append(max(end, starts[(- 1)]))
texts.append(rest_text)
else:
left_text = texts[(- 1)][:(- end_idx)].strip()
if left_text:
ends[(- 1)] = min(ends[(- 1)], start)
texts[(- 1)] = left_text
else:
starts.pop((- 1))
ends.pop((- 1))
texts.pop((- 1))
starts.append(start)
ends.append(end)
texts.append(text)
(starts, ends, texts) = ([], [], [])
for (clip_idx, (start, end, text)) in enumerate(zip(caption['start'], caption['end'], caption['text'])):
if (not isinstance(text, str)):
continue
text = text.replace('\n', ' ').strip()
if (len(text) == 0):
continue
starts.append(start)
ends.append(end)
texts.append(text)
break
for (clip_idx, (start, end, text)) in enumerate(zip(caption['start'][(clip_idx + 1):], caption['end'][(clip_idx + 1):], caption['text'][(clip_idx + 1):])):
if (not isinstance(text, str)):
continue
text = text.replace('\n', ' ').strip()
if (len(text) == 0):
continue
if texts[(- 1)].endswith(text):
ends[(- 1)] = max(ends[(- 1)], end)
elif text.startswith(texts[(- 1)]):
texts[(- 1)] = text
starts[(- 1)] = min(starts[(- 1)], start)
ends[(- 1)] = max(ends[(- 1)], end)
else:
for end_idx in range(1, (len(text) + 1)):
if texts[(- 1)].endswith(text[:end_idx]):
random_merge(end_idx, start, end, text, starts, ends, texts)
break
else:
starts.append(start)
ends.append(end)
texts.append(text)
assert (((ends[(- 1)] + 0.001) >= starts[(- 1)]) and (len(texts[(- 1)]) > 0)), '{} {} {} <- {} {} {}, {} {} {}'.format(str(starts[(- 1)]), str(ends[(- 1)]), texts[(- 1)], caption['start'][(clip_idx - 1)], caption['end'][(clip_idx - 1)], caption['text'][(clip_idx - 1)], str(start), str(end), text)
return {'start': starts, 'end': ends, 'text': texts} |
def train(log_interval, model, device, train_loader, optimizer, epoch):
model.train()
for (batch_idx, (data, target)) in enumerate(train_loader):
(data, target) = (data.to(device), target.to(device))
N = data.shape[0]
data = data.view(N, model.time_step, (- 1))
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if ((batch_idx % log_interval) == 0):
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, (batch_idx * len(data)), len(train_loader.sampler), ((100.0 * batch_idx) / len(train_loader)), loss.item())) |
def wash_and_repair_reference_line(line):
line = repair_broken_urls(line)
line = replace_undesirable_characters(line)
line = re.sub('"([^"]+),"', '"\\g<1>",', line)
line = replace_undesirable_characters(line)
line = re_multiple_space.sub(u' ', line)
return line |
def stop_flops_count(self):
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function) |
def initialize_dataloader(train_config, train_dataset, val_dataset):
batch_size = train_config['batch_size']
num_workers = train_config['num_workers']
pin_memory = train_config['pin_memory']
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, drop_last=True, pin_memory=pin_memory)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, drop_last=False, pin_memory=pin_memory)
return (train_loader, val_loader) |
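# Hedged usage sketch with a toy TensorDataset; the train_config keys mirror
# the ones read by the function above (DataLoader is assumed imported at
# module level, as the function requires).
import torch
from torch.utils.data import TensorDataset

cfg = {'batch_size': 8, 'num_workers': 0, 'pin_memory': False}
train_ds = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
val_ds = TensorDataset(torch.randn(20, 3), torch.randint(0, 2, (20,)))
(train_loader, val_loader) = initialize_dataloader(cfg, train_ds, val_ds)
print(len(train_loader), len(val_loader))  # 12 full train batches, 3 val batches |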
def broadcast_object(obj: Any, src_rank: int, group: object, dist_device: Optional[torch.device]=None) -> Any:
if (dist_device is None):
if (torch.distributed.get_backend(group) == 'nccl'):
dist_device = torch.device('cuda')
else:
dist_device = torch.device('cpu')
if (get_rank(group) == src_rank):
tensors = []
obj = _split_tensors_from_obj(obj, tensors)
obj = _broadcast_object_slow(obj, src_rank, group, dist_device)
tensors = broadcast_tensors(tensors, src_rank, group, dist_device)
else:
obj = _broadcast_object_slow(None, src_rank, group, dist_device)
tensors = broadcast_tensors(None, src_rank, group, dist_device)
return _put_tensors_in_obj(obj, tensors) |
def sent_metric_detect(preds, targs):
assert (len(preds) == len(targs)), f'{len(preds)},{len(targs)}'
(tp, targ_p, pred_p, hit) = (0, 0, 0, 0)
for (pred_item, targ_item) in zip(preds, targs):
assert (pred_item[0] == targ_item[0])
(pred, targ) = (sorted(pred_item[1:]), sorted(targ_item[1:]))
if (targ != []):
targ_p += 1
if (pred != []):
pred_p += 1
if ((len(pred) == len(targ)) and all(((p[0] == t[0]) for (p, t) in zip(pred, targ)))):
hit += 1
if ((pred != []) and (len(pred) == len(targ)) and all(((p[0] == t[0]) for (p, t) in zip(pred, targ)))):
tp += 1
acc = (hit / len(targs))
    p = ((tp / pred_p) if (pred_p > 0) else 0.0)
    r = ((tp / targ_p) if (targ_p > 0) else 0.0)
    f1 = ((((2 * p) * r) / (p + r)) if ((p + r) > 0) else 0.0)
results = {'sent-detect-acc': (acc * 100), 'sent-detect-p': (p * 100), 'sent-detect-r': (r * 100), 'sent-detect-f1': (f1 * 100)}
return results |
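# Toy check for the metric above: each item is [sentence_id, span_1, ...] and
# spans are compared on their first element.
preds = [['s1', ('A',)], ['s2']]
targs = [['s1', ('A',)], ['s2', ('B',)]]
print(sent_metric_detect(preds, targs))
# {'sent-detect-acc': 50.0, 'sent-detect-p': 100.0, 'sent-detect-r': 50.0, 'sent-detect-f1': 66.66...} |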
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, base_width=64, stride=1, groups=1, dilation=1, norm_layer=None, downsample=None):
super().__init__()
if (stride not in [1, 2]):
            raise ValueError(f'Bottleneck of ResNet only supports `stride=1` and `stride=2`, but {stride} received!')
width = (int((planes * (base_width / 64))) * groups)
self.stride = stride
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self.conv1 = nn.Conv2d(in_channels=inplanes, out_channels=width, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False)
self.bn1 = norm_layer(width)
self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, kernel_size=3, stride=stride, padding=dilation, groups=groups, dilation=dilation, bias=False)
self.bn2 = norm_layer(width)
self.conv3 = nn.Conv2d(in_channels=width, out_channels=(planes * self.expansion), kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False)
self.bn3 = norm_layer((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
identity = (self.downsample(x) if (self.downsample is not None) else x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu((out + identity))
return out |
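# Hedged usage sketch: a single block with a 1x1-conv downsample so the
# residual matches the expanded output channels (torch/nn imported here for a
# self-contained run).
import torch
import torch.nn as nn

downsample = nn.Sequential(
    nn.Conv2d(64, (64 * Bottleneck.expansion), kernel_size=1, stride=1, bias=False),
    nn.BatchNorm2d((64 * Bottleneck.expansion)),
)
block = Bottleneck(inplanes=64, planes=64, downsample=downsample)
print(block(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 256, 56, 56]) |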
@pytest.mark.parametrize('dataset', ['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
dataset_class.load_annotations = MagicMock()
if (dataset in ['CocoDataset', 'CityscapesDataset']):
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=('bus', 'car'), test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
assert (custom_dataset.CLASSES != original_classes)
assert (custom_dataset.CLASSES == ('bus', 'car'))
assert custom_dataset.custom_classes
custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=['bus', 'car'], test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
assert (custom_dataset.CLASSES != original_classes)
assert (custom_dataset.CLASSES == ['bus', 'car'])
assert custom_dataset.custom_classes
custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=['foo'], test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
assert (custom_dataset.CLASSES != original_classes)
assert (custom_dataset.CLASSES == ['foo'])
assert custom_dataset.custom_classes
custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=None, test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
assert (custom_dataset.CLASSES == original_classes)
assert (not custom_dataset.custom_classes)
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(ann_file=MagicMock(), pipeline=[], classes=tmp_file.name, test_mode=True, img_prefix=('VOC2007' if (dataset == 'VOCDataset') else ''))
tmp_file.close()
assert (custom_dataset.CLASSES != original_classes)
assert (custom_dataset.CLASSES == ['bus', 'car'])
assert custom_dataset.custom_classes |
class TestPruning(unittest.TestCase):
def test_pruning_basic(self):
hidden_size = 32
model = NaiveMLP(hidden_size)
from neural_compressor.compression.pruner.model_slim.pattern_analyzer import ClassifierHeadSearcher
searcher = ClassifierHeadSearcher(model)
layer = searcher.search(return_name=True)
assert (layer == 'linear3')
del model
model_name_or_path = 'textattack/distilbert-base-uncased-MRPC'
task_name = 'mrpc'
num_labels = 2
config = AutoConfig.from_pretrained(model_name_or_path, num_labels=num_labels, finetuning_task=task_name)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config)
searcher = ClassifierHeadSearcher(model)
layer = searcher.search(return_name=True)
assert (layer == 'classifier') |
def expand_argument_values(argument_values: Sequence[TensorValue]) -> List[TensorValue]:
has_slot_var = False
for arg in argument_values:
if isinstance(arg, TensorValue):
for var in arg.batch_variables:
if (var == '??'):
has_slot_var = True
break
if has_slot_var:
return list(argument_values)
if (len(argument_values) < 2):
return list(argument_values)
argument_values = list(argument_values)
batch_variables = list()
batch_sizes = list()
for arg in argument_values:
if isinstance(arg, TensorValue):
for var in arg.batch_variables:
if (var not in batch_variables):
batch_variables.append(var)
batch_sizes.append(arg.get_variable_size(var))
else:
assert isinstance(arg, (int, slice)), arg
masks = list()
for (i, arg) in enumerate(argument_values):
if isinstance(arg, TensorValue):
argument_values[i] = arg.expand(batch_variables, batch_sizes)
if (argument_values[i].tensor_mask is not None):
masks.append(argument_values[i].tensor_mask)
if (len(masks) > 0):
final_mask = torch.stack(masks, dim=(- 1)).amin(dim=(- 1))
for arg in argument_values:
if isinstance(arg, TensorValue):
arg.tensor_mask = final_mask
arg._mask_certified_flag = True
return argument_values |
class TCCA(MCCA):
def fit(self, views: Iterable[np.ndarray], y=None, **kwargs):
views = self._validate_data(views)
self._check_params()
(whitened_views, covs_invsqrt) = self._setup_tensor(views)
for (i, el) in enumerate(whitened_views):
if (i == 0):
M = el
else:
for _ in range((len(M.shape) - 1)):
el = np.expand_dims(el, 1)
                M = (np.expand_dims(M, (- 1)) @ el)
M = np.mean(M, 0)
tl.set_backend('numpy')
M_parafac = parafac(M, self.latent_dimensions, verbose=False, random_state=self.random_state)
        self.weights_ = [(cov_invsqrt @ fac) for (cov_invsqrt, fac) in zip(covs_invsqrt, M_parafac.factors)]
return self
def correlations(self, views: Iterable[np.ndarray], **kwargs):
transformed_views = self.transform(views, **kwargs)
transformed_views = [(transformed_view - transformed_view.mean(axis=0)) for transformed_view in transformed_views]
multiplied_views = np.stack(transformed_views, axis=0).prod(axis=0).sum(axis=0)
norms = np.stack([np.linalg.norm(transformed_view, axis=0) for transformed_view in transformed_views], axis=0).prod(axis=0)
corrs = (multiplied_views / norms)
return corrs
def average_pairwise_correlations(self, views: Iterable[np.ndarray], **kwargs) -> np.ndarray:
transformed_views = self.transform(views, **kwargs)
transformed_views = [(transformed_view - transformed_view.mean(axis=0)) for transformed_view in transformed_views]
multiplied_views = np.stack(transformed_views, axis=0).prod(axis=0).sum(axis=0)
norms = np.stack([np.linalg.norm(transformed_view, axis=0) for transformed_view in transformed_views], axis=0).prod(axis=0)
corrs = (multiplied_views / norms)
return corrs
def score(self, views: Iterable[np.ndarray], **kwargs):
return self.average_pairwise_correlations(views, **kwargs).mean()
def _setup_tensor(self, views: Iterable[np.ndarray], **kwargs):
covs = [(((1 - self.c[i]) * np.cov(view, rowvar=False)) + (self.c[i] * np.eye(view.shape[1]))) for (i, view) in enumerate(views)]
smallest_eigs = [(min(0, np.linalg.eigvalsh(cov).min()) - self.eps) for cov in covs]
covs = [(cov - (smallest_eig * np.eye(cov.shape[0]))) for (cov, smallest_eig) in zip(covs, smallest_eigs)]
covs_invsqrt = [np.linalg.inv(sqrtm(cov).real) for cov in covs]
        views = [(train_view @ cov_invsqrt) for (train_view, cov_invsqrt) in zip(views, covs_invsqrt)]
return (views, covs_invsqrt)
def _more_tags(self):
return {'multiview': True} |
def getDomain(idx, log, domains, last_domain):
if (idx == 1):
active_domains = get_summary_bstate(log[idx]['metadata'], True)
crnt_doms = (active_domains[0] if (len(active_domains) != 0) else domains[0])
return crnt_doms
else:
ds_diff = get_ds_diff(log[(idx - 2)]['metadata'], log[idx]['metadata'])
if (len(ds_diff.keys()) == 0):
crnt_doms = last_domain
else:
crnt_doms = list(ds_diff.keys())
return crnt_doms[0] |
class BertGenerationTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
prefix_tokens: List[int] = []
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token='<::::>', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if (not hasattr(self, 'sp_model_kwargs')):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str) -> List[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
out_string = self.sp_model.decode_pieces(tokens)
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
copyfile(self.vocab_file, out_vocab_file)
elif (not os.path.isfile(self.vocab_file)):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,) |
def create_log(name, model_name):
log_dir = './log/{}'.format(model_name)
if (not os.path.exists(log_dir)):
os.makedirs(log_dir)
csv_name = os.path.join(log_dir, '{}.csv'.format(name))
    log = open(csv_name, 'w')
    # Train and validation logs share the same CSV header.
    log.write('epoch, iteration, batch, loss, base_psnr, train_psnr, base_ssim, train_ssim, base_nmse, train_nmse\n')
return log |
def get_test_loader(root, img_size=256, batch_size=32, shuffle=True, num_workers=4):
print('Preparing DataLoader for the generation phase...')
transform = transforms.Compose([transforms.Resize([img_size, img_size]), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
dataset = ImageFolder(root, transform)
return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True) |
def tokenize_dataset(args, model, raw_datasets, tokenizer):
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
embedding_size = model.get_input_embeddings().weight.shape[0]
if (len(tokenizer) > embedding_size):
model.resize_token_embeddings(len(tokenizer))
column_names = raw_datasets['train'].column_names
text_column_name = ('text' if ('text' in column_names) else column_names[0])
with main_process_first():
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
return tokenized_datasets |
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs |
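# Example: the adjacent symbol pairs a BPE merge step would consider for a
# word given as a sequence of characters.
print(get_pairs('hello'))
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} |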
def uniform(shape, scale=0.05, name=None):
initial = tf.random_uniform(shape, minval=(- scale), maxval=scale, dtype=tf.float32)
return tf.Variable(initial, name=name) |
def EfficientNetB4(include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, stride_size=2, classifier_activation='softmax', **kwargs):
return EfficientNet(1.4, 1.8, 380, 0.4, model_name='efficientnetb4', include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, stride_size=stride_size, classifier_activation=classifier_activation, **kwargs) |
def pad_width(img, stride, pad_value, min_dims):
(h, w, _) = img.shape
h = min(min_dims[0], h)
min_dims[0] = (math.ceil((min_dims[0] / float(stride))) * stride)
min_dims[1] = max(min_dims[1], w)
min_dims[1] = (math.ceil((min_dims[1] / float(stride))) * stride)
pad = []
pad.append(int(math.floor(((min_dims[0] - h) / 2.0))))
pad.append(int(math.floor(((min_dims[1] - w) / 2.0))))
pad.append(int(((min_dims[0] - h) - pad[0])))
pad.append(int(((min_dims[1] - w) - pad[1])))
padded_img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3], cv2.BORDER_CONSTANT, value=pad_value)
return (padded_img, pad) |
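# Hedged usage sketch: pad a toy image so its width becomes a multiple of the
# network stride; note that `min_dims` is modified in place.
import math
import cv2
import numpy as np

img = np.zeros((368, 500, 3), dtype=np.uint8)
(padded, pad) = pad_width(img, stride=8, pad_value=(0, 0, 0), min_dims=[368, 500])
print(padded.shape, pad)  # (368, 504, 3) [0, 2, 0, 2] |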
def test_accellsrframe_funcomegaz_2d():
lp = potential.LogarithmicHaloPotential(normalize=1.0)
omega = lp.omegac(1.0)
omegadot = 0.02
omega_func = (lambda t: (lp.omegac(1.0) + (0.02 * t)))
omegadot_func = (lambda t: 0.02)
diskpot = lp
framepot = potential.NonInertialFrameForce(Omega=omega_func, Omegadot=omegadot_func)
diskframepot = (lp + framepot)
def check_orbit(method='odeint', tol=1e-09):
o = Orbit().toPlanar()
o.turn_physical_off()
ts = numpy.linspace(0.0, 20.0, 1001)
o.integrate(ts, diskpot)
op = Orbit([o.R(), o.vR(), (o.vT() - (omega * o.R())), o.phi()])
op.integrate(ts, diskframepot, method=method)
o_xs = (o.R(ts) * numpy.cos(((o.phi(ts) - (omega * ts)) - ((omegadot * (ts ** 2.0)) / 2.0))))
o_ys = (o.R(ts) * numpy.sin(((o.phi(ts) - (omega * ts)) - ((omegadot * (ts ** 2.0)) / 2.0))))
op_xs = op.x(ts)
op_ys = op.y(ts)
assert (numpy.amax(numpy.fabs((o_xs - op_xs))) < tol), f'Integrating an orbit in the acceleratingly-rotating LSR frame does not agree with the equivalent orbit in the inertial frame for method {method}'
assert (numpy.amax(numpy.fabs((o_ys - op_ys))) < tol), f'Integrating an orbit in the acceleratingly-rotating LSR frame does not agree with the equivalent orbit in the inertial frame for method {method}'
check_orbit(method='odeint', tol=1e-06)
check_orbit(method='dop853_c', tol=1e-09)
return None |
def generate_reverberated_wav_scp(wav_scp, durations, output_dir, room_dict, pointsource_noise_list, iso_noise_dict, foreground_snr_array, background_snr_array, num_replicas, include_original, prefix, speech_rvb_probability, shift_output, isotropic_noise_addition_probability, pointsource_noise_addition_probability, max_noises_per_minute):
foreground_snrs = list_cyclic_iterator(foreground_snr_array)
background_snrs = list_cyclic_iterator(background_snr_array)
corrupted_wav_scp = {}
keys = sorted(wav_scp.keys())
if include_original:
start_index = 0
else:
start_index = 1
for i in range(start_index, (num_replicas + 1)):
for recording_id in keys:
wav_original_pipe = wav_scp[recording_id]
if (len(wav_original_pipe.split()) == 1):
wav_original_pipe = 'cat {0} |'.format(wav_original_pipe)
speech_dur = durations[recording_id]
max_noises_recording = math.floor(((max_noises_per_minute * speech_dur) / 60))
reverberate_opts = generate_reverberation_opts(room_dict, pointsource_noise_list, iso_noise_dict, foreground_snrs, background_snrs, speech_rvb_probability, isotropic_noise_addition_probability, pointsource_noise_addition_probability, speech_dur, max_noises_recording)
if ((reverberate_opts == '') or (i == 0)):
wav_corrupted_pipe = '{0}'.format(wav_original_pipe)
else:
wav_corrupted_pipe = '{0} wav-reverberate --shift-output={1} {2} - - |'.format(wav_original_pipe, shift_output, reverberate_opts)
new_recording_id = get_new_id(recording_id, prefix, i)
corrupted_wav_scp[new_recording_id] = wav_corrupted_pipe
write_dict_to_file(corrupted_wav_scp, (output_dir + '/wav.scp')) |
class LSTMModel(nn.Module):
def __init__(self, ntoken=10, ninp=512, nhid=256, nlayers=5, dropout=0.5):
super(LSTMModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_((- initrange), initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_((- initrange), initrange)
def forward(self, input):
        input = torch.ones((3, 10), dtype=torch.int32)  # fixed toy input, overriding the argument
        h0 = torch.randn(self.nlayers, 10, self.nhid)
        c0 = torch.randn(self.nlayers, 10, self.nhid)
hidden = (h0, c0)
emb = self.encoder(input)
(output, hidden) = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return (decoded, hidden) |