code stringlengths 101 5.91M |
|---|
def write_wav_scpf(fn, utts, audio_dir, audio_ext='.flac'):
    """Write a Kaldi-style wav.scp file with sox resampling pipes.

    Each line maps an utterance id to a sox command that converts
    ``{audio_dir}/{utt}{audio_ext}`` to 16 kHz, 16-bit, mono WAV on stdout.

    Args:
        fn: output path for the scp file (written as UTF-8 bytes).
        utts: iterable of utterance ids; written in sorted order.
        audio_dir: directory containing the audio files.
        audio_ext: '.flac' or '.wav'.

    Raises:
        ValueError: if audio_ext is not a supported extension.  (The
        original silently left ``wav_str`` unbound for unknown extensions,
        producing a NameError — or worse, a stale line — at runtime.)
    """
    if audio_ext not in ('.flac', '.wav'):
        raise ValueError('Unsupported audio_ext: {!r}'.format(audio_ext))
    # sox's -t format name is the extension without the dot.
    fmt = audio_ext.lstrip('.')
    with open(fn, 'wb') as f:
        for utt in sorted(utts):
            wav_str = '{utt} sox -t {fmt} {dir}/{utt}{ext} -t wav -r 16k -b 16 --channels 1 - |\n'.format(
                utt=utt, fmt=fmt, dir=audio_dir, ext=audio_ext)
            f.write(wav_str.encode('utf-8'))
class ShakeDropFunction(torch.autograd.Function):
    """Autograd function that appears to implement ShakeDrop-style
    regularization: during training, with probability ``p_drop`` the input
    is scaled by a random per-sample coefficient (``alpha`` on the forward
    pass, an independent ``beta`` on the backward pass); at inference the
    input is deterministically scaled by ``(1 - p_drop)``.

    NOTE(review): ``forward``/``backward`` are written in the legacy
    (non-``@staticmethod``) autograd style; modern PyTorch requires
    ``@staticmethod`` here — confirm against the PyTorch version in use.
    """

    def forward(ctx, x, training=True, p_drop=0.5, alpha_range=[(- 1), 1]):
        # NOTE(review): mutable default `alpha_range` is only read here, so
        # it is harmless, but a tuple would be safer.
        ctx.training = training
        ctx.p_drop = p_drop
        if training:
            # Bernoulli gate: 1 passes the input through unchanged, 0
            # applies the random alpha scaling below.
            gate = torch.empty(1, device=x.device).bernoulli_((1 - p_drop))
            ctx.save_for_backward(gate)
            if (gate.item() == 0):
                # One alpha per batch element, broadcast over C/H/W dims.
                alpha = torch.empty(x.size(0), device=x.device).uniform_(*alpha_range)
                alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x)
                return (alpha * x)
            else:
                return x
        else:
            # Inference: scale by the expected keep-probability.
            return ((1 - p_drop) * x)

    def backward(ctx, grad_output):
        training = ctx.training
        p_drop = ctx.p_drop
        if training:
            gate = ctx.saved_tensors[0]
            if (gate.item() == 0):
                # Backward uses a fresh random beta in [0, 1), independent
                # of the forward alpha.
                beta = torch.empty(grad_output.size(0), device=grad_output.device).uniform_(0, 1)
                beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
                # Variable() is a legacy no-op wrapper in modern PyTorch.
                beta = Variable(beta)
                return ((beta * grad_output), None, None, None)
            else:
                return (grad_output, None, None, None)
        else:
            return (((1 - p_drop) * grad_output), None, None, None)
def recall(gold, pred):
    """Compute recall of predicted edge labels against gold annotations.

    Both arguments map sentence indices to containers of edge labels.
    Returns (recall, true_positives, false_negatives); recall is 0 when
    there are no gold labels at all.
    """
    assert (len(gold) == len(pred))
    true_pos = 0
    false_neg = 0
    for sent_id in pred:
        predicted = pred[sent_id]
        for label in gold[sent_id]:
            if label in predicted:
                true_pos += 1
            else:
                false_neg += 1
    total = true_pos + false_neg
    if total == 0:
        return (0, true_pos, false_neg)
    return (true_pos / total, true_pos, false_neg)
def DistributedFairseqModel(args, model, process_group=None):
    """Wrap *model* in the distributed wrapper selected by *args*.

    Supported combinations (from the visible branches):
      * DDP + c10d backend  -> torch.nn.parallel.DistributedDataParallel
      * DDP + no_c10d       -> LegacyDistributedDataParallel
      * SlowMo              -> gossip.GossipDataParallel (needs gossip lib)

    Returns an instance of a thin subclass that forwards unknown attribute
    lookups to the wrapped module, so callers can treat the wrapper as the
    model itself.
    """
    assert isinstance(model, nn.Module)
    if ((args.distributed_wrapper == 'DDP') and (args.ddp_backend == 'c10d')):
        ddp_class = nn.parallel.DistributedDataParallel
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb, process_group=process_group)
        # Only pass kwargs the installed torch version actually supports.
        # NOTE(review): inspect.getargspec is deprecated (removed in Python
        # 3.11); inspect.getfullargspec / signature is the modern choice.
        if ('check_reduction' in inspect.getargspec(ddp_class)[0]):
            init_kwargs['check_reduction'] = True
        if ('find_unused_parameters' in inspect.getargspec(ddp_class)[0]):
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif ((args.distributed_wrapper == 'DDP') and (args.ddp_backend == 'no_c10d')):
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(module=model, world_size=args.distributed_world_size, buffer_size=(2 ** 28), process_group=process_group)
    elif (args.distributed_wrapper == 'SlowMo'):
        if _GOSSIP_DISABLED:
            raise ImportError('Cannot find gossip library. Please install from: github.com/facebookresearch/stochastic_gradient_push')
        ddp_class = gossip.GossipDataParallel
        # Default slowmo momentum scales up with the world size when unset.
        if (args.slowmo_momentum is None):
            if (args.distributed_world_size <= 16):
                args.slowmo_momentum = 0.0
            elif (args.distributed_world_size <= 32):
                args.slowmo_momentum = 0.2
            elif (args.distributed_world_size <= 64):
                args.slowmo_momentum = 0.5
            else:
                args.slowmo_momentum = 0.6
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, nprocs_per_node=args.nprocs_per_node, slowmo_momentum=args.slowmo_momentum, localsgd=(args.slowmo_algorithm == 'LocalSGD'), localsgd_frequency=args.localsgd_frequency)
    else:
        raise ValueError(('Unknown --ddp-backend: ' + args.ddp_backend))

    class _DistributedFairseqModel(ddp_class):
        # Forwards attribute access to the wrapped module when the wrapper
        # itself does not define the attribute.

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)
    return _DistributedFairseqModel(**init_kwargs)
def load_pretrained_model(model_name='resnet18', device='cuda', num_params=3, inplace=True, data_path='/scratch/users/vision/data/cosmo'):
    """Build a single-channel-input resnet18/vgg16, load weights from
    *data_path*, freeze all parameters, and return the model in eval mode.

    NOTE(review): the vgg16 branch hard-codes 3 output units (ignoring
    ``num_params``) and always loads from ``data_path`` (no None check),
    unlike the resnet18 branch — confirm whether this asymmetry is
    intentional.
    """
    if (model_name == 'resnet18'):
        model_ft = models.resnet18(pretrained=False)
        # Replace the stem conv to accept 1-channel (grayscale) input.
        model_ft.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_params)
        if (inplace == False):
            # Disable in-place ReLU everywhere (needed e.g. for gradient
            # attribution methods that require unmodified activations).
            mods = list(model_ft.modules())
            for mod in mods:
                t = str(type(mod))
                if ('ReLU' in t):
                    mod.inplace = False
        model_ft = model_ft.to(device)
        if (data_path is not None):
            model_ft.load_state_dict(torch.load(opj(data_path, 'resnet18_state_dict')))
    elif (model_name == 'vgg16'):
        model_ft = models.vgg16(pretrained=False)
        # 1-channel input stem for vgg16.
        model_ft.features[0] = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        num_ftrs = 4096
        model_ft.classifier[6] = nn.Linear(num_ftrs, 3)
        model_ft = model_ft.to(device)
        model_ft.load_state_dict(torch.load(opj(data_path, 'vgg16_adam_9_0.012')))
    model_ft.eval()
    # Freeze the network: inference / feature-extraction use only.
    for param in model_ft.parameters():
        param.requires_grad = False
    return model_ft
# Restored decorator form: the leading '@' appears to have been stripped
# during formatting (a bare `_registry('Basic')` call registers nothing).
@_registry('Basic')
class BasicNAS(NASBase):
    """Basic NAS approach driven by user-supplied train/eval functions.

    ``train_func`` and ``eval_func`` are write-only properties: the user
    assigns callables to them, and :meth:`estimate` uses the stored
    callables to train and then score a candidate model.
    """

    def __init__(self, conf_fname_or_obj, search_space=None, model_builder=None):
        NASBase.__init__(self, search_space=search_space, model_builder=model_builder)
        self._train_func = None
        self._eval_func = None
        self.init_by_cfg(conf_fname_or_obj)

    def execute(self, res_save_path=None):
        """Run the search and return its result."""
        return self.search(res_save_path)

    def estimate(self, model):
        """Train *model* with the user train function, then score it with
        the user eval function."""
        assert ((self._train_func is not None) and (self._eval_func is not None)), 'train_func and eval_func must be set.'
        self._train_func(model)
        return self._eval_func(model)

    def init_by_cfg(self, conf_fname_or_obj):
        """Load configuration from a config-file path or a NASConfig object.

        Bug fix: the file branch previously stored the parsed config in
        ``self.conf`` while the rest of the class reads ``self.config``;
        both branches now populate ``self.config``.
        """
        if isinstance(conf_fname_or_obj, str):
            if os.path.isfile(conf_fname_or_obj):
                self.config = Conf(conf_fname_or_obj).usr_cfg
            else:
                raise FileNotFoundError('{} is not a file, please provide a NAS config file path.'.format(conf_fname_or_obj))
        elif isinstance(conf_fname_or_obj, NASConfig):
            self.config = conf_fname_or_obj.config
        else:
            raise NotImplementedError('Please provide a str path to the config file or an object of NASConfig.')
        assert (self.config.nas is not None), 'nas section must be set'
        self.init_search_cfg(self.config.nas)

    # Restored property/setter decorators (stripped '@' in the original
    # made `_func.setter` a NameError at class-body evaluation).
    @property
    def train_func(self):
        # Write-only: reading it is a usage error.
        assert False, 'Should not try to get the value of `train_func` attribute.'

    @train_func.setter
    def train_func(self, user_train_func):
        self._train_func = user_train_func

    @property
    def eval_func(self):
        # Write-only: reading it is a usage error.
        assert False, 'Should not try to get the value of `eval_func` attribute.'

    @eval_func.setter
    def eval_func(self, user_eval_func):
        self._eval_func = user_eval_func

    def __repr__(self):
        return 'BasicNAS'
def _clip_actions(algo, actions):
epsilon = 1e-06
lower = (torch.from_numpy(algo._env_spec.action_space.low).to(algo.device) + epsilon)
upper = (torch.from_numpy(algo._env_spec.action_space.high).to(algo.device) - epsilon)
clip_up = (actions > upper).float()
clip_down = (actions < lower).float()
with torch.no_grad():
clip = (((upper - actions) * clip_up) + ((lower - actions) * clip_down))
return (actions + clip) |
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d and return a single
    equivalent Conv2d (inference-time fusion using the BN running stats)."""
    fused = nn.Conv2d(conv.in_channels, conv.out_channels,
                      kernel_size=conv.kernel_size, stride=conv.stride,
                      padding=conv.padding, groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)
    # Fold BN scale into the conv weights: W' = diag(gamma / std) @ W
    inv_std_scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    flat_weight = conv.weight.clone().view(conv.out_channels, -1)
    fused.weight.copy_(torch.mm(inv_std_scale, flat_weight).view(fused.weight.size()))
    # Fold BN shift into the bias: b' = gamma/std * b + (beta - gamma*mean/std)
    base_bias = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(inv_std_scale, base_bias.reshape(-1, 1)).reshape(-1) + bn_shift)
    return fused
def test_isotropic_hernquist_sigmar():
    """Sample the isotropic Hernquist DF and check sigma_r against the
    spherical Jeans equation over ~2 decades in radius."""
    hernquist_pot = potential.HernquistPotential(amp=2.3, a=1.3)
    hernquist_df = isotropicHernquistdf(pot=hernquist_pot)
    numpy.random.seed(10)
    samples = hernquist_df.sample(n=300000)
    scale = hernquist_pot._scale
    check_sigmar_against_jeans(samples, hernquist_pot, 0.05, beta=0.0,
                               rmin=scale / 10.0, rmax=scale * 10.0, bins=31)
    return None
def splice(s):
    """Space-separate each Chinese character from runs of non-Chinese text.

    Non-Chinese characters are grouped into contiguous runs; each run and
    each individual Chinese character becomes one token, and the tokens are
    joined with single spaces.
    """
    tokens = []
    pending = []
    for ch in s:
        if is_all_chinese(ch):
            # Flush any accumulated non-Chinese run before the Chinese char.
            if pending:
                tokens.append(''.join(pending))
                pending = []
            tokens.append(ch)
        else:
            pending.append(ch)
    if pending:
        tokens.append(''.join(pending))
    return ' '.join(tokens)
class RLlibMetricLogger(DefaultCallbacks):
    """RLlib callback that records custom metrics over each episode.

    Per-step values are accumulated in ``episode.user_data[metric_id]`` and
    reduced into ``episode.custom_metrics`` when the episode ends.
    """

    def __init__(self, metrics: Mapping[(str, 'Metric')]) -> None:
        super().__init__()
        self.metrics = metrics

    def on_episode_start(self, *, episode, **kwargs) -> None:
        # Fresh accumulator list per metric for the new episode.
        for metric_id in self.metrics.keys():
            episode.user_data[metric_id] = []

    def on_episode_step(self, *, base_env, episode, **kwargs) -> None:
        # NOTE(review): only the first sub-environment is logged — confirm
        # this is intended when envs are vectorized.
        env = base_env.envs[0]
        logging_helper(env, self.metrics, episode.user_data)

    def on_episode_end(self, *, episode, **kwargs) -> None:
        for (metric_id, metric) in self.metrics.items():
            episode.custom_metrics[metric_id] = metric.reduce(episode.user_data[metric_id], mode='train')

    def __call__(self) -> 'RLlibMetricLogger':
        # RLlib expects a callbacks *factory*; returning self lets an
        # already-configured instance be passed where a class is expected.
        return self
def trace_back(error_msg):
    """Format *error_msg* together with the current exception traceback.

    Intended to be called from inside an ``except`` block; outside one,
    ``traceback.format_exc()`` yields the string 'NoneType: None'.
    """
    exc = traceback.format_exc()
    # NOTE(review): the file's original indentation was lost, so the exact
    # leading whitespace of the literal's second line is uncertain.
    msg = f'''[Error]: {error_msg}.
[Traceback]: {exc}'''
    return msg
def gen_space_config(opt_lib_group, opt_lib_methods, total_process, included_opts=[]):
    """Build the categorical hyper-parameter search-space configuration.

    Args:
        opt_lib_group: mapping of group name -> list of candidate opt names.
        opt_lib_methods: mapping of opt name -> object with a ``disabled`` flag.
        total_process: total number of worker processes; single-process runs
            skip the distributed-only groups ('zero', 'parallel_mode').
        included_opts: opt names the user pre-selected; a pre-selected opt
            pins its group to that single candidate.

    Returns:
        A list of ``{'name', 'type', 'categories'}`` dicts, one per group.

    NOTE(review): ``included_opts=[]`` is a mutable default; it is only
    read here, so it is kept for interface compatibility.
    """
    space_config = []
    for (group_name, opt_candidates_raw) in opt_lib_group.items():
        opt_candidates = []
        # Groups that never enter the search space (handled elsewhere or
        # explicitly disabled).
        if ((group_name == 'module_replace') and opt_lib_methods['module_replace'].disabled):
            continue
        if (group_name == 'parallel'):
            continue
        if (group_name == 'half'):
            continue
        # amp is redundant once a half-precision opt was pre-selected.
        if ((group_name == 'amp') and (('half' in included_opts) or (('half', 'bf16') in included_opts) or (('half', 'fp16') in included_opts))):
            continue
        # Distributed-only groups are meaningless with a single process.
        if ((total_process == 1) and (group_name == 'zero')):
            continue
        if ((total_process == 1) and (group_name == 'parallel_mode')):
            continue
        if (group_name == 'dynamo'):
            continue
        # Collect enabled candidates; a pre-selected opt pins the group.
        for opt_name in opt_candidates_raw:
            if (opt_name in included_opts):
                opt_candidates = [opt_name]
                break
            if (not opt_lib_methods[opt_name].disabled):
                opt_candidates.append(opt_name)
        if (group_name == 'parallel_mode'):
            # NOTE: both branches currently emit the same pure data-parallel
            # layout (tensor=1); tensor-parallel sizing is not implemented.
            if opt_lib_methods['tensor_parallel'].disabled:
                space_config.append({'name': group_name, 'type': 'cat', 'categories': [json.dumps({'data': total_process, 'tensor': 1})]})
            else:
                space_config.append({'name': group_name, 'type': 'cat', 'categories': [json.dumps({'data': total_process, 'tensor': 1})]})
        elif (group_name == 'zero'):
            if (len(opt_candidates) > 0):
                if ((len(opt_candidates) == 1) and (opt_candidates[0] in included_opts)):
                    # Bug fix: the original compared the *list* to 'zero2'
                    # (always False); compare the single candidate instead.
                    if (opt_candidates[0] == 'zero2'):
                        # zero2 is searched via its fsdp implementation.
                        space_config.append({'name': group_name, 'type': 'cat', 'categories': ['zero2_fsdp']})
                    else:
                        space_config.append({'name': group_name, 'type': 'cat', 'categories': opt_candidates})
                elif ('zero2' in opt_candidates):
                    # Replace plain zero2 with its fsdp variant in the space.
                    opt_candidates.remove('zero2')
                    if (group_name in included_opts):
                        var_cats = (['zero2_fsdp'] + opt_candidates)
                    else:
                        var_cats = ((['zero2_fsdp'] + opt_candidates) + ['NotChosen'])
                    space_config.append({'name': group_name, 'type': 'cat', 'categories': var_cats})
                else:
                    if (group_name in included_opts):
                        var_cats = opt_candidates
                    else:
                        var_cats = (opt_candidates + ['NotChosen'])
                    space_config.append({'name': group_name, 'type': 'cat', 'categories': var_cats})
        elif (len(opt_candidates) > 0):
            if ((len(opt_candidates) == 1) and (opt_candidates[0] in included_opts)):
                # Pinned group: only the pre-selected candidate.
                space_config.append({'name': group_name, 'type': 'cat', 'categories': opt_candidates})
            else:
                # Unpinned groups get a 'NotChosen' escape category.
                if (group_name in included_opts):
                    var_cats = opt_candidates
                else:
                    var_cats = (opt_candidates + ['NotChosen'])
                space_config.append({'name': group_name, 'type': 'cat', 'categories': var_cats})
    return space_config
class Args(Tap):
    """Typed CLI arguments (typed-argument-parser style); Tap derives the
    command-line options from these class attribute annotations."""
    data_path: str  # required (no default)
    smiles_column: str = None  # NOTE(review): None default despite the `str` annotation — Tap reads annotations; verify Optional[str] was intended
    features_generator: str = 'rdkit_2d_normalized'
    save_path: str  # required (no default)
    save_frequency: int = 10000
    restart: bool = False
    sequential: bool = False

    def add_arguments(self) -> None:
        # Restrict --features_generator to the registered generators.
        self.add_argument('--features_generator', choices=get_available_features_generators())
class VocabInfoTest(tf.test.TestCase):
    """Tests vocab.get_vocab_info on a temporary 3-word vocabulary file."""

    def setUp(self):
        super(VocabInfoTest, self).setUp()
        tf.logging.set_verbosity(tf.logging.INFO)
        self.vocab_list = ['Hello', '.', 'Bye']
        self.vocab_file = test_utils.create_temporary_vocab_file(self.vocab_list)

    def tearDown(self):
        super(VocabInfoTest, self).tearDown()
        # Closing the NamedTemporaryFile-style handle also removes it.
        self.vocab_file.close()

    def test_vocab_info(self):
        vocab_info = vocab.get_vocab_info(self.vocab_file.name)
        self.assertEqual(vocab_info.vocab_size, 3)
        self.assertEqual(vocab_info.path, self.vocab_file.name)
        # Special tokens are appended after the base vocabulary (ids 3..5),
        # giving total_size = base vocab + 3.
        self.assertEqual(vocab_info.special_vocab.UNK, 3)
        self.assertEqual(vocab_info.special_vocab.SEQUENCE_START, 4)
        self.assertEqual(vocab_info.special_vocab.SEQUENCE_END, 5)
        self.assertEqual(vocab_info.total_size, 6)
def text_ontonotes(tree, filename='filename', words=None, tree_text=None, depth=0):
    """Render *tree* in OntoNotes CoNLL-style column format.

    Recursive: inner calls (words is not None) only accumulate the
    bracketed structure string (``(LABEL_`` ... ``)`` with ``*`` marking
    each word) and the (word, label) list; the outermost call
    (resolve=True) then re-walks the structure string and emits one output
    row per word.
    """
    resolve = False
    if (words is None):
        # Outermost call: own the accumulators and do the final formatting.
        resolve = True
        words = []
        tree_text = ''
    if (tree.word is None):
        # Internal node: open a bracket labelled with the constituent.
        tree_text += (('(' + tree.label) + '_')
    else:
        # Leaf: record the token and mark its position with '*'.
        words.append((tree.word, tree.label))
        tree_text += '*'
    for subtree in tree.subtrees:
        tree_text = text_ontonotes(subtree, filename, words, tree_text, depth)
    if (tree.word is None):
        tree_text += ')'
    if resolve:
        ans = ''
        cpos = 0
        cword = 0
        # Walk the structure string; each '*' corresponds to words[cword].
        while (cpos < len(tree_text)):
            ctext = ''
            # Opening brackets up to and including this word's '*'.
            while ((cpos < len(tree_text)) and (tree_text[cpos] != '*')):
                ctext += tree_text[cpos]
                cpos += 1
            ctext += tree_text[cpos]
            cpos += 1
            # Trailing closing brackets belong to the same row.
            while ((cpos < len(tree_text)) and (tree_text[cpos] == ')')):
                ctext += tree_text[cpos]
                cpos += 1
            # NOTE(review): the sentence-id column is hard-coded to 0.
            ans += ('%s %9s %9d %9s %9s %9s' % (filename, 0, cword, words[cword][0], words[cword][1], ctext))
            # Remaining OntoNotes columns are emitted as placeholders.
            for val in ['-', '-', '-', '-', '*', '*', '*', '*', '*', '*', '-']:
                ans += (' %9s' % val)
            ans += '\n'
            cword += 1
        return ans
    else:
        return tree_text
# NOTE(review): this call looks like a decorator that lost its leading '@'
# during formatting (likely an image-comparison test decorator) — confirm
# against the original source.
_comparison(baseline_images=['3d_custom_order'], remove_text=False, extensions=['png'])
def test_3d_custom_order(grid_archive_3d):
    """Image test: 3-D grid archive plotted with a custom measure order."""
    plt.figure(figsize=(8, 6))
    parallel_axes_plot(grid_archive_3d, measure_order=[1, 2, 0])
def AccWordStatsForUtterance(split_lines_of_utt, segments_for_utterance):
    """Accumulate per-word stats into the module-level word_count_pair.

    For each non-'<eps>' reference word (field 6 of a ctm-ish line),
    index 0 counts total occurrences and index 1 counts occurrences that
    fall outside every selected segment.
    """
    global word_count_pair
    covered = [False] * len(split_lines_of_utt)
    for segment in segments_for_utterance:
        for idx in range(segment.start_index, segment.end_index):
            covered[idx] = True
    for in_segment, fields in zip(covered, split_lines_of_utt):
        ref_word = fields[6]
        if ref_word == '<eps>':
            continue
        word_count_pair[ref_word][0] += 1
        if not in_segment:
            word_count_pair[ref_word][1] += 1
# NOTE(review): this call looks like a dataset-registration decorator that
# lost its leading '@' during formatting.
_builder('ok_vqa')
class OKVQABuilder(COCOVQABuilder):
    """OK-VQA dataset builder; reuses COCOVQABuilder behavior with its own
    default config file."""
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/okvqa/defaults.yaml'}
def find_free_port() -> int:
    """Return a TCP port number that is currently free on localhost.

    Binds to port 0 so the OS picks a free port, then releases it.  The
    socket is managed by a ``with`` block so it is closed even if bind()
    raises (the original leaked the fd on that path).

    NOTE: inherently racy — another process may grab the port between this
    call and its eventual use.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 0))
        return sock.getsockname()[1]
# NOTE(review): this call looks like a processor-registration decorator
# that lost its leading '@' during formatting.
_processor('alpro_video_train')
class AlproVideoTrainProcessor(AlproVideoBaseProcessor):
    """Training-time video preprocessing for ALPRO: random resized crop,
    horizontal flip, RandAugment, uint8/tensor conversion, normalization."""

    def __init__(self, image_size=384, mean=None, std=None, min_scale=0.5, max_scale=1.0, n_frms=MAX_INT):
        super().__init__(mean=mean, std=std, n_frms=n_frms)
        self.image_size = image_size
        self.transform = transforms.Compose([transforms_video.RandomResizedCropVideo(image_size, scale=(min_scale, max_scale), interpolation_mode='bicubic'), transforms_video.RandomHorizontalFlipVideo(), ToTHWC(), VideoRandomAugment(2, 5, augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'Equalize', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), ToUint8(), transforms_video.ToTensorVideo(), self.normalize])

    def __call__(self, vpath):
        # 'headtail' sampling: frames drawn from the start and end of the clip.
        clip = load_video(video_path=vpath, n_frms=self.n_frms, height=self.image_size, width=self.image_size, sampling='headtail')
        return self.transform(clip)

    # NOTE(review): presumably intended as a @classmethod (cls parameter);
    # the decorator appears to have been lost during formatting.
    def from_config(cls, cfg=None):
        if (cfg is None):
            cfg = OmegaConf.create()
        image_size = cfg.get('image_size', 256)
        mean = cfg.get('mean', None)
        std = cfg.get('std', None)
        min_scale = cfg.get('min_scale', 0.5)
        max_scale = cfg.get('max_scale', 1.0)
        n_frms = cfg.get('n_frms', MAX_INT)
        return cls(image_size=image_size, mean=mean, std=std, min_scale=min_scale, max_scale=max_scale, n_frms=n_frms)
def main():
    """CLI entry point: merge several prediction files into one."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--saveas', metavar='S', type=str, required=True,
                            help='Name of the merged predictions file')
    arg_parser.add_argument('--filenames', nargs='+', type=str,
                            help='names of predictions files to merge separated by spaces')
    parsed = arg_parser.parse_args()
    merge_predictions(parsed.saveas, parsed.filenames)
def save_checkpoint(state, is_best, epoch, save_path='./'):
    """Persist a training checkpoint, plus periodic and best-model copies.

    Always overwrites checkpoint.pth.tar; keeps a numbered snapshot every
    10 epochs; on is_best, copies the latest checkpoint to a best-model
    file whose name depends on whether epoch >= 90.
    """
    print("=> saving checkpoint '{}'".format(epoch))
    latest_path = os.path.join(save_path, 'checkpoint.pth.tar')
    torch.save(state, latest_path)
    # Numbered snapshot every 10 epochs.
    if epoch % 10 == 0:
        torch.save(state, os.path.join(save_path, 'checkpoint_%03d.pth.tar' % epoch))
    if not is_best:
        return
    best_name = ('model_best_in_100_epochs.pth.tar' if epoch >= 90
                 else 'model_best_in_090_epochs.pth.tar')
    shutil.copyfile(latest_path, os.path.join(save_path, best_name))
def magspec_vad(wav, n_fft=1024, hop_length=256):
    """Crude energy-based voice-activity detection on the magnitude STFT.

    A frame counts as voiced when its summed (peak-normalized) magnitude is
    at least 0.1.  Returns (segment_start_samples, segment_end_samples) as
    arrays of sample offsets (frame index * hop_length).
    """
    spec = librosa.stft(wav, n_fft=n_fft, hop_length=hop_length, center=False)
    (magnitude, _phase) = librosa.magphase(spec)
    magnitude = magnitude / np.max(magnitude)
    frame_energy = magnitude.sum(0)
    # Binarize: 1 for voiced frames, 0 otherwise.
    voiced = np.where(frame_energy >= 0.1, 1.0, 0.0)
    # Pad with zeros so segments touching either edge still produce edges.
    edges = np.diff(np.pad(voiced, (1, 1)))
    starts = np.where(edges == 1)[0]
    ends = np.where(edges == -1)[0]
    return (starts * hop_length, ends * hop_length)
def plot_curves_parser(txtfile, multi=True):
    """Parse a training log text file and save loss-curve plots as a PNG.

    With multi=True, four losses (total/iou/stop/class) are tracked for
    both train and val and plotted in four sub-axes.

    NOTE(review): the plotting section always references ax1..ax4 and
    train_losses/val_losses, which are created only when multi=True — the
    multi=False path would raise NameError at plotting time; confirm
    whether the single-loss path was ever finished.
    """
    lines = read_lines(txtfile)
    if multi:
        val_losses = {'total': [], 'iou': [], 'stop': [], 'class': []}
        train_losses = {'total': [], 'iou': [], 'stop': [], 'class': []}
    else:
        val_loss = []
        train_loss = []
    print('Scanning text file...')
    for line in lines:
        if (('(val)' in line) or ('(train)' in line)):
            if multi:
                (total_loss, iou_loss, stop_loss, class_loss) = extract_losses(line)
                total_loss = float(total_loss.rstrip())
                iou_loss = float(iou_loss.rstrip())
                stop_loss = float(stop_loss.rstrip())
                class_loss = float(class_loss.rstrip())
            else:
                # Single-loss format: tab-separated, 'loss:<value>' in col 1.
                chunks = line.split('\t')
                loss = float(chunks[1].split('loss:')[1].rstrip())
            if ('(val)' in line):
                if multi:
                    val_losses['total'].append(total_loss)
                    val_losses['class'].append(class_loss)
                    val_losses['iou'].append(iou_loss)
                    val_losses['stop'].append(stop_loss)
                else:
                    val_loss.append(loss)
            elif ('(train)' in line):
                if multi:
                    train_losses['total'].append(total_loss)
                    train_losses['class'].append(class_loss)
                    train_losses['iou'].append(iou_loss)
                    train_losses['stop'].append(stop_loss)
                else:
                    train_loss.append(loss)
    print('Done.')
    if multi:
        nb_epoch = len(val_losses['total'])
        (f, (ax1, ax2, ax3, ax4)) = plt.subplots(1, 4, figsize=(30, 10))
    else:
        nb_epoch = len(val_loss)
    t = np.arange(0, nb_epoch, 1)
    # Train curves are truncated to nb_epoch so a partially-logged final
    # epoch does not break x-axis alignment with the val curves.
    ax1.plot(t, train_losses['total'][0:nb_epoch], 'r-*')
    ax1.plot(t, val_losses['total'], 'b-*')
    ax1.set_ylabel('loss')
    ax1.set_xlabel('epoch')
    ax1.set_title('Total loss')
    ax1.legend(['train_loss', 'val_loss'], loc='upper right')
    ax2.plot(t, train_losses['iou'][0:nb_epoch], 'r-*')
    ax2.plot(t, val_losses['iou'], 'b-*')
    ax2.set_ylabel('loss')
    ax2.set_xlabel('epoch')
    ax2.set_title('iou loss')
    ax2.legend(['train_loss', 'val_loss'], loc='upper right')
    ax3.plot(t, train_losses['stop'][0:nb_epoch], 'r-*')
    ax3.plot(t, val_losses['stop'], 'b-*')
    ax3.set_ylabel('loss')
    ax3.set_xlabel('epoch')
    ax3.set_title('Stop loss')
    ax3.legend(['train_loss', 'val_loss'], loc='upper right')
    ax4.plot(t, train_losses['class'][0:nb_epoch], 'r-*')
    ax4.plot(t, val_losses['class'], 'b-*')
    ax4.set_ylabel('loss')
    ax4.set_xlabel('epoch')
    ax4.set_title('Class loss')
    ax4.legend(['train_loss', 'val_loss'], loc='upper right')
    # Output PNG next to the input file (same basename).
    save_file = (txtfile[:(- 4)] + '.png')
    plt.savefig(save_file)
    print(('Figure saved in %s' % save_file))
def transform_state_dict_to_dtype(original_state_dict, dtype='bf16'):
    """Return a deep copy of a state dict with floating-point tensors cast
    to the requested dtype ('bf16' or 'fp32'); integer/bool tensors and the
    input dict itself are left untouched."""
    converted = copy.deepcopy(original_state_dict)
    for key in original_state_dict:
        if not converted[key].is_floating_point():
            continue
        if dtype == 'bf16':
            converted[key] = original_state_dict[key].bfloat16()
        if dtype == 'fp32':
            converted[key] = original_state_dict[key].float()
    return converted
class Lin(nn.Module):
    """1x1 convolution followed by BatchNorm2d and in-place ReLU."""

    def __init__(self, in_channels, out_channels):
        super(Lin, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
# NOTE(review): this call looks like a decorator that lost its leading '@'
# during formatting (likely an image-comparison test decorator).
_comparison(baseline_images=['2d_long_square'], remove_text=False, extensions=['png'])
def test_2d_long_square(sliding_archive_2d_long):
    """Image test: heatmap of a long 2-D sliding-boundaries archive drawn
    with equal axis aspect."""
    plt.figure(figsize=(8, 6))
    sliding_boundaries_archive_heatmap(sliding_archive_2d_long, aspect='equal')
class SingleStepGaussian():
    """Diagonal Gaussian with fixed scalar sigma around given means,
    exposing sample / log_prob / grad-of-log-prob helpers."""

    def __init__(self, means, sigma=0.0001):
        self.sigma = sigma
        scale = torch.ones_like(means) * sigma
        self.dist = torch.distributions.Normal(means, scale)

    def sample(self, condition_dict=None):
        # condition_dict is accepted for interface parity but unused.
        return self.dist.sample()

    def log_prob(self, x, condition_dict=None):
        # Sum per-dimension log-densities over the last axis.
        return self.dist.log_prob(x).sum(-1)

    def grad_logp(self, x, condition_dict=None):
        logp = self.log_prob(x)
        (grad,) = torch.autograd.grad(logp.sum(), x, retain_graph=True, create_graph=True)
        return grad
def main(args):
    """Quantization-aware-training (QAT) entry point for ResNet9
    (torchvision quantization reference-script layout).

    Phases: data loading -> float32 checkpoint load -> fuse + fake-quant
    preparation -> (optional) post-training quantization or eval-only ->
    QAT loop with observer/BN freezing and best-checkpoint saving.
    """
    if args.output_dir:
        utils.mkdir(args.output_dir)
    utils.init_distributed_mode(args)
    print(args)
    if args.cifar10:
        # CIFAR images are 32x32; override ImageNet-style sizes.
        args.val_resize_size = 32
        args.val_crop_size = 32
        args.train_crop_size = 32
    if (args.post_training_quantize and args.distributed):
        raise RuntimeError('Post training quantization example should not be performed on distributed mode')
    if (args.backend not in torch.backends.quantized.supported_engines):
        raise RuntimeError(('Quantized backend not supported: ' + str(args.backend)))
    torch.backends.quantized.engine = args.backend
    device = torch.device(args.device)
    torch.backends.cudnn.benchmark = True

    print('Loading data')
    train_dir = os.path.join(args.data_path, 'train')
    val_dir = os.path.join(args.data_path, 'val')
    (dataset, dataset_test, train_sampler, test_sampler) = load_data(train_dir, val_dir, args)
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, num_workers=args.workers, pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=args.eval_batch_size, sampler=test_sampler, num_workers=args.workers, pin_memory=True)

    print('Creating model', args.model)
    prefix = 'quantized_'
    num_classes = len(dataset.classes)
    print(dataset.classes)
    model_name = args.model
    if (model_name == 'resnet9'):
        model = ResNet9(3, num_classes)
        # NOTE(review): hard-coded cluster path for the float32 baseline.
        base_model_fp32_path = '/usr/scratch2/vilan1/janniss/model_checkpoints/resnet9-lr-0.001-no-dropout-flatten/model_best-93.82.pth'
        checkpoint = torch.load(base_model_fp32_path, map_location='cpu')
        model.load_state_dict(checkpoint['model'], strict=False)
    else:
        raise RuntimeError(('Unknown model name: ' + str(model_name)))
    if (not model_name.startswith(prefix)):
        model_name = (prefix + model_name)
    model.to(device)

    if (not (args.test_only or args.post_training_quantize)):
        # QAT preparation: fuse modules, attach B-bit fake-quant observers
        # (histogram-based), then rewrite the model with prepare_qat.
        model.fuse_model(is_qat=True)
        bitwidth = 8
        intB_act_fq = FakeQuantize.with_args(observer=HistogramObserver, quant_min=0, quant_max=int(((2 ** bitwidth) - 1)), dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=False)
        intB_weight_fq = FakeQuantize.with_args(observer=HistogramObserver, quant_min=int(((- (2 ** bitwidth)) / 2)), quant_max=int((((2 ** bitwidth) / 2) - 1)), dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
        intB_qconfig = QConfig(activation=intB_act_fq, weight=intB_weight_fq)
        model.qconfig = intB_qconfig
        torch.ao.quantization.prepare_qat(model, inplace=True)
    if (args.distributed and args.sync_bn):
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    criterion = nn.CrossEntropyLoss()
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if args.resume:
        # Resume full training state from a checkpoint.
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = (checkpoint['epoch'] + 1)

    if args.post_training_quantize:
        # PTQ path: calibrate on a small subset, convert, save, evaluate.
        ds = torch.utils.data.Subset(dataset, indices=list(range((args.batch_size * args.num_calibration_batches))))
        data_loader_calibration = torch.utils.data.DataLoader(ds, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
        model.eval()
        model.fuse_model(is_qat=False)
        model.qconfig = torch.ao.quantization.get_default_qconfig(args.backend)
        torch.ao.quantization.prepare(model, inplace=True)
        print('Calibrating')
        evaluate(model, criterion, data_loader_calibration, device=device, print_freq=1)
        torch.ao.quantization.convert(model, inplace=True)
        if args.output_dir:
            print('Saving quantized model')
            if utils.is_main_process():
                torch.save(model.state_dict(), os.path.join(args.output_dir, 'quantized_post_train_model.pth'))
        print('Evaluating post-training quantized model')
        evaluate(model, criterion, data_loader_test, device=device)
        return

    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return

    # QAT training loop.
    model.apply(torch.ao.quantization.enable_observer)
    model.apply(torch.ao.quantization.enable_fake_quant)
    start_time = time.time()
    best_acc = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        print('Starting training for epoch', epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args)
        lr_scheduler.step()
        with torch.inference_mode():
            # After warm-up epochs, freeze quant ranges and BN statistics.
            if (epoch >= args.num_observer_update_epochs):
                print('Disabling observer for subseq epochs, epoch = ', epoch)
                model.apply(torch.ao.quantization.disable_observer)
            if (epoch >= args.num_batch_norm_update_epochs):
                print('Freezing BN for subseq epochs, epoch = ', epoch)
                model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
            print('Evaluate QAT model')
            (acc, _, _) = evaluate(model, criterion, data_loader_test, device=device, log_suffix='QAT')
            # Convert a CPU copy to a true integer model for checkpointing.
            quantized_eval_model = copy.deepcopy(model_without_ddp)
            quantized_eval_model.eval()
            quantized_eval_model.to(torch.device('cpu'))
            torch.ao.quantization.convert(quantized_eval_model, inplace=True)
        model.train()
        if args.output_dir:
            checkpoint = {'model': model_without_ddp.state_dict(), 'eval_model': quantized_eval_model.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args}
            if (acc > best_acc):
                best_acc = acc
                utils.save_on_master(checkpoint, os.path.join(args.output_dir, f'model_best_{best_acc:.2f}.pth'))
                print('Saving model with best accuracy ', best_acc)
            utils.save_on_master(checkpoint, os.path.join(args.output_dir, 'checkpoint.pth'))
            print('Saving models after epoch ', epoch)
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f'Training time {total_time_str}')
    print(f'Best accuracy {best_acc:.3f}')
def get_string_lvl(array, index2str):
    """Render a 2-D index array as text, one line per row, mapping each
    cell value through index2str; every row (including the last) ends with
    a newline."""
    lines = []
    for y in range(array.shape[0]):
        row_text = ''.join(index2str[array[y][x]] for x in range(array.shape[1]))
        lines.append(row_text + '\n')
    return ''.join(lines)
# NOTE(review): this call looks like a registration decorator that lost
# its leading '@' during formatting.
_registry(op_types='Pad')
class QPadOperator(QOperator):
    """Quantized ONNX Pad operator wrapper; behavior is inherited from
    QOperator unchanged."""

    def __init__(self, onnx_node, children, initializers):
        super().__init__(onnx_node, children, initializers)
def main():
    """Entry point: build a position-dependent lexicon from a lexiconp file."""
    cli_args = get_args()
    lexicon_entries = read_lexiconp(cli_args.lexiconp)
    write_position_dependent_lexicon(lexicon_entries, cli_args.separator)
def l2norm(X, dim=(- 1), eps=1e-08):
    """L2-normalize X along *dim*; eps is added to the norm for numerical
    stability with near-zero vectors."""
    denom = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
    return torch.div(X, denom)
def _duration_to_string(duration, precision=2):
if (duration > 1):
return (str(round(duration, precision)) + ' s')
elif ((duration * (10 ** 3)) > 1):
return (str(round((duration * (10 ** 3)), precision)) + ' ms')
elif ((duration * (10 ** 6)) > 1):
return (str(round((duration * (10 ** 6)), precision)) + ' us')
else:
return str(duration) |
def synthesize_training_data(nexamples, vocab_size, min_length=10, max_length=30, seed=None):
    """Generate random token-id sequences for training.

    Each example is a Python list of ints in [0, vocab_size) whose length
    is drawn from [min_length, max_length) (np.random.randint's exclusive
    upper bound).  If *seed* is given, set_random_seed is called first.
    """
    if seed is not None:
        set_random_seed(seed)
    examples = []
    for _ in range(nexamples):
        seq_len = np.random.randint(min_length, max_length)
        examples.append(np.random.randint(0, vocab_size, size=seq_len).tolist())
    return examples
class StableDiffusionParadigmsPipeline(metaclass=DummyObject):
    """Placeholder class emitted when the required backends are missing;
    any attempted use raises via requires_backends."""
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    # NOTE(review): in the usual dummy-object pattern these two are
    # @classmethods (cls parameter); the decorators appear to have been
    # lost during formatting.
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
class MAMLFirstOrderOptimizer(Optimizer):
    """First-order TF1 optimizer wrapper: builds a minimize op for a loss
    tensor and runs it for a fixed number of epochs on full-batch feeds."""

    def __init__(self, tf_optimizer_cls=tf.train.AdamOptimizer, tf_optimizer_args=None, learning_rate=0.001, max_epochs=1, tolerance=1e-06, num_minibatches=1, verbose=False):
        self._target = None
        if (tf_optimizer_args is None):
            tf_optimizer_args = dict()
        tf_optimizer_args['learning_rate'] = learning_rate
        self._tf_optimizer = tf_optimizer_cls(**tf_optimizer_args)
        self._max_epochs = max_epochs
        # NOTE(review): _tolerance and _num_minibatches are stored but never
        # used by the methods visible here.
        self._tolerance = tolerance
        self._num_minibatches = num_minibatches
        self._verbose = verbose
        self._all_inputs = None
        self._train_op = None
        self._loss = None
        self._input_ph_dict = None

    def build_graph(self, loss, target, input_ph_dict):
        """Wire the minimize op for *loss* over *target*'s parameters."""
        assert isinstance(loss, tf.Tensor)
        assert hasattr(target, 'get_params')
        assert isinstance(input_ph_dict, dict)
        self._target = target
        self._input_ph_dict = input_ph_dict
        self._loss = loss
        self._train_op = self._tf_optimizer.minimize(loss, var_list=target.get_params())

    def loss(self, input_val_dict):
        """Evaluate the loss tensor for the given input values."""
        sess = tf.get_default_session()
        feed_dict = self.create_feed_dict(input_val_dict)
        loss = sess.run(self._loss, feed_dict=feed_dict)
        return loss

    def optimize(self, input_val_dict):
        """Run max_epochs training steps; return the pre-update loss.

        NOTE(review): `if (not loss_before_opt)` also treats an exact 0.0
        first-epoch loss as unset; `is None` would be stricter.
        """
        sess = tf.get_default_session()
        feed_dict = self.create_feed_dict(input_val_dict)
        loss_before_opt = None
        for epoch in range(self._max_epochs):
            if self._verbose:
                logger.log(('Epoch %d' % epoch))
            (loss, _) = sess.run([self._loss, self._train_op], feed_dict)
            if (not loss_before_opt):
                loss_before_opt = loss
        return loss_before_opt
def ra2idx(rng, agl):
    """Map a (range, angle) pair to nearest-bin indices on the module-level
    range_grid / angle_grid lookup tables."""
    range_idx = find_nearest(range_grid, rng)[0]
    angle_idx = find_nearest(angle_grid, agl)[0]
    return (range_idx, angle_idx)
class Candidates(object):
    """Generates chunk candidates from input data via beam search, then
    counts candidate frequencies with a multiprocessing parse pass."""

    def __init__(self, language, Load, association_dict=None, freq_threshold=1, delta_threshold=0.1):
        self.language = language
        self.Load = Load
        self.freq_threshold = freq_threshold
        self.delta_threshold = delta_threshold
        self.association_dict = association_dict
        self.Parse = Parser(self.Load)

    def get_candidates(self, input_data):
        """Return {candidate: frequency} for beam-search candidates found
        in *input_data*."""
        candidates = []
        starting = time.time()
        BS = BeamSearch(self.delta_threshold, self.freq_threshold, self.association_dict)
        # One beam search per input item, then flatten and deduplicate.
        candidates = [BS.beam_search(x) for x in input_data]
        candidates = list(ct.concat(candidates))
        print('\t\tChunks before duplicate removal: ', len(candidates))
        candidates = list(set(candidates))
        print('\t\tChunks after duplicate removal: ', len(candidates))
        print('\t\tNow parsing candidates to find frequencies')
        # Batch candidates (1000 per task) and fan out across processes.
        candidates_pool = list(ct.partition_all(1000, candidates))
        pool_instance = mp.Pool(processes=min(25, mp.cpu_count()), maxtasksperchild=1)
        frequencies = pool_instance.map(partial(process_parsing, data=input_data), candidates_pool, chunksize=1)
        pool_instance.close()
        pool_instance.join()
        frequencies = list(ct.concat(frequencies))
        del candidates_pool
        final_candidates = {}
        # NOTE(review): `if True:` looks like a leftover threshold filter
        # (e.g. frequencies[i] >= self.freq_threshold) — confirm intent.
        for i in range(len(candidates)):
            if True:
                final_candidates[candidates[i]] = frequencies[i]
        print((((('\t\tExtracted: ' + str(len(final_candidates))) + ' in ') + str((time.time() - starting))) + ' seconds.'))
        return final_candidates
def bar_plot(ax, data, colors=None, total_width=0.8, single_width=1, legend=True, ns=''):
    """Draw a grouped bar chart of {series_name: values} on *ax*.

    NOTE(review): the `colors` cycle computed below is never used — bar
    colors come from the module-level planner_stl mapping instead; the
    local `planner` variable is also unused.
    """
    if (colors is None):
        colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    n_bars = len(data)
    print(data)
    if (n_bars > 0):
        # Width of one bar so the whole group spans total_width.
        bar_width = (total_width / n_bars)
        bars = []
        for (i, (name, values)) in enumerate(data.items()):
            # Horizontal offset of this series within the group; the base
            # x position differs (4 vs 1) for the 'kuka' namespace.
            if ('kuka' in ns):
                x_offset = ((4 + ((i - (n_bars / 2)) * bar_width)) + (bar_width / 2))
            else:
                x_offset = ((1 + ((i - (n_bars / 2)) * bar_width)) + (bar_width / 2))
            planner = ''
            for (x, y) in enumerate(values):
                clr = planner_stl[name]
                bar = ax.bar((x + x_offset), y, width=(bar_width * single_width), color=clr)
            # One legend handle per series (last bar drawn for it).
            bars.append(bar[0])
        if legend:
            ax.legend(bars, data.keys(), loc=0)
        # Annotate every bar with its value.
        for bars in ax.containers:
            ax.bar_label(bars, fmt='%.2f', fontweight='bold')
    else:
        print('No data to plot: ', data)
def get_cls_doc(elt, full_name: str) -> tuple:
    """Return the ``(name, args)`` documentation parts for class *elt*.

    The args string is extended with a link to the parent class when the
    class inherits from something other than ``object``.

    Note: the original annotation said ``-> str`` but the function has always
    returned a 2-tuple; the annotation is corrected here.
    """
    # inspect.getclasstree's last entry holds (class, bases); take the first base.
    parent_class = inspect.getclasstree([elt])[(- 1)][0][1][0]
    (name, args) = format_ft_def(elt, full_name)
    if (parent_class != object):
        args += f' :: {link_type(parent_class, include_bt=True)}'
    return (name, args)
class TestRGBfromDisp():
    """Unit tests for ``rgb_from_disp``, the disparity-to-RGB colormap helper."""

    def test_default(self):
        """Defaults should equal cmap='turbo', vmin=0, vmax=95th percentile per item."""
        x = torch.rand(2, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x, cmap='turbo', vmin=0, vmax=[np.percentile(x[0], 95), np.percentile(x[1], 95)])
        assert np.allclose(out, out2), 'Incorrect default params.'

    def test_range(self):
        """vmin/vmax should clip values outside the range before colormapping."""
        arr = np.array([[0, 0, 0.5, 0.5, 1, 1]])
        out = rgb_from_disp(arr).squeeze()
        out2 = rgb_from_disp(arr, vmin=0.5, vmax=1).squeeze()
        # Equal inputs map to equal colors; values below vmin clip to vmin.
        assert np.allclose(out2[2], out2[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out2[3], out2[4])), 'Incorrect sanity check for different value.'
        assert np.allclose(out2[0], out2[2]), 'Incorrect clipping to min value.'
        assert np.allclose(out[0], out2[0]), 'Inconsistent min value.'
        assert (not np.allclose(out2[2], out[2])), 'Incorrect clipping to min value.'
        out3 = rgb_from_disp(arr, vmin=0, vmax=0.5).squeeze()
        # Values above vmax clip to vmax.
        assert np.allclose(out3[2], out3[3]), 'Incorrect sanity check for same value.'
        assert (not np.allclose(out3[2], out3[0])), 'Incorrect sanity check for different value.'
        assert np.allclose(out3[2], out3[4]), 'Incorrect clipping to max value.'
        assert np.allclose(out[5], out3[5]), 'Inconsistent max value.'
        assert (not np.allclose(out3[2], out[2])), 'Incorrect clipping to max value.'

    def test_inv(self):
        """invert=True on x should equal no inversion on 1/x."""
        x = torch.rand(2, 1, 10, 20)
        x_inv = (1 / x)
        out = rgb_from_disp(x, invert=True)
        out2 = rgb_from_disp(x_inv, invert=False)
        assert np.allclose(out, out2), 'Incorrect inversion.'

    def test_shape(self):
        """Batched (4D) and unbatched inputs should agree; output ndim tracks input ndim."""
        x = torch.rand(1, 1, 10, 20)
        out = rgb_from_disp(x)
        out2 = rgb_from_disp(x.squeeze())
        assert np.allclose(out[0], out2), 'Incorrect out with different ndim.'
        assert (out.ndim == 4), 'Incorrect dim for 4D input.'
        assert (out2.ndim == 3), 'Incorrect dim for 2D input.'

    def test_np(self):
        """Torch (NCHW) and numpy (NHWC) inputs should produce matching outputs."""
        x = torch.rand(2, 1, 10, 20)
        x_np = x.permute(0, 2, 3, 1).numpy()
        out = rgb_from_disp(x)
        out = out.permute(0, 2, 3, 1).numpy()
        out2 = rgb_from_disp(x_np)
        assert np.allclose(out, out2), 'Incorrect conversion to np.'
def rgb_loader(path):
    """Open the image file at *path* and return it converted to RGB mode."""
    with open(path, 'rb') as handle, Image.open(handle) as image:
        return image.convert('RGB')
def palette_val(palette):
    """Normalize a palette of 0-255 RGB colors to tuples of 0-1 floats.

    Args:
        palette: iterable of colors, each an iterable of channel values in [0, 255].

    Returns:
        list[tuple]: the same colors with each channel divided by 255.
    """
    # Comprehension replaces the manual append loop; behavior is unchanged.
    return [tuple(channel / 255 for channel in color) for color in palette]
def get_checkpoint_files(model_name_or_path, local_rank, token=None):
    """Return paths of cached checkpoint files (*.bin / *.pt) under the model's repo dir."""
    repo_root = get_repo_root(model_name_or_path, local_rank, token)
    # The glob pattern matches both '.bin' and '.pt' extensions.
    found = []
    for entry in Path(repo_root).rglob('*.[bp][it][n]'):
        if entry.is_file():
            found.append(str(entry))
    return found
# NOTE(review): the three decorator lines below look mangled by extraction —
# likely `@pytest.mark.parametrize(...)` / `@mltest.parametrize.ml`; confirm upstream.
.parametrize('kernel_size, out_channels, in_channels, with_inp_importance, with_neighbors_importance, with_normalization', [(1, 2, 7, True, False, False), (2, 1, 1, False, False, False), (3, 5, 3, False, True, True), (33, 3, 4, False, True, False)])
.ml
.parametrize('dtype', [np.float32])
def test_sparseconv_gradient(ml, dtype, kernel_size, out_channels, in_channels, with_inp_importance, with_neighbors_importance, with_normalization):
    """Check analytic gradients of sparse_conv / sparse_conv_transpose against numeric ones."""
    # Looser tolerances for float32; epsilon is the finite-difference step size.
    if (dtype == np.float64):
        tolerance = {'atol': 1e-05, 'rtol': 0.01, 'epsilon': 1e-06}
    elif (dtype == np.float32):
        tolerance = {'atol': 0.01, 'rtol': 0.1, 'epsilon': 0.001}
    rng = np.random.RandomState(123)
    conv_attrs = {'normalize': with_normalization}
    filters = rng.random(size=(kernel_size, in_channels, out_channels)).astype(dtype)
    num_inp = 33
    num_out = 16
    inp_features = rng.uniform(size=(num_inp, in_channels)).astype(dtype)
    # An empty array signals "no importance weighting" to the op.
    if with_inp_importance:
        inp_importance = rng.random(num_inp).astype(dtype)
    else:
        inp_importance = np.empty((0,)).astype(dtype)
    # Build a random CSR-style neighbor list: each output gets up to kernel_size neighbors.
    neighbors_row_splits = np.zeros(((num_out + 1),), dtype=np.int64)
    for i in range(num_out):
        neighbors_row_splits[(i + 1)] = (rng.randint((kernel_size + 1)) + neighbors_row_splits[i])
    neighbors_index = np.zeros((neighbors_row_splits[(- 1)],), dtype=np.int32)
    neighbors_kernel_index = np.zeros((neighbors_row_splits[(- 1)],), dtype=np.uint8)
    for i in range(num_out):
        start = neighbors_row_splits[i]
        end = neighbors_row_splits[(i + 1)]
        # Unique kernel element and unique input index per neighbor of this output.
        neighbors_kernel_index[start:end] = rng.choice(kernel_size, [(end - start)], replace=False)
        neighbors_index[start:end] = rng.choice(num_inp, [(end - start)], replace=False)
    # Invert the neighbor list for the transpose convolution path.
    arange = np.arange(neighbors_index.shape[0])
    (inv_neighbors_index, inv_neighbors_row_splits, inv_arange) = mltest.run_op(ml, ml.device, False, ml.ops.invert_neighbors_list, num_inp, neighbors_index, neighbors_row_splits, arange)
    inv_neighbors_kernel_index = neighbors_kernel_index[inv_arange]
    if with_neighbors_importance:
        # Importance values centered around zero.
        neighbors_importance = (rng.random(neighbors_index.shape[0]).astype(dtype) - 0.5)
        neighbors_importance_sum = mltest.run_op(ml, ml.device, False, ml.ops.reduce_subarrays_sum, neighbors_importance, neighbors_row_splits)
        inv_neighbors_importance = neighbors_importance[inv_arange]
    else:
        neighbors_importance = np.empty((0,), dtype=dtype)
        neighbors_importance_sum = np.empty((0,), dtype=dtype)
        inv_neighbors_importance = np.empty((0,), dtype=dtype)

    # Closures over the fixed neighbor graph; each varies one tensor for gradient checking.
    def sparse_conv_infeats(inp_features):
        return mltest.run_op(ml, ml.device, True, ml.ops.sparse_conv, filters, inp_features, inp_importance, neighbors_index, neighbors_kernel_index, neighbors_importance, neighbors_row_splits, **conv_attrs)

    def sparse_conv_filter(filters):
        return mltest.run_op(ml, ml.device, True, ml.ops.sparse_conv, filters, inp_features, inp_importance, neighbors_index, neighbors_kernel_index, neighbors_importance, neighbors_row_splits, **conv_attrs)

    def sparse_conv_filter_backprop(out_features_gradient, filters):
        return mltest.run_op_grad(ml, ml.device, True, ml.ops.sparse_conv, filters, '', out_features_gradient, filters=filters, inp_features=inp_features, inp_importance=inp_importance, neighbors_index=neighbors_index, neighbors_kernel_index=neighbors_kernel_index, neighbors_importance=neighbors_importance, neighbors_row_splits=neighbors_row_splits, **conv_attrs)

    def sparse_conv_infeat_backprop(out_features_gradient, inp_features):
        return mltest.run_op_grad(ml, ml.device, True, ml.ops.sparse_conv, inp_features, '', out_features_gradient, filters=filters, inp_features=inp_features, inp_importance=inp_importance, neighbors_index=neighbors_index, neighbors_kernel_index=neighbors_kernel_index, neighbors_importance=neighbors_importance, neighbors_row_splits=neighbors_row_splits, **conv_attrs)

    def sparse_conv_transpose_filter(filters):
        return mltest.run_op(ml, ml.device, True, ml.ops.sparse_conv_transpose, filters, inp_importance, y_arr, neighbors_index, neighbors_importance_sum, neighbors_row_splits, inv_neighbors_index, inv_neighbors_kernel_index, inv_neighbors_importance, inv_neighbors_row_splits, **conv_attrs)

    def sparse_conv_transpose_infeats(inp_features):
        # Note the transposed filters: transpose conv swaps in/out channels.
        return mltest.run_op(ml, ml.device, True, ml.ops.sparse_conv_transpose, filters.transpose([0, 2, 1]), inp_importance, inp_features, neighbors_index, neighbors_importance_sum, neighbors_row_splits, inv_neighbors_index, inv_neighbors_kernel_index, inv_neighbors_importance, inv_neighbors_row_splits, **conv_attrs)

    def sparse_conv_transpose_filter_backprop(out_features_gradient, filters):
        return mltest.run_op_grad(ml, ml.device, True, ml.ops.sparse_conv_transpose, filters, '', out_features_gradient, filters=filters, out_importance=inp_importance, inp_features=y_arr, inp_neighbors_index=neighbors_index, inp_neighbors_importance_sum=neighbors_importance_sum, inp_neighbors_row_splits=neighbors_row_splits, neighbors_index=inv_neighbors_index, neighbors_kernel_index=inv_neighbors_kernel_index, neighbors_importance=inv_neighbors_importance, neighbors_row_splits=inv_neighbors_row_splits, **conv_attrs)

    def sparse_conv_transpose_infeat_backprop(out_features_gradient, inp_features):
        return mltest.run_op_grad(ml, ml.device, True, ml.ops.sparse_conv_transpose, inp_features, '', out_features_gradient, filters=filters.transpose([0, 2, 1]), out_importance=inp_importance, inp_features=inp_features, inp_neighbors_index=neighbors_index, inp_neighbors_importance_sum=neighbors_importance_sum, inp_neighbors_row_splits=neighbors_row_splits, neighbors_index=inv_neighbors_index, neighbors_kernel_index=inv_neighbors_kernel_index, neighbors_importance=inv_neighbors_importance, neighbors_row_splits=inv_neighbors_row_splits, **conv_attrs)

    # Forward output is reused as the input of the transpose convolution checks.
    y_arr = sparse_conv_infeats(inp_features)
    dbg = {}
    filter_gradient_OK = check_gradients(filters, sparse_conv_filter, sparse_conv_filter_backprop, debug_outputs=dbg, **tolerance)
    assert filter_gradient_OK
    feature_gradient_OK = check_gradients(inp_features, sparse_conv_infeats, sparse_conv_infeat_backprop, debug_outputs=dbg, **tolerance)
    assert feature_gradient_OK
    transpose_filter_gradient_OK = check_gradients(filters.transpose([0, 2, 1]), sparse_conv_transpose_filter, sparse_conv_transpose_filter_backprop, debug_outputs=dbg, **tolerance)
    assert transpose_filter_gradient_OK
    transpose_feature_gradient_OK = check_gradients(y_arr, sparse_conv_transpose_infeats, sparse_conv_transpose_infeat_backprop, debug_outputs=dbg, **tolerance)
    assert transpose_feature_gradient_OK
# NOTE(review): the line below looks like a mangled decorator (e.g. `@ex.config`
# in a sacred-style experiment) — confirm against the original source.
_config
def student_taskonomy_encoder_penultimate():
    """Config stub selecting a trainable TaskonomyEncoder as the learner model."""
    # Presumably the locals here are captured by the config framework — TODO confirm.
    cfg = {'learner': {'model': 'TaskonomyEncoder', 'model_kwargs': {'train': True, 'eval_only': False}}}
def plot_data_and_recon(data_tensor, recon_tensor):
    """Show data frames (top row) and their reconstructions (bottom row) side by side."""
    data_tensor = convert_tensor(data_tensor)
    recon_tensor = convert_tensor(recon_tensor)
    n_frames = data_tensor.shape[0]
    for idx in range(n_frames):
        col = idx + 1  # subplot columns are 1-based
        plt.subplot(2, n_frames, col)
        plt.imshow(data_tensor[idx].astype('uint8'))
        plt.axis('off')
        plt.title('t = ' + str(col))
        # Reconstruction goes directly below the corresponding data frame.
        plt.subplot(2, n_frames, col + n_frames)
        plt.imshow(recon_tensor[idx].astype('uint8'))
        plt.axis('off')
    plt.show()
def get_kpis(env: CityLearnEnv) -> pd.DataFrame:
    """Return the environment's evaluation KPIs as a tidy DataFrame.

    Keeps only the KPIs of interest, drops NaN rows, rounds values to 3 decimals,
    and renames the 'cost_function' column to 'kpi'.
    """
    wanted = ['electricity_consumption', 'cost', 'carbon_emissions', 'average_daily_peak', 'ramping', '1 - load_factor']
    frame = env.evaluate()
    frame = frame[frame['cost_function'].isin(wanted)].dropna()
    frame['value'] = frame['value'].round(3)
    return frame.rename(columns={'cost_function': 'kpi'})
def transpile_circuit(circuit, transpile_config):
    """Select a pass manager from *transpile_config* and run it on *circuit*.

    Priority: explicit pass_manager > optimization_level > coupling_map
    (device default) > simulator default.
    """
    level_managers = {
        0: level_0_pass_manager,
        1: level_1_pass_manager,
        2: level_2_pass_manager,
        3: level_3_pass_manager,
    }
    if transpile_config.pass_manager:
        pass_manager = transpile_config.pass_manager
    elif transpile_config.optimization_level is not None:
        level = transpile_config.optimization_level
        if level not in level_managers:
            raise TranspilerError('optimization_level can range from 0 to 3.')
        pass_manager = level_managers[level](transpile_config)
    elif transpile_config.coupling_map:
        pass_manager = default_pass_manager(transpile_config)
    else:
        pass_manager = default_pass_manager_simulator(transpile_config)
    return pass_manager.run(circuit)
class Attention(nn.Module):
    """Multi-head self-attention over inputs shaped (B, T, N, C).

    Args:
        dim: embedding dimension C (must be divisible by num_heads).
        num_heads: number of attention heads.
        qkv_bias: whether the fused QKV projection has a bias.
        attn_drop: dropout rate on attention weights.
        proj_drop: dropout rate after the output projection.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Standard scaled dot-product attention scaling: 1/sqrt(head_dim).
        self.scale = head_dim ** (-0.5)
        self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """Apply attention along the N axis independently for each t in T."""
        (B, T, N, C) = x.shape
        # (B, T, N, 3C) -> (3, B, heads, T, N, head_dim)
        qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5)
        (q, k, v) = qkv.unbind(0)
        # FIX: the matmul operators were missing in the source
        # (it read `q k.transpose(...)` and `attn v`, which is a syntax error).
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        # NOTE(review): permute(0, 2, 3, 4, 1) yields (B, T, N, head_dim, heads);
        # the conventional head-merge order is (0, 2, 3, 1, 4). Kept as in source
        # (both reshape to (B, T, N, C)) — confirm against the upstream implementation.
        x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
def dict_mean(dicts):
    """Average a sequence of dicts key-wise.

    Assumes every dict contains the keys of the first one.
    """
    count = len(dicts)
    return {key: sum(d[key] for d in dicts) / count for key in dicts[0]}
class LatentSpacePolicy(BasePolicy):
    """Policy whose sampled latents can be exponentially smoothed across steps."""

    def __init__(self, *args, smoothing_coefficient=None, **kwargs):
        # smoothing_coefficient: EMA weight alpha in [0, 1]; None (or 0) disables smoothing.
        super(LatentSpacePolicy, self).__init__(*args, **kwargs)
        assert ((smoothing_coefficient is None) or (0 <= smoothing_coefficient <= 1))
        self._smoothing_alpha = (smoothing_coefficient or 0)
        # Beta rescales the smoothed latents. NOTE(review): the denominator
        # (1 - alpha) is zero when alpha == 1, which the assert above permits —
        # confirm callers never pass exactly 1.
        self._smoothing_beta = (np.sqrt((1.0 - np.power(self._smoothing_alpha, 2.0))) / (1.0 - self._smoothing_alpha))
        self._reset_smoothing_x()
        self._smooth_latents = False

    def _reset_smoothing_x(self):
        # Running EMA state; one row matching the policy output shape.
        self._smoothing_x = np.zeros((1, *self._output_shape))

    def actions_np(self, conditions):
        """Return numpy actions for *conditions*, applying latent smoothing when enabled."""
        if self._deterministic:
            return self.deterministic_actions_model.predict(conditions)
        elif (self._smoothing_alpha == 0):
            # Smoothing disabled: sample actions directly.
            return self.actions_model.predict(conditions)
        else:
            (alpha, beta) = (self._smoothing_alpha, self._smoothing_beta)
            raw_latents = self.latents_model.predict(conditions)
            # EMA update of the latent state, then rescale by beta.
            self._smoothing_x = ((alpha * self._smoothing_x) + ((1.0 - alpha) * raw_latents))
            latents = (beta * self._smoothing_x)
            return self.actions_model_for_fixed_latents.predict([*conditions, latents])

    def reset(self):
        """Clear the smoothing state (call at episode boundaries)."""
        self._reset_smoothing_x()
class Config(NamedTuple):
    """Immutable training hyper-parameter configuration."""
    seed: int = 3431          # RNG seed
    batch_size: int = 32
    # FIX: was annotated `int` but the default (and semantics) are a float learning rate.
    lr: float = 5e-05
    n_epochs: int = 10
    warmup: float = 0.1       # presumably the LR warmup fraction — TODO confirm
    save_steps: int = 100     # checkpoint interval (steps)
    total_steps: int = 100000
    data_parallel: bool = False
    comments: str = ''        # free-form run description
class TestSNNBiasFit(TrainDiffPOSNN, GenDiffSigmoidSNNWithoutKernel, DiffTestBase, unittest.TestCase):
    """End-to-end check that SNN bias parameters are recovered when kernel weights are frozen."""

    def mod_params(self):
        """Shrink the training problem so the test runs quickly."""
        self.n_epochs = 10
        self.sample_size = 100
        self.length = 50
        self.obj_func_kwargs = {'n_pos': 50, 'n_neg': 50, 'n_sampling': 1, 'beta': 1.0}

    def preprocess(self):
        """Copy the generator's kernel weights into the trainable model and freeze them."""
        self.trainable_model.params['kernel_weight'].data = deepcopy(self.gen_model.params['kernel_weight'].data)
        self.trainable_model.params['kernel_weight'].requires_grad = False

    def check_fit(self):
        """Observed-neuron biases should approximately match the generator's biases."""
        print(' * true snn\n', self.gen_model)
        print(' * learned snn\n', self.trainable_model)
        self.assertTrue(torch.allclose(self.trainable_model.params['bias'][:self.n_obs_neurons], self.gen_model.params['bias'][:self.n_obs_neurons], atol=0.2, rtol=0.5))
def test_config_build_detector():
    """Build every detector config under mmdet's config directory and sanity-check it."""
    from mmcv import Config
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print(f'Found config_dpath = {config_dpath}')
    import glob
    config_fpaths = list(glob.glob(join(config_dpath, '**', '*.py')))
    # Skip partial `_base_` configs, which are not buildable on their own.
    config_fpaths = [p for p in config_fpaths if (p.find('_base_') == (- 1))]
    config_names = [relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = Config.fromfile(config_fpath)
        # Touch the attributes so malformed configs fail early.
        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
        print(f'Building detector, config_fpath = {config_fpath}')
        # Avoid downloading pretrained weights during the test.
        if ('pretrained' in config_mod.model):
            config_mod.model['pretrained'] = None
        detector = build_detector(config_mod.model, train_cfg=config_mod.train_cfg, test_cfg=config_mod.test_cfg)
        assert (detector is not None)
        optimizer = build_optimizer(detector, config_mod.optimizer)
        assert isinstance(optimizer, torch.optim.Optimizer)
        if ('roi_head' in config_mod.model.keys()):
            # Two-stage detectors must expose bbox/mask capability flags consistently.
            assert (detector.roi_head.with_bbox and detector.with_bbox)
            assert (detector.roi_head.with_mask == detector.with_mask)
            head_config = config_mod.model['roi_head']
            _check_roi_head(head_config, detector.roi_head)
def read_lines(filepath):
    """Read *filepath* and return its lines with surrounding newline characters stripped."""
    with open(filepath, 'r') as handle:
        return [line.strip('\n') for line in handle]
# NOTE(review): the line below looks like a mangled decorator
# (probably `@_TFVolume.register('soft')`) — confirm against the original source.
_TFVolume.register('soft')
class TFSoftVolume(_TFVolume):
    """Soft (temperature-smoothed) volume computation for TF box tensors."""

    def __init__(self, log_scale: bool=True, volume_temperature: float=1.0) -> None:
        # volume_temperature controls how sharply the soft volume approximates the hard one.
        super().__init__(log_scale)
        self.volume_temperature = volume_temperature

    def __call__(self, box_tensor: TFBoxTensor) -> tf.Tensor:
        """Return the (optionally log-scale) soft volume of *box_tensor*."""
        return tf_soft_volume(box_tensor, volume_temperature=self.volume_temperature, log_scale=self.log_scale)
# NOTE(review): the line below looks like a mangled decorator
# (probably `@require_torch`) — confirm against the original source.
_torch
class AutoModelTest(unittest.TestCase):
    """Tests that the AutoModel factories resolve names/identifiers to the right BERT classes."""

    def test_model_from_pretrained(self):
        """AutoModel on a BERT checkpoint yields a BertModel with no loading issues."""
        logging.basicConfig(level=logging.INFO)
        # Only the first archive entry is exercised to keep the test fast.
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModel.from_pretrained(model_name)
            (model, loading_info) = AutoModel.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
            # No missing/unexpected/error keys during weight loading.
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

    def test_model_for_pretraining_from_pretrained(self):
        """AutoModelForPreTraining resolves to BertForPreTraining."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForPreTraining.from_pretrained(model_name)
            (model, loading_info) = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
            for value in loading_info.values():
                self.assertEqual(len(value), 0)

    def test_lmhead_model_from_pretrained(self):
        """AutoModelWithLMHead resolves to BertForMaskedLM."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelWithLMHead.from_pretrained(model_name)
            (model, loading_info) = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    def test_sequence_classification_model_from_pretrained(self):
        """AutoModelForSequenceClassification resolves to BertForSequenceClassification."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForSequenceClassification.from_pretrained(model_name)
            (model, loading_info) = AutoModelForSequenceClassification.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    def test_question_answering_model_from_pretrained(self):
        """AutoModelForQuestionAnswering resolves to BertForQuestionAnswering."""
        logging.basicConfig(level=logging.INFO)
        for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = AutoModelForQuestionAnswering.from_pretrained(model_name)
            (model, loading_info) = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        """A short model identifier loads the expected tiny BERT model."""
        logging.basicConfig(level=logging.INFO)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14830)
        self.assertEqual(model.num_parameters(only_trainable=True), 14830)

    def test_from_identifier_from_model_type(self):
        """An identifier of unknown name still dispatches by model type (RoBERTa here)."""
        logging.basicConfig(level=logging.INFO)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14830)
        self.assertEqual(model.num_parameters(only_trainable=True), 14830)

    def test_parents_and_children_in_mappings(self):
        """Mapping order must put parents before children so dispatch picks the most specific class."""
        mappings = (MODEL_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_WITH_LM_HEAD_MAPPING)
        for mapping in mappings:
            mapping = tuple(mapping.items())
            # No later (child) entry may subclass an earlier (parent) entry.
            for (index, (child_config, child_model)) in enumerate(mapping[1:]):
                for (parent_config, parent_model) in mapping[:(index + 1)]:
                    with self.subTest(msg='Testing if {} is child of {}'.format(child_config.__name__, parent_config.__name__)):
                        self.assertFalse(issubclass(child_config, parent_config))
                        self.assertFalse(issubclass(child_model, parent_model))
def posterize(pil_img, level):
    """Posterize *pil_img* with a strength derived from the augmentation *level*."""
    severity = int_parameter(sample_level(level), 4)
    # Fewer remaining bits (4 - severity) means a stronger effect.
    return ImageOps.posterize(pil_img, 4 - severity)
def print_diff(diff_lines, use_color):
    """Write diff lines to stdout, optionally running them through the colorizer."""
    lines = colorize(diff_lines) if use_color else diff_lines
    sys.stdout.writelines(lines)
# NOTE(review): the line below looks like a mangled decorator
# (probably `@register_model('dummy_model')`) — confirm against the original source.
_model('dummy_model')
class DummyModel(FairseqLanguageModel):
    """Minimal fairseq language model used as a stand-in (e.g. for benchmarking)."""

    def __init__(self, args, encoder):
        super().__init__(encoder)
        self.args = args

    # NOTE(review): no `self` parameter — likely missing a `@staticmethod` decorator; confirm.
    def add_args(parser):
        """Register model-specific command-line arguments."""
        parser.add_argument('--num-layers', type=int, default=24)
        parser.add_argument('--embed-dim', type=int, default=1024)

    # NOTE(review): `cls` parameter — likely missing a `@classmethod` decorator; confirm.
    def build_model(cls, args, task):
        """Build the model from the task's target dictionary and CLI args."""
        encoder = DummyEncoder(num_embed=len(task.target_dictionary), embed_dim=args.embed_dim, num_layers=args.num_layers)
        return cls(args, encoder)

    def forward(self, src_tokens, masked_tokens=None, **kwargs):
        # Delegates to `self.decoder` — presumably the base class aliases the
        # constructor's encoder as `decoder`; confirm against fairseq's base class.
        return self.decoder(src_tokens, masked_tokens=masked_tokens)
def normalization(planes, norm='bn'):
    """Return a 3D normalization layer for *planes* channels.

    Supported types: 'bn' (BatchNorm3d), 'gn' (GroupNorm with 4 groups),
    'in' (InstanceNorm3d). Raises ValueError for anything else.
    """
    builders = {
        'bn': lambda: nn.BatchNorm3d(planes),
        'gn': lambda: nn.GroupNorm(4, planes),
        'in': lambda: nn.InstanceNorm3d(planes),
    }
    if norm not in builders:
        raise ValueError('normalization type {} is not supported'.format(norm))
    return builders[norm]()
class TestGemm(object):
    """Check the BigDL Gemm layer against a Gemm node loaded from an ONNX model."""

    def test_gemm(self):
        """Y = alpha*A@B + beta*C: the loaded ONNX model must match the hand-built layer."""
        mata_shape = [2, 7]
        matb_shape = [7, 4]
        matc_shape = [2, 4]
        output_shape = [2, 4]
        # Random but rounded scalars so failures print readably.
        alpha = np.round(np.random.rand(), 2)
        beta = np.round(np.random.rand(), 2)
        (trans_a, trans_b) = (0, 0)
        input_x = np.random.random(mata_shape)
        b_val = np.random.random(matb_shape)
        c_val = np.random.random(matc_shape)
        a = onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT, mata_shape)
        b = onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT, matb_shape)
        c = onnx.helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT, matc_shape)
        Y = onnx.helper.make_tensor_value_info('Y', onnx.TensorProto.FLOAT, output_shape)
        # b and c are baked into the graph as initializers; only `a` is fed at runtime.
        init_b = onnx.helper.make_tensor(name='b', data_type=onnx.TensorProto.FLOAT, dims=matb_shape, vals=b_val.flatten().tolist())
        init_c = onnx.helper.make_tensor(name='c', data_type=onnx.TensorProto.FLOAT, dims=matc_shape, vals=c_val.flatten().tolist())
        gemm_node = onnx.helper.make_node(op_type='Gemm', inputs=['a', 'b', 'c'], outputs=['Y'], alpha=alpha, beta=beta, transA=trans_a, transB=trans_b)
        onnx_graph = onnx.helper.make_graph(nodes=[gemm_node], name='test-gather', inputs=[a, b, c], outputs=[Y], initializer=[init_b, init_c])
        onnx_model = onnx.helper.make_model(onnx_graph, producer_name='ONNX')
        onnx.checker.check_model(onnx_model)
        bigdl_model = Gemm(b_val, c_val, alpha=alpha, beta=beta, trans_a=trans_a, trans_b=trans_b)
        loaded_model = load_model_proto(onnx_model)
        expected_out = bigdl_model.forward(input_x)
        loaded_out = loaded_model.forward(input_x)
        assert np.array_equal(expected_out, loaded_out)
def replace_keys(d: dict[(str, ...)], old: str, new: str, is_prfx: bool=False, is_sffx: bool=False) -> dict[(str, ...)]:
    """Return a copy of *d* whose keys have *old* replaced by *new*.

    The prefix/suffix flags are forwarded to ``replace_str`` to restrict where
    the replacement may occur.
    """
    renamed = {}
    for key, value in d.items():
        renamed[replace_str(key, old, new, is_prfx=is_prfx, is_sffx=is_sffx)] = value
    return renamed
# NOTE(review): the line below looks like a mangled decorator (e.g.
# `@deprecated(version='2.0')` or similar) — confirm against the original source.
(version='2.0')
class PyTorchCriterions(object):
    """Registry object holding the available PyTorch criterion (loss) constructors."""

    def __init__(self):
        # Start empty, then pull in the module-level criterion table.
        self.criterions = {}
        self.criterions.update(PYTORCH_CRITERIONS)
class ResBlock(nn.Module):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs with an optional downsampled skip.

    ``conv`` is a layer factory callable — assumed signature
    (in_channels, out_channels, ks=..., stride=..., pad=..., norm=..., relu=...) — TODO confirm.
    ``downsample`` is an optional (in_channels, expansion, stride) triple for the skip path.
    """

    def __init__(self, start_filts, planes, conv, stride=1, downsample=None, norm=None, relu='relu'):
        super(ResBlock, self).__init__()
        self.conv1 = conv(start_filts, planes, ks=1, stride=stride, norm=norm, relu=relu)
        self.conv2 = conv(planes, planes, ks=3, pad=1, norm=norm, relu=relu)
        # Final 1x1 expands channels by 4 and carries no activation
        # (the activation is applied after the residual add).
        self.conv3 = conv(planes, (planes * 4), ks=1, norm=norm, relu=None)
        self.relu = (nn.ReLU(inplace=True) if (relu == 'relu') else nn.LeakyReLU(inplace=True))
        if (downsample is not None):
            self.downsample = conv(downsample[0], (downsample[0] * downsample[1]), ks=1, stride=downsample[2], norm=norm, relu=None)
        else:
            self.downsample = None
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        # Match the skip path's shape/channels when a downsample conv is configured.
        if self.downsample:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
_loss
def charbonnier_loss_color(pred, target, eps=1e-06):
    """Charbonnier (smooth-L1-like) loss, averaged over color channels then all elements.

    The squared error is first averaged over dim 1 (channels), then passed
    through sqrt(. + eps) and reduced to a scalar mean.
    """
    residual = pred - target
    squared = residual * residual
    # Mean over the channel dimension, kept for broadcasting.
    channel_mean = torch.mean(squared, 1, True)
    return torch.mean(torch.sqrt(channel_mean + eps))
class MaskedLMConfig(FairseqDataclass):
    """Task configuration for masked language modeling; fields mirror the CLI flags,
    with per-field help text carried in the ``metadata``."""
    data: str = field(default=MISSING, metadata={'help': 'colon separated path to data directories list, will be iterated upon during epochs in round-robin manner'})
    sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(default='none', metadata={'help': 'If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.'})
    tokens_per_sample: int = field(default=1024, metadata={'help': 'max number of tokens per sample for LM dataset'})
    mask_prob: float = field(default=0.15, metadata={'help': 'probability of replacing a token with mask'})
    leave_unmasked_prob: float = field(default=0.1, metadata={'help': 'probability that a masked token is unmasked'})
    random_token_prob: float = field(default=0.1, metadata={'help': 'probability of replacing a token with a random token'})
    freq_weighted_replacement: bool = field(default=False, metadata={'help': 'sample random replacement words based on word frequencies'})
    mask_whole_words: bool = field(default=False, metadata={'help': 'mask whole words; you may also want to set --bpe'})
    mask_multiple_length: int = field(default=1, metadata={'help': 'repeat the mask indices multiple times'})
    mask_stdev: float = field(default=0.0, metadata={'help': 'stdev of the mask length'})
    shorten_method: SHORTEN_METHOD_CHOICES = field(default='none', metadata={'help': 'if not none, shorten sequences that exceed --tokens-per-sample'})
    shorten_data_split_list: str = field(default='', metadata={'help': 'comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)'})
    # Interpolated from the global common config rather than set directly.
    seed: int = II('common.seed')
def main(_):
    """Entry point: set up output paths, open a TF session, and train or test AUGAN."""
    set_path(args, args.experiment_name)
    # Allow TF to place ops flexibly and grow GPU memory on demand.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    with tf.Session(config=tfconfig) as sess:
        model = AUGAN(sess, args)
        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
def _contiguous_ranges(span_list):
output = []
for (_, span) in itertools.groupby(enumerate(span_list), (lambda p: (p[1] - p[0]))):
span = list(span)
output.append((span[0][1], span[(- 1)][1]))
return output |
class ImageNetSRTrain(ImageNetSR):
    """Super-resolution training split of ImageNet, restricted to precomputed HR indices."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_base(self):
        """Return the train subset whose indices are pickled in data/imagenet_train_hr_indices.p."""
        # Relative path: assumes the process runs from the repo root — TODO confirm.
        with open('data/imagenet_train_hr_indices.p', 'rb') as f:
            indices = pickle.load(f)
        dset = ImageNetTrain(process_images=False)
        return Subset(dset, indices)
class Params():
    """CLI/config container: loads the USPTO training reaction trees and the set of
    canonical molecules they contain."""

    def __init__(self):
        arguments = docopt(__doc__)
        self.experiments_config = arguments['--config']
        # Fixed RNG seed keeps the loaded tree order reproducible.
        self.training_trees = train_utils.load_tuple_trees('../../dataset_creation/data/uspto-train-depth_and_tree_tuples.pick', np.random.RandomState(10))
        self.training_data_smi_list = self._get_training_data()

    def _get_training_data(self):
        """Flatten the nested tuple trees into a sorted list of unique canonical SMILES."""
        all_train_molecules = set()

        def unpack(iterable):
            # Leaves are SMILES strings; interior nodes are tuples/lists.
            if isinstance(iterable, str):
                all_train_molecules.add(iterable)
            elif isinstance(iterable, (tuple, list)):
                for item in iterable:
                    unpack(item)
            else:
                raise RuntimeError
        unpack(self.training_trees)
        all_mols = [rdkit_general_ops.canconicalize(smi) for smi in tqdm.tqdm(all_train_molecules, desc='ensuring molecules canonical')]
        # Canonicalization may merge duplicates; dedupe and sort for determinism.
        all_mols = sorted(list(set(all_mols)))
        return all_mols
def test_traffic():
    """Smoke-test the Traffic simulator: initialize from a fixed ego state, then step it."""
    import numpy as np
    from dynamics_and_models import ReferencePath

    def _reset_init_state():
        # Sample a random start point along the straight reference path.
        ref_path = ReferencePath('straight')
        random_index = int(np.random.random() * (900 + 500)) + 700
        (x, y, phi) = ref_path.indexs2points(random_index)
        v = 8 * np.random.random()
        return dict(ego=dict(v_x=v, v_y=0, r=0, x=x.numpy(), y=y.numpy(), phi=phi.numpy(), l=4.8, w=2.2, routeID='du'))
    init_state = dict(ego=dict(v_x=8.0, v_y=0, r=0, x=(- 30), y=1.5, phi=180, l=4.8, w=2.2, routeID='dl'))
    traffic = Traffic(100.0, mode='training', init_n_ego_dict=init_state, training_task='left')
    traffic.init_traffic(init_state)
    traffic.sim_step()
    # FIX: source read `for i in range():`, which raises TypeError (range needs
    # an argument). 100 steps is a plausible smoke-test length — confirm the
    # originally intended count.
    for _ in range(100):
        traffic.sim_step()
def main(args):
    """TextureGAN training entry point: build data loaders, models, losses and
    optimizers, move everything to the selected GPU, and run the epoch loop."""
    # Map of human-readable VGG layer names to indices in vgg19.features.
    layers_map = {'relu4_2': '22', 'relu2_2': '8', 'relu3_2': '13', 'relu1_2': '4'}
    vis = visdom.Visdom(port=args.display_port)
    # Running loss history per loss term, used for visdom plotting.
    loss_graph = {'g': [], 'gd': [], 'gf': [], 'gpl': [], 'gpab': [], 'gs': [], 'd': [], 'gdl': [], 'dl': []}
    transforms = get_transforms(args)
    # In RGB mode a single pixel weight replaces the separate L/ab weights.
    if (args.color_space == 'rgb'):
        args.pixel_weight_ab = args.pixel_weight_rgb
        args.pixel_weight_l = args.pixel_weight_rgb
    rgbify = custom_transforms.toRGB()
    train_dataset = ImageFolder('train', args.data_path, transforms)
    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True)
    val_dataset = ImageFolder('val', args.data_path, transforms)
    # A fixed random subset of the validation set is used for display.
    indices = torch.randperm(len(val_dataset))
    val_display_size = args.batch_size
    val_display_sampler = SequentialSampler(indices[:val_display_size])
    val_loader = DataLoader(dataset=val_dataset, batch_size=val_display_size, sampler=val_display_sampler)
    # Pretrained VGG-19 provides content/style features for the perceptual losses.
    feat_model = models.vgg19(pretrained=True)
    (netG, netD, netD_local) = get_models(args)
    (criterion_gan, criterion_pixel_l, criterion_pixel_ab, criterion_style, criterion_feat, criterion_texturegan) = get_criterions(args)
    real_label = 1
    fake_label = 0
    optimizerD = optim.Adam(netD.parameters(), lr=args.learning_rate_D, betas=(0.5, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=args.learning_rate, betas=(0.5, 0.999))
    optimizerD_local = optim.Adam(netD_local.parameters(), lr=args.learning_rate_D_local, betas=(0.5, 0.999))
    with torch.cuda.device(args.gpu):
        # Move every network and loss to the selected GPU.
        netG.cuda()
        netD.cuda()
        netD_local.cuda()
        feat_model.cuda()
        criterion_gan.cuda()
        criterion_pixel_l.cuda()
        criterion_pixel_ab.cuda()
        criterion_feat.cuda()
        criterion_texturegan.cuda()
        # Reusable GPU buffers for batches and labels.
        input_stack = torch.FloatTensor().cuda()
        target_img = torch.FloatTensor().cuda()
        target_texture = torch.FloatTensor().cuda()
        segment = torch.FloatTensor().cuda()
        label = torch.FloatTensor(args.batch_size).cuda()
        label_local = torch.FloatTensor(args.batch_size).cuda()
        # Feature extractors tap the VGG layers chosen via CLI args.
        extract_content = FeatureExtractor(feat_model.features, [layers_map[args.content_layers]])
        extract_style = FeatureExtractor(feat_model.features, [layers_map[x.strip()] for x in args.style_layers.split(',')])
        model = {'netG': netG, 'netD': netD, 'netD_local': netD_local, 'criterion_gan': criterion_gan, 'criterion_pixel_l': criterion_pixel_l, 'criterion_pixel_ab': criterion_pixel_ab, 'criterion_feat': criterion_feat, 'criterion_style': criterion_style, 'criterion_texturegan': criterion_texturegan, 'real_label': real_label, 'fake_label': fake_label, 'optimizerD': optimizerD, 'optimizerD_local': optimizerD_local, 'optimizerG': optimizerG}
        # Resume from args.load_epoch if set; train one epoch per iteration.
        for epoch in range(args.load_epoch, args.num_epoch):
            train(model, train_loader, val_loader, input_stack, target_img, target_texture, segment, label, label_local, extract_content, extract_style, loss_graph, vis, epoch, args)
class InstanceMaker(AwsInstance):
    """Provisions a new Ithemal EC2 instance (on-demand or spot).

    The workflow in start_instance():
      1. optionally confirm with the user if instances are already running,
      2. launch the instance (spot request or run-instances) and tag it,
      3. wait for it to boot and accept SSH,
      4. ship the current git worktree over SSH as a tarball,
      5. run the remote setup script with ECR and MySQL credentials,
      6. optionally start a work queue and exec into connect_instance.py.
    """

    def __init__(self, identity, name, instance_type, db, force, no_connect, spot, queue_name):
        # require_pem=True: we need the .pem key later to SSH into the new box.
        super(InstanceMaker, self).__init__(identity, require_pem=True)
        self.name = name  # human-readable suffix appended to the Name tag
        self.instance_type = instance_type  # EC2 instance type string
        self.db = db  # selects the ithemal/mysql-<db> secret for DB credentials
        self.force = force  # skip the "already running" confirmation prompt
        self.no_connect = no_connect  # when True, do not exec connect_instance.py
        self.spot = spot  # falsy => on-demand; > 0 => spot with block duration (hours)
        self.queue_name = queue_name  # optional queue to start on the instance

    def start_instance(self):
        """Provision the instance end-to-end; returns None early if the user aborts."""
        if (not self.force):
            running_instances = self.get_running_instances()
            if running_instances:
                print('You already have {} running instances:'.format(len(running_instances)))
                for instance in running_instances:
                    print(format_instance(instance))
                try:
                    # NOTE(review): [0] raises IndexError on empty input (bare Enter)
                    # — presumably acceptable for an interactive tool; confirm.
                    res = input('Would you still like to continue? (y/n) ').lower()[0]
                except KeyboardInterrupt:
                    print('Not creating a new instance')
                    return
                # res is already a single character, so res[0] == res here.
                if (res[0] != 'y'):
                    print('Not creating a new instance')
                    return
        name = 'Ithemal'
        if self.name:
            name += ': {}'.format(self.name)
        # 16 GB root EBS volume on the standard Amazon Linux device name.
        block_device_mappings = [{'DeviceName': '/dev/xvda', 'Ebs': {'VolumeSize': 16}}]
        iam_profile_name = 'ithemal-ec2'
        iam_profile_struct = {'Name': iam_profile_name}
        if self.spot:
            launch_specification = {'InstanceType': self.instance_type, 'SecurityGroupIds': ['sg-0780fe1760c00d96d'], 'BlockDeviceMappings': block_device_mappings, 'KeyName': self.identity, 'ImageId': 'ami-0b59bfac6be064b78', 'IamInstanceProfile': iam_profile_struct}
            # Submit the spot request and parse the first request record.
            run_com = (lambda com: json.loads(subprocess.check_output(com))['SpotInstanceRequests'][0])
            com = ['aws', 'ec2', 'request-spot-instances', '--launch-specification', json.dumps(launch_specification)]
            if (self.spot > 0):
                # self.spot is interpreted as a block duration in hours.
                com.extend(['--block-duration-minutes', str((self.spot * 60))])
            output = run_com(com)
            print('Submitted spot instance request')
            try:
                # Poll until AWS fulfills the request and assigns an InstanceId.
                while ('InstanceId' not in output):
                    print('\rWaiting for spot request to be fulfilled ({})...'.format(output['Status']['Code']), end=((' ' * 20) + '\r'))
                    time.sleep(1)
                    output = run_com(['aws', 'ec2', 'describe-spot-instance-requests', '--spot-instance-request-ids', output['SpotInstanceRequestId']])
            except (KeyboardInterrupt, SystemExit):
                # Cancel the pending request so we don't get billed for an
                # instance nobody is waiting for.
                subprocess.check_call(['aws', 'ec2', 'cancel-spot-instance-requests', '--spot-instance-request-ids', output['SpotInstanceRequestId']])
                sys.exit(1)
            print()
            instance_id = output['InstanceId']
            # Spot instances are not tagged at launch; tag explicitly.
            subprocess.check_call(['aws', 'ec2', 'create-tags', '--resources', instance_id, '--tags', 'Key=Name,Value="{}"'.format(name)])
        else:
            args = ['aws', 'ec2', 'run-instances', '--instance-type', self.instance_type, '--key-name', self.identity, '--image-id', 'ami-0b59bfac6be064b78', '--tag-specifications', 'ResourceType="instance",Tags=[{{Key="Name",Value="{}"}}]'.format(name), '--security-group-ids', 'sg-0780fe1760c00d96d', '--block-device-mappings', json.dumps(block_device_mappings), '--iam-instance-profile', json.dumps(iam_profile_struct)]
            output = subprocess.check_output(args)
            parsed_output = json.loads(output)
            instance = parsed_output['Instances'][0]
            instance_id = instance['InstanceId']
        print('Started instance! Waiting for connection...')
        subprocess.check_call(['aws', 'ec2', 'wait', 'instance-running', '--instance-ids', instance_id])
        # Re-fetch the instance record to obtain its public DNS name.
        instance = next((instance for instance in self.get_running_instances() if (instance['InstanceId'] == instance_id)))
        ssh_address = 'ec2-{}'.format(instance['PublicDnsName'])
        # Poll (discarding output) until sshd is up and accepting our key.
        while subprocess.call(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, 'exit'], stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w')):
            time.sleep(1)
        # Stream the tracked files of the git worktree as a gzipped tarball.
        git_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], cwd=_DIRNAME).strip()
        ls_files = subprocess.Popen(['git', 'ls-files'], cwd=git_root, stdout=subprocess.PIPE)
        tar = subprocess.Popen(['tar', 'Tcz', '-'], cwd=git_root, stdin=ls_files.stdout, stdout=subprocess.PIPE)
        # ECR credentials: the authorization token is base64("user:password").
        aws_credentials = json.loads(subprocess.check_output(['aws', 'ecr', 'get-authorization-token']).strip())
        authorization_datum = aws_credentials['authorizationData'][0]
        aws_authorization = base64.b64decode(authorization_datum['authorizationToken'])
        # NOTE(review): b64decode returns bytes on Python 3, where .index(':')
        # would need b':' — this code presumably runs under Python 2; confirm.
        aws_authorization_user = aws_authorization[:aws_authorization.index(':')]
        aws_authorization_token = aws_authorization[(aws_authorization.index(':') + 1):]
        aws_endpoint = authorization_datum['proxyEndpoint']
        region = subprocess.check_output(['aws', 'configure', 'get', 'region']).strip()
        # MySQL credentials come from AWS Secrets Manager (ithemal/mysql-<db>).
        mysql_credentials_dict = json.loads(subprocess.check_output(['aws', 'secretsmanager', 'get-secret-value', '--secret-id', 'ithemal/mysql-{}'.format(self.db)]).strip())
        mysql_credentials = json.loads(mysql_credentials_dict['SecretString'])
        mysql_user = mysql_credentials['username']
        mysql_password = mysql_credentials['password']
        mysql_host = mysql_credentials['host']
        mysql_port = mysql_credentials['port']
        # Unpack the tarball on the remote host, then run the setup script
        # with all credentials passed as positional arguments.
        initialization_command = 'mkdir ithemal; cd ithemal; cat | tar xz; aws/aws_utils/remote_setup.sh {}'.format(' '.join(map(str, [aws_authorization_user, aws_authorization_token, aws_endpoint, mysql_user, mysql_password, mysql_host, mysql_port, region])))
        ssh = subprocess.Popen(['ssh', '-oStrictHostKeyChecking=no', '-i', self.pem_key, ssh_address, initialization_command], stdin=tar.stdout)
        ls_files.wait()
        tar.wait()
        ssh.wait()
        if self.queue_name:
            self.start_queue_on_instance(instance, ssh_address)
        if (not self.no_connect):
            # Replace the current process with the interactive connect script.
            os.execlp(sys.executable, sys.executable, os.path.join(_DIRNAME, 'connect_instance.py'), self.identity, instance['InstanceId'])

    def start_queue_on_instance(self, instance, ssh_address):
        """Tag the instance with its queue name and start the queue worker inside the remote docker container."""
        subprocess.check_call(['aws', 'ec2', 'create-tags', '--resources', instance['InstanceId'], '--tags', 'Key=QueueName,Value="{}"'.format(self.queue_name)])
        queue_url = command_queue.queue_url_of_name(self.queue_name)
        subprocess.check_call(['ssh', '-i', self.pem_key, ssh_address, 'sudo docker exec -u ithemal -dit ithemal bash -lc "~/ithemal/aws/aws_utils/queue_process.py --kill {}"'.format(queue_url)])
def multinomial_resample(weights):
    """Multinomial resampling: draw len(weights) indices, each index i
    selected with probability weights[i].

    Returns an array of particle indices (with replacement).
    """
    cdf = np.cumsum(weights)
    # Guard against floating-point round-off: the last CDF entry must be
    # exactly 1.0 so no uniform draw falls past the end of the array.
    cdf[-1] = 1.0
    draws = random(len(weights))
    return np.searchsorted(cdf, draws)
def parse_component(component):
    """Normalize a component specification into a (name, arguments) pair.

    A bare string is a component with no arguments; a single-key dict maps
    the component name to its arguments. Anything else raises ValueError.
    """
    if isinstance(component, str):
        return (component, None)
    if isinstance(component, dict):
        # The dict is expected to hold exactly one entry: {name: arguments}.
        name = list(component)[0]
        return (name, component[name])
    raise ValueError('Argument to the parse_component function must be str or dict.')
def get_linker(full_mol, clean_frag, starting_point):
    """Extract the SMILES of the linker connecting two fragments in full_mol.

    full_mol: the complete molecule; clean_frag: the fragment pair to match
    and remove; starting_point: '.'-separated SMILES of the starting
    fragments, deleted from the fragmented molecule to isolate the linker.
    Returns the linker SMILES, or '' when no match / no linker exists.
    """
    matches = list(full_mol.GetSubstructMatches(clean_frag))
    if (len(matches) == 0):
        print('No matches')
        return ''
    # The linker is exactly the heavy atoms not covered by the fragment match.
    linker_len = (full_mol.GetNumHeavyAtoms() - clean_frag.GetNumHeavyAtoms())
    if (linker_len == 0):
        return ''
    # Copy before Kekulize: full_mol is mutated in place below, and the
    # un-kekulized copy is what gets fragmented later.
    mol_to_break = Chem.Mol(full_mol)
    Chem.Kekulize(full_mol, clearAromaticFlags=True)
    poss_linker = []
    if (len(matches) > 0):
        # Try every substructure match; a match is accepted only if removing
        # it leaves exactly the linker and the linker attaches at two atoms.
        for match in matches:
            mol_rw = Chem.RWMol(full_mol)
            linker_atoms = list(set(list(range(full_mol.GetNumHeavyAtoms()))).difference(match))
            linker_bonds = []
            atoms_joined_to_linker = []
            # Collect the bonds crossing from matched (fragment) atoms into
            # the linker; these are the attachment bonds to cut.
            for idx_to_delete in sorted(match, reverse=True):
                nei = [x.GetIdx() for x in mol_rw.GetAtomWithIdx(idx_to_delete).GetNeighbors()]
                intersect = set(nei).intersection(set(linker_atoms))
                if (len(intersect) == 1):
                    linker_bonds.append(mol_rw.GetBondBetweenAtoms(idx_to_delete, list(intersect)[0]).GetIdx())
                    atoms_joined_to_linker.append(idx_to_delete)
                elif (len(intersect) > 1):
                    for idx_nei in list(intersect):
                        linker_bonds.append(mol_rw.GetBondBetweenAtoms(idx_to_delete, idx_nei).GetIdx())
                        atoms_joined_to_linker.append(idx_to_delete)
            # A valid linker joins exactly two fragment atoms.
            if (len(set(atoms_joined_to_linker)) != 2):
                continue
            # Delete matched atoms (descending index order keeps indices valid).
            for idx_to_delete in sorted(match, reverse=True):
                mol_rw.RemoveAtom(idx_to_delete)
            linker = Chem.Mol(mol_rw)
            if (linker.GetNumHeavyAtoms() == linker_len):
                # Complementary check: deleting the linker atoms instead must
                # leave exactly the two fragments.
                mol_rw = Chem.RWMol(full_mol)
                for idx_to_delete in sorted(linker_atoms, reverse=True):
                    mol_rw.RemoveAtom(idx_to_delete)
                frags = Chem.Mol(mol_rw)
                if (len(Chem.rdmolops.GetMolFrags(frags)) == 2):
                    # Cut the attachment bonds (adds dummy atoms at cut points).
                    fragmented_mol = Chem.FragmentOnBonds(mol_to_break, linker_bonds)
                    linker_to_return = Chem.Mol(fragmented_mol)
                    qp = Chem.AdjustQueryParameters()
                    # Treat dummy atoms as wildcards when matching the
                    # starting fragments for deletion.
                    qp.makeDummiesQueries = True
                    for f in starting_point.split('.'):
                        qfrag = Chem.AdjustQueryProperties(Chem.MolFromSmiles(f), qp)
                        linker_to_return = AllChem.DeleteSubstructs(linker_to_return, qfrag, onlyFrags=True)
                    if ((len(Chem.rdmolops.GetMolFrags(linker)) == 1) and (len(linker_bonds) == 2)):
                        # Single connected linker with exactly two cuts:
                        # return it directly (round-trip through SMILES to
                        # canonicalize).
                        Chem.Kekulize(linker_to_return, clearAromaticFlags=True)
                        if (len(Chem.rdmolops.GetMolFrags(linker_to_return)) > 1):
                            # Deletion left extra pieces; pick the fragment
                            # whose heavy-atom count equals the linker length.
                            for frag in Chem.MolToSmiles(linker_to_return).split('.'):
                                if (Chem.MolFromSmiles(frag).GetNumHeavyAtoms() == linker_len):
                                    return frag
                        return Chem.MolToSmiles(Chem.MolFromSmiles(Chem.MolToSmiles(linker_to_return)))
                    else:
                        # Ambiguous case: re-parse without sanitization and
                        # collect the candidate for later disambiguation.
                        fragmented_mol = Chem.MolFromSmiles(Chem.MolToSmiles(fragmented_mol), sanitize=False)
                        linker_to_return = AllChem.DeleteSubstructs(fragmented_mol, Chem.MolFromSmiles(starting_point))
                        poss_linker.append(Chem.MolToSmiles(linker_to_return))
    if (len(poss_linker) == 1):
        return poss_linker[0]
    elif (len(poss_linker) == 0):
        print('FAIL:', Chem.MolToSmiles(full_mol), Chem.MolToSmiles(clean_frag), starting_point)
        return ''
    else:
        # Multiple candidates: warn and arbitrarily return the first.
        print('More than one poss linker. ', poss_linker)
        return poss_linker[0]
class GaussianNoiseLayer(nn.Module):
    """Adds i.i.d. standard Gaussian noise to the input during training.

    In eval mode the input is returned unchanged. The noise is created
    with ``torch.randn_like`` so it always matches the input's device and
    dtype — the original implementation called ``.cuda(x.get_device())``
    unconditionally and crashed on CPU tensors, and wrapped the noise in
    the long-deprecated ``Variable``.
    """

    def __init__(self):
        super(GaussianNoiseLayer, self).__init__()

    def forward(self, x):
        # No noise injection at inference time.
        if not self.training:
            return x
        # randn_like inherits device/dtype from x, so this works on CPU and
        # GPU alike; modern tensors carry autograd state without Variable.
        return x + torch.randn_like(x)
def save_embeddings(filepath, filename, embeddings):
    """Serialize ``embeddings`` to ``<filepath>/<filename>`` via torch.save.

    The tensor is stored under the 'embeds' key of the checkpoint dict.
    Returns True on success (exceptions from torch.save propagate).

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``os.mkdir`` guarded by ``os.path.exists``: it also creates missing
    parent directories and avoids the check-then-create race when two
    processes save concurrently.
    """
    os.makedirs(filepath, exist_ok=True)
    target_path = os.path.join(filepath, filename)
    torch.save({'embeds': embeddings}, target_path)
    return True
class Net(network.resnet38d.Net):
    """ResNet-38 backbone with a pairwise-affinity head.

    forward() projects three backbone feature maps (conv4/5/6) into a
    448-channel embedding and returns exp(-L1) affinities between spatial
    position pairs, where the pairs are precomputed by
    pyutils.get_indices_of_pairs (radius 5). Presumably an AffinityNet-style
    model for label propagation — confirm against the training script.
    """

    def __init__(self):
        super(Net, self).__init__()
        # 1x1 projections of three backbone stages into 64/128/256 channels
        # (concatenated: 448 channels total, matching f9's input).
        self.f8_3 = torch.nn.Conv2d(512, 64, 1, bias=False)
        self.f8_4 = torch.nn.Conv2d(1024, 128, 1, bias=False)
        self.f8_5 = torch.nn.Conv2d(4096, 256, 1, bias=False)
        self.f9 = torch.nn.Conv2d(448, 448, 1, bias=False)
        torch.nn.init.kaiming_normal_(self.f8_3.weight)
        torch.nn.init.kaiming_normal_(self.f8_4.weight)
        torch.nn.init.kaiming_normal_(self.f8_5.weight)
        torch.nn.init.xavier_uniform_(self.f9.weight, gain=4)
        # Early backbone stages are frozen; the new heads train from scratch.
        self.not_training = [self.conv1a, self.b2, self.b2_1, self.b2_2]
        self.from_scratch_layers = [self.f8_3, self.f8_4, self.f8_5, self.f9]
        # Feature-map size for a 448px input at stride 8; pair indices for
        # this size are precomputed once and reused when the input matches.
        self.predefined_featuresize = int((448 // 8))
        (self.ind_from, self.ind_to) = pyutils.get_indices_of_pairs(radius=5, size=(self.predefined_featuresize, self.predefined_featuresize))
        self.ind_from = torch.from_numpy(self.ind_from)
        self.ind_to = torch.from_numpy(self.ind_to)
        return

    def forward(self, x, to_dense=False):
        # Backbone features as a dict of named stage outputs.
        d = super().forward_as_dict(x)
        f8_3 = F.elu(self.f8_3(d['conv4']))
        f8_4 = F.elu(self.f8_4(d['conv5']))
        f8_5 = F.elu(self.f8_5(d['conv6']))
        x = F.elu(self.f9(torch.cat([f8_3, f8_4, f8_5], dim=1)))
        if ((x.size(2) == self.predefined_featuresize) and (x.size(3) == self.predefined_featuresize)):
            # Input matches the precomputed size: reuse cached pair indices.
            ind_from = self.ind_from
            ind_to = self.ind_to
        else:
            # Different spatial size: recompute pair indices on the fly.
            (ind_from, ind_to) = pyutils.get_indices_of_pairs(5, (x.size(2), x.size(3)))
            ind_from = torch.from_numpy(ind_from)
            ind_to = torch.from_numpy(ind_to)
        # Flatten spatial dims and gather the features of each pair endpoint.
        x = x.view(x.size(0), x.size(1), (- 1))
        ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True))
        ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True))
        ff = torch.unsqueeze(ff, dim=2)
        ft = ft.view(ft.size(0), ft.size(1), (- 1), ff.size(3))
        # Affinity = exp(-mean |f_to - f_from|) over the channel dimension.
        aff = torch.exp((- torch.mean(torch.abs((ft - ff)), dim=1)))
        if to_dense:
            # Build the dense symmetric affinity matrix with unit diagonal
            # via a sparse COO tensor: (from,to), identity, and (to,from).
            aff = aff.view((- 1)).cpu()
            ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), (- 1)).contiguous().view((- 1))
            indices = torch.stack([ind_from_exp, ind_to])
            indices_tp = torch.stack([ind_to, ind_from_exp])
            area = x.size(2)
            indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()])
            aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1), torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda()
            return aff_mat
        else:
            return aff

    def get_parameter_groups(self):
        """Split trainable conv/groupnorm params into four groups:
        (pretrained weights, pretrained biases, scratch weights, scratch biases),
        so the optimizer can assign different learning rates to each.
        """
        groups = ([], [], [], [])
        for m in self.modules():
            if (isinstance(m, nn.Conv2d) or isinstance(m, nn.modules.normalization.GroupNorm)):
                if m.weight.requires_grad:
                    if (m in self.from_scratch_layers):
                        groups[2].append(m.weight)
                    else:
                        groups[0].append(m.weight)
                if ((m.bias is not None) and m.bias.requires_grad):
                    if (m in self.from_scratch_layers):
                        groups[3].append(m.bias)
                    else:
                        groups[1].append(m.bias)
        return groups
_errors
def prediction_tester(project, verbosity, passed, **kwargs) -> None:
    """Set the logging level and run the project's prediction with **kwargs.

    NOTE(review): the `passed` parameter is unused here — presumably consumed
    by a decorator (the line above this def looks truncated); confirm.
    """
    sf.setLoggingLevel(verbosity)
    project.predict(**kwargs)
def set_double_double_solution(nvr, sol, vrblvl=0):
    """Set *sol* as the solution in double double precision, in a system
    of *nvr* variables, printing diagnostics when vrblvl > 0.

    Loads [sol] into the double double solutions container, then calls
    PHCpack job 861 with both integer parameters set to 1 (presumably
    selecting the first and only solution — confirm against PHCpack docs).
    Returns the value returned by the phc function.
    """
    if vrblvl > 0:
        print('in set_double_double_solution, nvr :', nvr)
        print('the solution :')
        print(sol)
    # Install the single solution as the whole container first.
    set_double_double_solutions(nvr, [sol])
    phcfun = get_phcfun()
    pars = (c_int32 * 2)()
    pars[0] = c_int32(1)
    pars[1] = c_int32(1)
    aprm = pointer(pars)
    bprm = pointer(c_int32(vrblvl))
    cprm = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> set_double_double_solution calls phc', end='')
    retval = phcfun(861, aprm, bprm, cprm, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return retval
def bbox3d2roi(bbox_list):
    """Convert per-image 3D boxes into RoI format.

    Prepends a batch-index column to each image's boxes and concatenates
    everything into one tensor of shape (total_boxes, box_dim + 1).
    Images with no boxes contribute a zeros_like placeholder.
    """
    per_image = []
    for img_id, bboxes in enumerate(bbox_list):
        if bboxes.size(0) == 0:
            per_image.append(torch.zeros_like(bboxes))
        else:
            # new_full keeps the dtype/device of the box tensor.
            idx_col = bboxes.new_full((bboxes.size(0), 1), img_id)
            per_image.append(torch.cat([idx_col, bboxes], dim=-1))
    return torch.cat(per_image, 0)
class InceptionV4Encoder(InceptionV4, EncoderMixin):
    """InceptionV4 backbone adapted as a segmentation encoder.

    Patches all 3x3 convs and max-pools to padding (1, 1) so spatial sizes
    stay aligned across stages, removes the classifier head, and exposes
    the feature hierarchy via get_stages()/forward().
    """

    def __init__(self, stage_idxs, out_channels, depth=5, **kwargs):
        super().__init__(**kwargs)
        # Indices that split self.features into encoder stages.
        self._stage_idxs = stage_idxs
        self._out_channels = out_channels
        self._depth = depth
        self._in_channels = 3
        # Force 'same'-style padding on 3x3 convs and max-pools so the
        # downsampling pattern matches what the decoder expects.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (m.kernel_size == (3, 3)):
                    m.padding = (1, 1)
            if isinstance(m, nn.MaxPool2d):
                m.padding = (1, 1)
        # The classifier is not used by the encoder.
        del self.last_linear

    def make_dilated(self, stage_list, dilation_list):
        raise ValueError('InceptionV4 encoder does not support dilated mode due to pooling operation for downsampling!')

    def get_stages(self):
        # Stage 0 is the identity (raw input); the rest are slices of
        # self.features split at self._stage_idxs.
        return [nn.Identity(), self.features[:self._stage_idxs[0]], self.features[self._stage_idxs[0]:self._stage_idxs[1]], self.features[self._stage_idxs[1]:self._stage_idxs[2]], self.features[self._stage_idxs[2]:self._stage_idxs[3]], self.features[self._stage_idxs[3]:]]

    def forward(self, x):
        """Return the list of feature maps from stage 0 up to self._depth."""
        stages = self.get_stages()
        features = []
        for i in range((self._depth + 1)):
            x = stages[i](x)
            features.append(x)
        return features

    def load_state_dict(self, state_dict, **kwargs):
        # Drop classifier weights: last_linear was deleted in __init__.
        state_dict.pop('last_linear.bias')
        state_dict.pop('last_linear.weight')
        super().load_state_dict(state_dict, **kwargs)
def chamfer_loss(pc1, pc2):
    """Chamfer distance between two point clouds.

    Inputs are permuted from channels-first (B, C, N) to the (B, N, C)
    layout — presumably what chamfer_distance expects; confirm against its
    signature. Only the distance component of its return value is used.
    """
    points_a = pc1.permute(0, 2, 1)
    points_b = pc2.permute(0, 2, 1)
    dist, _ = chamfer_distance(points_a, points_b)
    return dist
def input_fn_builder(input_files, max_seq_length, is_training, num_cpu_threads=4):
    """Build a TF-Estimator ``input_fn`` reading TFRecord examples.

    Training mode shuffles and interleaves reads across files and repeats
    forever; eval mode reads the files sequentially (also repeated).
    Records are decoded by the module-level _decode_record helper.
    """

    def input_fn(params):
        # The estimator supplies the batch size via params.
        batch_size = params['batch_size']
        name_to_features = {
            'input_ids': tf.FixedLenFeature([max_seq_length], tf.int64),
            'target_ids': tf.FixedLenFeature([max_seq_length], tf.int64),
            'input_mask': tf.FixedLenFeature([max_seq_length], tf.int64),
        }

        def parse_record(record):
            return _decode_record(record, name_to_features)

        if is_training:
            # Shuffle file order, then interleave reads across files so the
            # pipeline is not bottlenecked on a single TFRecord.
            dataset = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
            dataset = dataset.repeat()
            dataset = dataset.shuffle(buffer_size=len(input_files))
            parallelism = min(num_cpu_threads, len(input_files))
            dataset = dataset.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, sloppy=is_training, cycle_length=parallelism))
            dataset = dataset.shuffle(buffer_size=100)
        else:
            dataset = tf.data.TFRecordDataset(input_files)
            dataset = dataset.repeat()
        # Fused decode + batch; drop_remainder keeps batch shapes static.
        dataset = dataset.apply(tf.contrib.data.map_and_batch(parse_record, batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True))
        return dataset

    return input_fn
def main(args):
    """Encrypt the PyPI password with the repo's Travis public key and
    write the result into .travis.yml.

    Uses args.password when provided (truthy); otherwise prompts securely.
    """
    key = fetch_public_key(args.repo)
    pypi_password = args.password if args.password else getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(key, pypi_password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
def find_ref_span(sent_offsets, target):
    """Expand a character span to the boundaries of its containing sentences.

    sent_offsets: list of inclusive (start, end) sentence offsets.
    target: (start, end) span to expand.
    Returns (ref_start, ref_end) — the start of the sentence containing
    target's start and the end of the sentence containing target's end.
    Asserts that both endpoints were located.
    """
    span_start, span_end = target
    ref_start = -1
    ref_end = -1
    for sent_start, sent_end in sent_offsets:
        # Both endpoint checks are inclusive on both sides.
        if sent_start <= span_start <= sent_end:
            ref_start = sent_start
        if sent_start <= span_end <= sent_end:
            ref_end = sent_end
    assert ref_end >= ref_start and ref_end >= 0 and ref_start >= 0, 'ref span is wrong {}'.format((ref_start, ref_end))
    return (ref_start, ref_end)
class GitProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single GIT processor.

    __call__ forwards text to the tokenizer and images to the image
    processor, merging the pixel values into the text encoding when both
    are given.
    """

    # Attributes managed by ProcessorMixin.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Encode text and/or images; at least one must be provided.

        Returns the tokenizer encoding (with 'pixel_values' added when
        images are also given), or a BatchEncoding of image features when
        only images are given.
        """
        if ((text is None) and (images is None)):
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if (text is not None):
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if (images is not None):
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if ((text is not None) and (images is not None)):
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif (text is not None):
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    # NOTE(review): this looks like it should be a @property (callers in
    # similar processors access model_input_names as an attribute) — confirm
    # against call sites before changing, since adding @property alters how
    # existing callers must invoke it.
    def model_input_names(self):
        return ['input_ids', 'attention_mask', 'pixel_values']
def get_key(variable):
    """Return the key registered for *variable* in KEYS, or a generated one.

    EAFP lookup; assumes KEYS is a mapping. generic_key is only invoked
    when the variable has no registered key, matching the original's
    membership-test-then-index behavior.
    """
    try:
        return KEYS[variable]
    except KeyError:
        return generic_key(variable)
class SelfAttention(layers.Layer):
    """Single-head scaled dot-product self-attention.

    Projects the input into queries/keys of size hidden_dim and values of
    size output_dim, then computes softmax(QK^T / sqrt(hidden_dim)) V.
    Output shape: (batch, timesteps, output_dim).
    """

    def __init__(self, hidden_dim, output_dim, **kwargs):
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        super().__init__(**kwargs)

    def get_config(self):
        """Include the projection sizes so the layer can be re-instantiated."""
        config = super().get_config().copy()
        config.update({'hidden_dim': self.hidden_dim, 'output_dim': self.output_dim})
        return config

    def build(self, input_shape):
        feature_dim = input_shape[-1]
        # Query / key / value projection matrices.
        self.WQ = self.add_weight(name='WQ', shape=(feature_dim, self.hidden_dim), initializer='uniform', trainable=True)
        self.WK = self.add_weight(name='WK', shape=(feature_dim, self.hidden_dim), initializer='uniform', trainable=True)
        self.WV = self.add_weight(name='WV', shape=(feature_dim, self.output_dim), initializer='uniform', trainable=True)
        super().build(input_shape)

    def call(self, inputs, **kwargs):
        queries = backend.dot(inputs, self.WQ)
        keys = backend.dot(inputs, self.WK)
        values = backend.dot(inputs, self.WV)
        # Attention scores: QK^T scaled by sqrt(hidden_dim), then softmax.
        scores = backend.batch_dot(queries, backend.permute_dimensions(keys, (0, 2, 1)))
        scores = backend.softmax(scores / (self.hidden_dim ** 0.5))
        return backend.batch_dot(scores, values)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim)
def load_examples(path: str, seed: int) -> List[Example]:
    """Load multiple-choice questions from a CSV into Example objects.

    Each row's three incorrect answers and one correct answer are shuffled
    (seeded for reproducibility), and the index of the correct answer
    within the shuffled choices is recorded on the Example.
    """
    df = pd.read_csv(path)
    random.seed(seed)

    def build_example(row) -> Example:
        choices = [row['Incorrect Answer 1'], row['Incorrect Answer 2'], row['Incorrect Answer 3'], row['Correct Answer']]
        random.shuffle(choices)
        correct_index = choices.index(row['Correct Answer'])
        return Example(row.Question, choices[0], choices[1], choices[2], choices[3], correct_index)

    return [build_example(row) for _, row in df.iterrows()]
def main(args):
    """Resolve input/output directories, set up logging, run the experiment.

    The input directory corresponds to a prior run with full (1.0) tree and
    instance subsampling, so that identifier is rebuilt explicitly; the
    output identifier uses the current args as-is. Stdout/stderr are
    redirected to a log file for the duration of the experiment.
    """
    # Identifier of the upstream run: full subsampling, random order.
    train_args = vars(args).copy()
    train_args['tree_subsample_frac'] = 1.0
    train_args['tree_subsample_order'] = 'random'
    train_args['instance_subsample_frac'] = 1.0
    in_method = util.get_method_identifier(args.model_type, train_args)
    in_dir = os.path.join(args.in_dir, args.custom_in_dir, args.dataset, args.in_scoring, f'fold{args.fold}', in_method)
    # Identifier of this run uses the unmodified arguments.
    out_method = util.get_method_identifier(args.model_type, vars(args))
    out_dir = os.path.join(args.out_dir, args.custom_out_dir, args.dataset, args.out_scoring, f'fold{args.fold}', out_method)
    os.makedirs(out_dir, exist_ok=True)
    util.clear_dir(out_dir)
    logger = util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info('\ntimestamp: {}'.format(datetime.now()))
    logfile, stdout, stderr = util.stdout_stderr_to_log(os.path.join(out_dir, 'log+.txt'))
    experiment(args, in_dir, out_dir, logger)
    util.reset_stdout_stderr(logfile, stdout, stderr)
class LevitImageProcessor(metaclass=DummyObject):
    """Placeholder class that raises unless the 'vision' backend is installed."""

    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError-style message via the mixin
        # helper when torchvision/PIL support is unavailable.
        requires_backends(self, ['vision'])
class TopDownGlobalChaFuseReduce(HybridBlock):
    """Top-down fusion block with global channel attention (MXNet Gluon).

    The high-level feature xh is projected to `channels`, a global
    channel-attention gate is computed from it, and the low-level feature
    xl is modulated by that gate before being added back to xh; a 3x3
    conv-BN-ReLU post-processes the sum.
    """

    def __init__(self, channels=64):
        super(TopDownGlobalChaFuseReduce, self).__init__()
        self.channels = channels
        # NOTE: the HybridSequential prefixes ('feature_high', 'global_att',
        # 'post') become part of the parameter names — do not rename them
        # without retraining / remapping checkpoints.
        with self.name_scope():
            # 1x1 conv to project the high-level feature to `channels`.
            self.feature_high = nn.HybridSequential(prefix='feature_high')
            self.feature_high.add(nn.Conv2D(channels, kernel_size=1, strides=1, padding=0, dilation=1))
            self.feature_high.add(nn.BatchNorm())
            self.feature_high.add(nn.Activation('relu'))
            # Global average pool + 1x1 conv: per-channel attention logits.
            self.global_att = nn.HybridSequential(prefix='global_att')
            self.global_att.add(nn.GlobalAvgPool2D())
            self.global_att.add(nn.Conv2D(self.channels, kernel_size=1, strides=1, padding=0))
            self.global_att.add(nn.BatchNorm())
            self.sigmoid = nn.Activation('sigmoid')
            # 3x3 conv-BN-ReLU applied to the fused result.
            self.post = nn.HybridSequential(prefix='post')
            self.post.add(nn.Conv2D(channels, kernel_size=3, strides=1, padding=1, dilation=1))
            self.post.add(nn.BatchNorm())
            self.post.add(nn.Activation('relu'))

    def hybrid_forward(self, F, xh, xl):
        """Fuse high-level xh and low-level xl: xh + xl * sigmoid(gate(xh))."""
        xh = self.feature_high(xh)
        xa = xh
        # Per-channel gate derived from the (projected) high-level feature.
        ag = self.global_att(xa)
        xa3 = self.sigmoid(ag)
        # broadcast_mul expands the 1x1 gate over xl's spatial dimensions.
        xs = (xh + F.broadcast_mul(xl, xa3))
        xs = self.post(xs)
        return xs
def _check_Parikh2014(mus, lams, views):
failed_check = [i for (i, (mu, lam, view)) in enumerate(zip(mus, lams, views)) if (mu < (lam / (np.linalg.norm(view) ** 2)))]
if failed_check:
raise ValueError(f'mu, lam, view not matching condition specified from Parikh 2014 (mu<lam/frobenius(representations)**2).Index of view(s) not meeting the condition: {failed_check}.') |
def test_decompose():
    """Smoke test: run a cascade on a small product system and write out
    the resulting witness-set decomposition.
    """
    from cascades import run_cascade
    system = ['(x1-1)*(x1-2)*(x1-3)*(x1-4);', '(x1-1)*(x2-1)*(x2-2)*(x2-3);', '(x1-1)*(x1-2)*(x3-1)*(x3-2);', '(x1-1)*(x2-1)*(x3-1)*(x4-1);']
    cascade = run_cascade(4, 3, system)
    filtered = decompose(cascade)
    write_decomposition(filtered)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.