code stringlengths 101 5.91M |
|---|
def mdetr_clevr(pretrained=False, return_postprocessor=False):
    """Build the MDETR ResNet-18 model configured for CLEVR QA.

    Args:
        pretrained: if True, download and load the released checkpoint.
        return_postprocessor: if True, also return a ``PostProcess`` instance.

    Returns:
        The model, or ``(model, PostProcess())`` when ``return_postprocessor``.
    """
    model = _make_detr('resnet18', num_queries=25, qa_dataset='clevr', text_encoder='distilroberta-base')
    if pretrained:
        # NOTE(review): the original URL literal was truncated in this file
        # ("url=' map_location=..." was a syntax error); restored from the
        # MDETR release artifacts — confirm against the upstream hubconf.
        checkpoint = torch.hub.load_state_dict_from_url(
            url='https://zenodo.org/record/4721981/files/clevr_checkpoint.pth',
            map_location='cpu',
            check_hash=True,
        )
        model.load_state_dict(checkpoint['model'])
    if return_postprocessor:
        return (model, PostProcess())
    return model
def main():
    """Warm-start a RepCONC model from precomputed dense corpus embeddings,
    then save the model, tokenizer, FAISS index, and corpus ids."""
    hf_parser = HfArgumentParser((ModelArguments, DataArguments))
    model_args, data_args = hf_parser.parse_args_into_dataclasses()
    model_args: ModelArguments
    data_args: DataArguments

    logging.basicConfig(
        format='%(asctime)s-%(levelname)s-%(name)s- %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info('Model parameters %s', model_args)
    logger.info('Data parameters %s', data_args)
    set_seed(2022)

    config = AutoConfig.from_pretrained(model_args.model_name_or_path)
    config.MCQ_M = model_args.MCQ_M
    config.MCQ_K = model_args.MCQ_K
    if model_args.similarity_metric is not None:
        config.similarity_metric = model_args.similarity_metric
    if model_args.pooling is not None:
        config.pooling = model_args.pooling

    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path)
    dense_encoder = AutoDense.from_pretrained(model_args.model_name_or_path, config=config)
    repconc = RepCONC(config, dense_encoder, use_constraint=False, sk_epsilon=None, sk_iters=None)

    # Warm-start the quantization centroids from the dense corpus embeddings.
    corpus_embeds = np.load(data_args.input_corpus_embed_path)
    repconc, index = warmup_from_embeds(corpus_embeds, repconc)

    os.makedirs(data_args.output_model_dir, exist_ok=True)
    repconc.save_pretrained(data_args.output_model_dir)
    tokenizer.save_pretrained(data_args.output_model_dir)

    for out_path in (data_args.output_index_path, data_args.output_corpus_ids_path):
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
    faiss.write_index(faiss.downcast_index(index.index), data_args.output_index_path)
    np.save(data_args.output_corpus_ids_path, np.load(data_args.input_corpus_ids_path))
def test_SB2():
    """Plot the radial-velocity curves of both SB2 components and save the figure."""
    binary = orbit.SB2(q, K, e, omega, P, T0, gamma, dates)
    velocities = binary.get_velocities()

    fig, ax = plt.subplots(nrows=1)
    ax.axhline(gamma, color='0.5', ls=':')  # systemic-velocity reference line
    for component_velocity in (velocities[0], velocities[1]):
        ax.plot(dates, component_velocity)
    ax.set_xlabel('JD')
    ax.set_ylabel('$v\\,\\mathrm{km/s}$')
    fig.savefig(outdir + 'SB2.png', dpi=300)
def find_forward_params_input_dependent_flow(x_loader: torch.utils.data.dataloader, FLOW: Flow, optimizer_fn=None, num_epochs=None, seed=0, verbose=0, verbose_level=0, noise_var=0.0) -> "tuple[Flow, float]":
    """Run the data-dependent forward initializer of ``FLOW`` over ``x_loader``.

    Inputs may optionally be perturbed with Gaussian noise of variance
    ``noise_var`` (a single number, or a list of variances sampled per batch).

    Returns:
        ``(FLOW, loss_acc)``: the flow (moved to CPU, initializer parameters
        turned off, train/eval mode restored) and the accumulated loss of the
        final epoch.  NOTE(review): the original annotation claimed ``Flow``,
        but a 2-tuple was and is returned — callers unpack two values.
    """
    if optimizer_fn is None:
        warnings.warn('Using default optimizer (optim.Adam(trainable_params, lr=0.01))', Warning)
        optimizer_fn = lambda trainable_params: optim.Adam(trainable_params, lr=0.01)
    if num_epochs is None:
        warnings.warn('Using default number of epochs (100)', Warning)
        num_epochs = 100
    np.random.seed(seed)

    trainable_params = [p for _, p in FLOW.named_parameters()]
    optimizer = optimizer_fn(trainable_params)

    state = FLOW.training  # remember mode so it can be restored afterwards
    FLOW.train()
    FLOW.to(cg.device)

    loss_acc = 0.0
    for e in range(num_epochs):
        loss_acc = 0.0  # only the last epoch's accumulated loss is returned
        for x, y in x_loader:
            x, y = x.to(cg.device), y.to(cg.device)
            # isinstance instead of `type(...) is float` so plain ints work too.
            if isinstance(noise_var, (int, float)):
                if noise_var > 0.0:
                    x = x + torch.zeros_like(x).normal_() * numpy.sqrt(noise_var)
            elif isinstance(noise_var, list):
                idx = numpy.random.randint(len(noise_var))
                x = x + torch.zeros_like(x).normal_() * numpy.sqrt(noise_var[idx])
            else:
                raise NotImplementedError()
            optimizer.zero_grad()
            loss = FLOW.forward_initializer(x)
            loss.backward()
            optimizer.step()
            print('Epoch {} Loss {}'.format(e, loss.detach().item()), end='\r')
            loss_acc += loss.item()
    print('\n')

    FLOW.training = state
    FLOW.turn_off_initializer_parameters()
    FLOW.to('cpu')
    return (FLOW, loss_acc)
class DynamicPairNorm(nn.Module):
    """Pair-normalization module skeleton.

    NOTE(review): the original ``__TransFeauture`` method had no body — a
    syntax error as written; a ``pass`` placeholder is added so the class is
    importable.  The name also misspells "Feature"; kept for compatibility.
    """

    def __init__(self):
        super(DynamicPairNorm, self).__init__()

    def __TransFeauture(self):
        # Placeholder: the implementation was missing from the source.
        pass
class DreamerLearner():
    """Training driver for a multi-agent Dreamer setup.

    Owns the world model, actor, critic (plus a target critic copy), a
    sequence replay buffer, and the optimizers; ``step`` ingests rollouts and
    periodically runs model training followed by PPO-style agent training.
    """

    def __init__(self, config):
        self.config = config
        # World model stays in eval mode except while being trained.
        self.model = DreamerModel(config).to(config.DEVICE).eval()
        self.actor = Actor(config.FEAT, config.ACTION_SIZE, config.ACTION_HIDDEN, config.ACTION_LAYERS).to(config.DEVICE)
        self.critic = AugmentedCritic(config.FEAT, config.HIDDEN).to(config.DEVICE)
        initialize_weights(self.model, mode='xavier')
        initialize_weights(self.actor)
        initialize_weights(self.critic, mode='xavier')
        # Frozen snapshot used as target critic (refreshed in train_agent for FLATLAND).
        self.old_critic = deepcopy(self.critic)
        self.replay_buffer = DreamerMemory(config.CAPACITY, config.SEQ_LENGTH, config.ACTION_SIZE, config.IN_DIM, 2, config.DEVICE, config.ENV_TYPE)
        self.entropy = config.ENTROPY  # annealed each PPO epoch
        self.step_count = (- 1)
        self.cur_update = 1
        self.accum_samples = 0  # samples gathered since the last training burst
        self.total_samples = 0
        self.init_optimizers()
        self.n_agents = 2  # updated from rollout shapes in step()
        Path(config.LOG_FOLDER).mkdir(parents=True, exist_ok=True)
        # Lazy import so wandb is only required when a learner is constructed.
        global wandb
        import wandb
        wandb.init(dir=config.LOG_FOLDER)

    def init_optimizers(self):
        """Create Adam optimizers for the world model, actor, and critic."""
        self.model_optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config.MODEL_LR)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.config.ACTOR_LR, weight_decay=1e-05)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.config.VALUE_LR)

    def params(self):
        """Return CPU copies of all state dicts, keyed by component name."""
        return {'model': {k: v.cpu() for (k, v) in self.model.state_dict().items()}, 'actor': {k: v.cpu() for (k, v) in self.actor.state_dict().items()}, 'critic': {k: v.cpu() for (k, v) in self.critic.state_dict().items()}}

    def step(self, rollout):
        """Ingest one rollout; once enough fresh samples and buffer content
        exist, run MODEL_EPOCHS model updates then EPOCHS agent updates."""
        if (self.n_agents != rollout['action'].shape[(- 2)]):
            self.n_agents = rollout['action'].shape[(- 2)]
        self.accum_samples += len(rollout['action'])
        self.total_samples += len(rollout['action'])
        self.replay_buffer.append(rollout['observation'], rollout['action'], rollout['reward'], rollout['done'], rollout['fake'], rollout['last'], rollout.get('avail_action'))
        self.step_count += 1
        # Train only after N_SAMPLES new samples and a minimum buffer fill.
        if (self.accum_samples < self.config.N_SAMPLES):
            return
        if (len(self.replay_buffer) < self.config.MIN_BUFFER_SIZE):
            return
        self.accum_samples = 0
        sys.stdout.flush()
        for i in range(self.config.MODEL_EPOCHS):
            samples = self.replay_buffer.sample(self.config.MODEL_BATCH_SIZE)
            self.train_model(samples)
        for i in range(self.config.EPOCHS):
            samples = self.replay_buffer.sample(self.config.BATCH_SIZE)
            self.train_agent(samples)

    def train_model(self, samples):
        """One gradient step on the world model from a sampled batch."""
        self.model.train()
        loss = model_loss(self.config, self.model, samples['observation'], samples['action'], samples['av_action'], samples['reward'], samples['done'], samples['fake'], samples['last'])
        self.apply_optimizer(self.model_optimizer, self.model, loss, self.config.GRAD_CLIP)
        self.model.eval()

    def train_agent(self, samples):
        """PPO-style actor/critic updates on imagined rollouts.

        STARCRAFT uses the live critic (with normalized advantages);
        FLATLAND uses the periodically-refreshed target critic.
        """
        (actions, av_actions, old_policy, imag_feat, returns) = actor_rollout(samples['observation'], samples['action'], samples['last'], self.model, self.actor, (self.critic if (self.config.ENV_TYPE == Env.STARCRAFT) else self.old_critic), self.config)
        adv = (returns.detach() - self.critic(imag_feat).detach())
        if (self.config.ENV_TYPE == Env.STARCRAFT):
            adv = advantage(adv)
        wandb.log({'Agent/Returns': returns.mean()})
        for epoch in range(self.config.PPO_EPOCHS):
            inds = np.random.permutation(actions.shape[0])
            step = 2000  # minibatch size over imagined transitions
            for i in range(0, len(inds), step):
                self.cur_update += 1
                idx = inds[i:(i + step)]
                loss = actor_loss(imag_feat[idx], actions[idx], (av_actions[idx] if (av_actions is not None) else None), old_policy[idx], adv[idx], self.actor, self.entropy)
                self.apply_optimizer(self.actor_optimizer, self.actor, loss, self.config.GRAD_CLIP_POLICY)
                self.entropy *= self.config.ENTROPY_ANNEALING
                val_loss = value_loss(self.critic, imag_feat[idx], returns[idx])
                # Sample ~5% of updates for logging to keep wandb traffic low.
                if (np.random.randint(20) == 9):
                    wandb.log({'Agent/val_loss': val_loss, 'Agent/actor_loss': loss})
                self.apply_optimizer(self.critic_optimizer, self.critic, val_loss, self.config.GRAD_CLIP_POLICY)
                if ((self.config.ENV_TYPE == Env.FLATLAND) and ((self.cur_update % self.config.TARGET_UPDATE) == 0)):
                    self.old_critic = deepcopy(self.critic)

    def apply_optimizer(self, opt, model, loss, grad_clip):
        """Backprop ``loss`` and take one clipped optimizer step."""
        opt.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        opt.step()
def convert_str_dtype_to_torch_dtype(str_dtype: Optional[str]):
    """Map a dtype spelling (e.g. 'fp16', 'double', None) to a torch dtype.

    ``None`` and the float32 aliases map to ``torch.float``; any unknown
    spelling raises ``ValueError``.
    """
    alias_table = (
        (torch.float, ('single', 'float32', 'float', 'fp32', None)),
        (torch.float16, ('half', 'float16', 'fp16')),
        (torch.bfloat16, ('bfloat16', 'bf16')),
        (torch.float64, ('double', 'float64')),
    )
    for dtype, spellings in alias_table:
        if str_dtype in spellings:
            return dtype
    raise ValueError(f'Unknown dtype: {str_dtype}')
def _project_vertices(v, w, h, cam_r, cam_t):
    """Orthographically project vertices ``v`` into a w x h image plane."""
    vertices = ch.array(v)
    # Focal length equal to the image width; principal point at the center.
    return ProjectPointsOrthogonal(
        v=vertices,
        f=[w, w],
        c=[w / 2.0, h / 2.0],
        k=ch.zeros(5),  # no lens distortion
        t=cam_t,
        rt=cam_r,
    )
class MyConcatDataset(ConcatDataset):
    """ConcatDataset exposing the first child's ``train`` flag and forwarding
    ``set_scale`` to every child dataset that supports it."""

    def __init__(self, datasets):
        super(MyConcatDataset, self).__init__(datasets)
        self.train = datasets[0].train

    def set_scale(self, idx_scale):
        """Propagate the scale index to all children with a ``set_scale``."""
        for child in self.datasets:
            scale_setter = getattr(child, 'set_scale', None)
            if scale_setter is not None:
                scale_setter(idx_scale)
class Conv1d(nn.Conv1d):
    """nn.Conv1d whose kernel_size defaults to 1 when only the two channel
    counts are given positionally."""

    def __init__(self, *args, **kwargs):
        only_channels_given = len(args) == 2 and 'kernel_size' not in kwargs
        if only_channels_given:
            # Inject kernel_size=1 as the third positional argument.
            super(Conv1d, self).__init__(*args, 1, **kwargs)
        else:
            super(Conv1d, self).__init__(*args, **kwargs)
class TemplateDataset(BaseDataset):
    """Minimal template dataset illustrating the required dataset API."""

    def modify_commandline_options(parser, is_train):
        """Register template-specific CLI options and tweak defaults.

        NOTE(review): comparable templates usually mark this @staticmethod —
        confirm how the framework invokes it before adding the decorator.
        """
        parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
        parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)
        return parser

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)
        self.image_paths = []  # fill with real paths in a concrete dataset
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return a placeholder sample dict; replace with real loading."""
        placeholder_path = 'temp'
        return {'data_A': None, 'data_B': None, 'path': placeholder_path}

    def __len__(self):
        return len(self.image_paths)
def __get_data_folder_path():
    """Return the on-disk resource folder, materializing the DB on first use."""
    resource_dir = path_join(os_path.expanduser('~'), '.pydataset/resources/rdata/')
    if not os_path.exists(resource_dir):
        # First run: create the bundled database, then resolve again.
        __setup_db()
        return __get_data_folder_path()
    return resource_dir
def apply_random_overlay_triangle(img, max_alpha, mask=None, rnd_state=None):
    """Blend a randomly placed, randomly shaded triangle into ``img``.

    The triangle is added or subtracted (50/50 chance) with alpha drawn from
    [0, max_alpha); when ``mask`` is given the effect is restricted to the
    masked region.  ``img`` is assumed to hold values in [0, 1].
    """
    rnd = np.random if rnd_state is None else rnd_state
    h, w, c = img.shape

    # Draw order matches the original: (x, y) per corner, then alpha, then sign.
    corners = np.array([[rnd.randint(w), rnd.randint(h)] for _ in range(3)], np.int32)
    alpha = rnd.uniform() * max_alpha
    tri_mask = cv2.fillPoly(np.zeros_like(img), [corners], (alpha,) * c)

    if rnd.randint(2) == 0:
        result = np.clip(img + tri_mask, 0, 1)
    else:
        result = np.clip(img - tri_mask, 0, 1)

    if mask is not None:
        result = img * (1 - mask) + result * mask
    return result
class LipNormLinear(nn.Linear):
    """Linear layer whose induced operator norm is softly bounded by ``coeff``.

    The weight is rescaled by a learned factor (per-row when
    ``local_constraint``, global otherwise) so that its norm for the given
    domain/codomain pair stays below ``coeff``.
    """

    def __init__(self, in_features, out_features, bias=True, coeff=0.97, domain=float('inf'), codomain=float('inf'), local_constraint=True, **unused_kwargs):
        del unused_kwargs
        super(LipNormLinear, self).__init__(in_features, out_features, bias)
        self.coeff = coeff
        self.domain = domain
        self.codomain = codomain
        self.local_constraint = local_constraint

        max_across_input_dims, self.norm_type = operator_norm_settings(self.domain, self.codomain)
        self.max_across_dim = 1 if max_across_input_dims else 0

        # Initialize the learnable scale from the current weight norms.
        with torch.no_grad():
            init_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
            if not self.local_constraint:
                init_scale = init_scale.max()
            self.scale = nn.Parameter(_logit(init_scale / self.coeff))

    def compute_weight(self):
        """Return the norm-constrained weight used by ``forward``."""
        w_scale = _norm_except_dim(self.weight, self.norm_type, dim=self.max_across_dim)
        if not self.local_constraint:
            w_scale = w_scale.max()
        normalized = self.weight / w_scale
        return normalized * torch.sigmoid(self.scale) * self.coeff

    def forward(self, input):
        return F.linear(input, self.compute_weight(), self.bias)

    def extra_repr(self):
        base = super(LipNormLinear, self).extra_repr()
        return base + ', coeff={}, domain={}, codomain={}, local={}'.format(
            self.coeff, self.domain, self.codomain, self.local_constraint)
def get_parameters():
    """Parse and return all command-line arguments for the experiment.

    Groups: dataset/model selection, point-cloud decoder sizing, display/
    logging, and GAN training hyperparameters.
    """
    parser = argparse.ArgumentParser()
    # --- Dataset and encoder/decoder selection ---
    parser.add_argument('-d', '--data', metavar='DIR', default='', help='Path to Complete Point Cloud Data Set')
    parser.add_argument('-s', '--split_value', default=0.9, help='Ratio of train and test data split')
    parser.add_argument('-n', '--dataName', metavar='Data Set Name', default='shapenet')
    parser.add_argument('--pretrained', default='')
    parser.add_argument('-me', '--model_encoder', default='encoder_pointnet', help='Chose Your Encoder Model Here', choices=['encoder_pointnet'])
    parser.add_argument('-md', '--model_decoder', default='decoder_sonet', help='Chose Your Decoder Model Here', choices=['decoder_sonet'])
    # --- Point-cloud decoder output sizing ---
    parser.add_argument('--output_fc_pc_num', type=int, default=256, help='# of fc decoder output points')
    parser.add_argument('--output_conv_pc_num', type=int, default=4096, help='# of conv decoder output points')
    parser.add_argument('--feature_num', type=int, default=1024, help='length of encoded feature')
    parser.add_argument('--activation', type=str, default='relu', help='activation function: relu, elu')
    parser.add_argument('--normalization', type=str, default='batch', help='normalization function: batch, instance')
    # --- Experiment naming and web display ---
    parser.add_argument('--name', type=str, default='GFV', help='name of the experiment. It decides where to store samples and models')
    parser.add_argument('--display_winsize', type=int, default=256, help='display window size')
    parser.add_argument('--display_id', type=int, default=2000, help='window id of the web display')
    parser.add_argument('--port_id', type=int, default=8099, help='Port id for browser')
    parser.add_argument('--print_freq', type=int, default=10, help='Print Frequency')
    parser.add_argument('--gpu_id', type=int, default=0, help='gpu ids: e.g. 0, 1. -1 is no GPU')
    parser.add_argument('--max_action', type=float, default=10)
    # --- GAN architecture and training hyperparameters ---
    parser.add_argument('--model', type=str, default='sagan', choices=['sagan', 'qgan'])
    parser.add_argument('--adv_loss', default='wgan-gp', type=str, choices=['wgan-gp', 'hinge'])
    parser.add_argument('--imsize', default=32, type=int)
    parser.add_argument('--imsize_new', default=32, type=int)
    parser.add_argument('--g_num', type=int, default=5)
    parser.add_argument('--z_dim', type=int, default=1)
    parser.add_argument('--g_conv_dim', type=int, default=64)
    parser.add_argument('--d_conv_dim', type=int, default=64)
    parser.add_argument('--lambda_gp', type=float, default=10)
    parser.add_argument('--version', default='sagan_celeb', type=str)
    parser.add_argument('--total_step', type=int, default=1000000, help='how many times to update the generator')
    parser.add_argument('--d_iters', type=float, default=5)
    parser.add_argument('--batch_size', default=50, type=int)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--g_lr', type=float, default=0.0001)
    parser.add_argument('--d_lr', type=float, default=0.0001)
    parser.add_argument('--lr_decay', type=float, default=0.0)
    parser.add_argument('--beta1', type=float, default=0.5)
    parser.add_argument('--beta2', type=float, default=0.9)
    parser.add_argument('--pretrained_model', type=int, default=None)
    parser.add_argument('--train', type=str2bool, default=True)
    parser.add_argument('--parallel', type=str2bool, default=False)
    parser.add_argument('--dataset', default='celeb', type=str, choices=['lsun', 'celeb'])
    parser.add_argument('--use_tensorboard', type=str2bool, default=False)
    # --- Output paths and checkpoint/logging cadence ---
    parser.add_argument('--image_path', type=str, default='./data')
    parser.add_argument('--log_path', type=str, default='./logs')
    parser.add_argument('--model_save_path', type=str, default='./models')
    parser.add_argument('--sample_path', type=str, default='./samples')
    parser.add_argument('--attn_path', type=str, default='./attn')
    parser.add_argument('--log_step', type=int, default=100)
    parser.add_argument('--sample_step', type=int, default=100)
    parser.add_argument('--model_save_step', type=float, default=10.0)
    return parser.parse_args()
def cpu_thread():
    """Worker loop: pull image jobs from CPU_IN_QUEUE, load them, and hand
    the loaded batch to GPU_IN_QUEUE.  Runs forever."""
    global CPU_IN_QUEUE
    global GPU_IN_QUEUE
    while True:
        started = time()
        index, image_info = CPU_IN_QUEUE.get()
        loaded = load_images(image_info)
        GPU_IN_QUEUE.put((index, loaded), block=True)
        CPU_IN_QUEUE.task_done()
        print('Loaded images in {}s.'.format(time() - started))
def train(model: ExactGPModel, train_x, train_y, training_iter=900, lr_scheduler_step=300, lr=1):
    """Fit an exact GP by maximizing the exact marginal log-likelihood.

    Runs Adam with a StepLR schedule (gamma=0.1 every ``lr_scheduler_step``
    iterations) for ``training_iter`` steps, printing hyperparameters every
    100 iterations.
    """
    model.train()
    model.likelihood.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = StepLR(optimizer, step_size=lr_scheduler_step, gamma=0.1)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
    for i in range(training_iter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y)
        loss.backward(retain_graph=True)
        optimizer.step()
        scheduler.step()
        if i % 100 == 99:
            # Kernels expose lengthscale/variance under different attribute
            # paths; fall back to the measure-based layout when needed.
            try:
                lengthscale = model.covar_module.base_kernel.lengthscale.item()
                variance = model.covar_module.outputscale.item()
            except AttributeError:  # was a bare `except:` — narrowed
                lengthscale = model.covar_module.measure.lengthscale.item()
                variance = model.covar_module.measure.variance.item()
            # Default avoids the original NameError for unexpected mean modules.
            mean = float('nan')
            if isinstance(model.mean_module, gpytorch.means.ConstantMean):
                mean = model.mean_module.constant.item()
            elif isinstance(model.mean_module, gpytorch.means.ZeroMean):
                mean = 0
            print('Iter %d/%d - Loss: %.3f lengthscale: %.3f variance: %.3f noise: %.3f, mean: %3f' % (
                i + 1, training_iter, loss.item(), lengthscale, variance, model.likelihood.noise.item(), mean))
def check_details(line, spm_ids, tok_ids, slow, fast):
    """Compare sentencepiece ids against tokenizer ids for one line.

    Locates the first/last positions where the two id sequences diverge,
    tries known-benign explanations (``check_diff``, LTR marks, re-alignable
    spans with the same token multiset), and otherwise prints the offending
    span.  Returns True when the difference is explainable, False otherwise.
    """
    # First index at which the sequences differ.
    for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)):
        if spm_id != tok_id:
            break
    first = i
    # Offset from the end at which they differ.
    for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))):
        if spm_id != tok_id:
            break
    last = len(spm_ids) - i

    spm_diff = spm_ids[first:last]
    tok_diff = tok_ids[first:last]
    if check_diff(spm_diff, tok_diff, slow, fast):
        return True
    if check_LTR_mark(line, first, fast):
        return True

    if (last - first) > 5:
        # Try to re-align: the same multiset of ids in a different order.
        spms = Counter(spm_ids[first:last])
        toks = Counter(tok_ids[first:last])
        removable_tokens = {spm_ for spm_, si in spms.items() if toks.get(spm_, 0) == si}
        min_width = 3
        for i in range((last - first) - min_width):
            if all(spm_ids[first + i + j] in removable_tokens for j in range(min_width)):
                possible_matches = [
                    k for k in range((last - first) - min_width)
                    if tok_ids[first + k:first + k + min_width] == spm_ids[first + i:first + i + min_width]
                ]
                for j in possible_matches:
                    # BUG FIX: the original passed undefined names `sp, tok`
                    # to check_diff here; the tokenizers are `slow, fast`.
                    if check_diff(spm_ids[first:first + i], tok_ids[first:first + j], slow, fast) and check_details(
                        line, spm_ids[first + i:last], tok_ids[first + j:last], slow, fast
                    ):
                        return True

    print(f'Spm: {[fast.decode([spm_ids[i]]) for i in range(first, last)]}')
    try:
        print(f'Tok: {[fast.decode([tok_ids[i]]) for i in range(first, last)]}')
    except Exception:
        pass
    ok_start = fast.decode(spm_ids[:first])
    ok_end = fast.decode(spm_ids[last:])
    wrong = fast.decode(spm_ids[first:last])
    print()
    print(wrong)
    return False
def get_metric_func(metric):
    """Resolve a metric name ('auc', 'prc', 'rmse', 'mae') to its function.

    Raises:
        ValueError: for any unrecognized metric name.
    """
    # Lambdas keep the lookups lazy, matching the original branch behavior.
    resolvers = {
        'auc': lambda: roc_auc_score,
        'prc': lambda: prc_auc,
        'rmse': lambda: rmse,
        'mae': lambda: mean_absolute_error,
    }
    try:
        return resolvers[metric]()
    except KeyError:
        raise ValueError(f'Metric "{metric}" not supported.') from None
class CameraRailBoundingBox():
    """Circular camera rail fitted inside an axis-aligned bounding box.

    The rail is the circle centered at the box's (x, y) midpoint with radius
    half the larger side, at fixed height ``z_height``; ``phi`` is the
    angular position along the rail.
    """

    def __init__(self, x_low, x_high, y_low, y_high, z_height, phi=0):
        self.center = np.array([(x_high + x_low) / 2, (y_high + y_low) / 2])
        self.radius = max((x_high - x_low) / 2, (y_high - y_low) / 2)
        self.z = z_height
        self.phi = phi
        self.position = self._get_coords()
        self.vel = 0

    def get_coords(self, d_phi, factor=0.1):
        """Advance along the rail by d_phi (clamped to +/- 2*pi/50); return xyz."""
        max_step = 2 * np.pi / 50
        self.phi += np.clip(d_phi, -max_step, max_step)
        return self._get_coords()

    def _get_coords(self) -> list:
        offset_x = np.cos(self.phi) * self.radius
        offset_y = np.sin(self.phi) * self.radius
        return [self.center[0] + offset_x, self.center[1] + offset_y, self.z]
def xyz_from_depth(depth_image, depth_intrinsic, depth_scale=1000.0):
    """Back-project a depth image into an (H, W, 3) point map.

    ``depth_intrinsic`` is the 3x3 camera matrix; ``depth_scale`` converts
    stored depth units into the output units.  Pixels with zero depth are
    marked NaN.
    """
    fx, fy = depth_intrinsic[0, 0], depth_intrinsic[1, 1]
    cx, cy = depth_intrinsic[0, 2], depth_intrinsic[1, 2]

    # Pixel coordinate grids: y indexes rows, x indexes columns.
    y, x = np.meshgrid(range(depth_image.shape[0]), range(depth_image.shape[1]),
                       sparse=False, indexing='ij')

    Z = depth_image / depth_scale
    X = (x - cx) * depth_image / (fx * depth_scale)
    Y = (y - cy) * depth_image / (fy * depth_scale)

    xyz = np.stack([X, Y, Z], axis=2)
    xyz[depth_image == 0] = np.nan  # invalid depth
    return xyz
def flatten_grads(var_list, grads, clip_grad_range=None):
    """Concatenate per-variable gradients into one flat tensor, optionally
    clipping each gradient to ``clip_grad_range = (min, max)`` first."""
    if clip_grad_range is None:
        flat_parts = [tf.reshape(g, [U.numel(v)]) for v, g in zip(var_list, grads)]
    else:
        flat_parts = [
            tf.reshape(tf.clip_by_value(g, *clip_grad_range), [U.numel(v)])
            for v, g in zip(var_list, grads)
        ]
    return tf.concat(flat_parts, 0)
class JsonInputReader(BaseInputReader):
    """Reads JSON-serialized documents (tokens / BILOU tags / relations) and
    converts them into Dataset, Document, Entity, and Relation objects."""

    def __init__(self, labels_path: str, bio_path: str, tokenizer: BertTokenizer, logger: Logger=None):
        super().__init__(labels_path, bio_path, tokenizer, logger)

    def read(self, dataset_paths):
        """Parse every (label -> path) dataset, then compute the context size."""
        for (dataset_label, dataset_path) in dataset_paths.items():
            dataset = Dataset(dataset_label, self)
            self._parse_dataset(dataset_path, dataset)
            self._datasets[dataset_label] = dataset
        # Context size is derived from all parsed datasets together.
        self._context_size = self._calc_context_size(self._datasets.values())

    def _parse_dataset(self, dataset_path, dataset):
        """Load one JSON file and parse each document into ``dataset``."""
        documents = json.load(open(dataset_path))
        for document in tqdm(documents, desc=("Parse dataset '%s'" % dataset.label)):
            self._parse_document(document, dataset)

    def _parse_document(self, doc, dataset) -> Document:
        """Convert one JSON document into a Document with entities/relations."""
        jtokens = doc['tokens']
        jrelations = doc['relations']
        jtags = doc['tags']
        doc_id = doc['orig_id']
        # Non-train splits also feed the BIO reference file.
        if (dataset.label != 'train'):
            self._update_bio_file_info(jtokens, jtags)
        (doc_tokens, doc_encoding) = self._parse_tokens(doc_id, jtokens, dataset)
        entities = self._parse_entities(doc_id, jtags, doc_tokens, dataset)
        relations = self._parse_relations(doc_id, jrelations, entities, dataset)
        document = dataset.create_document(doc_id, doc_tokens, entities, relations, doc_encoding)
        return document

    def _parse_tokens(self, doc_id, jtokens, dataset):
        """Sub-tokenize each token and record its sub-token span.

        Returns (doc_tokens, doc_encoding) where doc_encoding is the id
        sequence [CLS] + sub-token ids + [SEP].
        """
        doc_tokens = []
        # Reuse the cached tokens/encoding when this document was seen before.
        if (doc_id in dataset._documents):
            return (dataset._documents[doc_id]._tokens, dataset._documents[doc_id].encoding)
        doc_encoding = [self._tokenizer.convert_tokens_to_ids('[CLS]')]
        for (i, token_phrase) in enumerate(jtokens):
            token_encoding = self._tokenizer.encode(token_phrase, add_special_tokens=False)
            (span_start, span_end) = (len(doc_encoding), (len(doc_encoding) + len(token_encoding)))
            token = dataset.create_token(i, span_start, span_end, token_phrase)
            doc_tokens.append(token)
            doc_encoding += token_encoding
        doc_encoding += [self._tokenizer.convert_tokens_to_ids('[SEP]')]
        return (doc_tokens, doc_encoding)

    def _parse_entities(self, doc_id, jtags, doc_tokens, dataset) -> List[Entity]:
        """Decode BILOU-style tags into Entity objects (B/U open, U/L close)."""
        entities = []
        entity_labels = []
        for (idx, jtag) in enumerate(jtags):
            if (not jtag.startswith('O')):
                entity_labels.append(self._entity_labels[jtag])
                if (jtag.startswith('B') or jtag.startswith('U')):
                    start = idx
                if (jtag.startswith('U') or jtag.startswith('L')):
                    # 'X-type' -> the entity type name follows the dash.
                    entity_type = self._entity_types[jtag[2:]]
                    end = (idx + 1)
                    tokens = doc_tokens[start:end]
                    phrase = ' '.join([t.phrase for t in tokens])
                    entity = dataset.create_entity(doc_id, entity_type, entity_labels, tokens, phrase)
                    entities.append(entity)
                    entity_labels = []
        return entities

    def _parse_pred_entities(self, jpreds, doc_tokens, dataset) -> List[Entity]:
        """Same decoding as _parse_entities but for predicted tag sequences."""
        entities = []
        entity_labels = []
        for (idx, jpred) in enumerate(jpreds):
            if (not jpred.startswith('O')):
                entity_labels.append(self._entity_labels[jpred])
                if (jpred.startswith('B') or jpred.startswith('U')):
                    start = idx
                if (jpred.startswith('U') or jpred.startswith('L')):
                    entity_type = self._entity_types[jpred[2:]]
                    end = (idx + 1)
                    tokens = doc_tokens[start:end]
                    phrase = ' '.join([t.phrase for t in tokens])
                    entity = dataset.create_pred_entity(entity_type, entity_labels, tokens, phrase)
                    entities.append(entity)
                    entity_labels = []
        return entities

    def _parse_relations(self, doc_id, jrelations, entities, dataset) -> List[Relation]:
        """Build Relation objects; label directionality (R-/L-) encodes
        whether the head precedes or follows the tail in the text."""
        relations = []
        for jrelation in jrelations:
            relation_type = self._relation_types[jrelation['type']]
            head_idx = jrelation['head']
            tail_idx = jrelation['tail']
            head = entities[head_idx]
            tail = entities[tail_idx]
            if (head.tokens[0].index < tail.tokens[0].index):
                relation_label = self._relation_labels[('R-' + jrelation['type'])]
            else:
                relation_label = self._relation_labels[('L-' + jrelation['type'])]
            relation = dataset.create_relation(doc_id, relation_type, relation_label, head_entity=head, tail_entity=tail)
            relations.append(relation)
        return relations

    def _update_bio_file_info(self, tokens, tags):
        """Record tokens with BILOU tags converted to plain BIO (U->B, L->I)."""
        bio_tags = []
        for t in tags:
            if t.startswith('U'):
                bio_tags.append(('B' + t[1:]))
            elif t.startswith('L'):
                bio_tags.append(('I' + t[1:]))
            else:
                bio_tags.append(t)
        self._bio_file['tokens'].append(tokens)
        self._bio_file['tags'].append(bio_tags)
        return
class BatchNorm1d(_BNBase):
    """_BNBase specialized to torch's nn.BatchNorm1d."""

    def __init__(self, in_size: int, *, name: str=''):
        """in_size: number of features; name: optional layer-name prefix."""
        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class Observation(NamedTuple):
    """Single environment observation."""
    grid: chex.Array  # board/grid contents
    step_count: chex.Numeric  # steps elapsed in the episode
    action_mask: chex.Array  # legal-action mask — presumably boolean per action; confirm
class DownsampleBlock(nn.Module):
    """conv(stride 2) -> batch norm -> activation downsampling block."""

    def __init__(self, in_channel, out_channel, kernel_size, bias, pad, act_fun, downsample_mode):
        super(DownsampleBlock, self).__init__()
        conv_layer = conv(in_f=in_channel, out_f=out_channel, kernel_size=kernel_size, stride=2, bias=bias, pad=pad, downsample_mode=downsample_mode)
        norm_layer = bn(num_features=out_channel)
        act_layer = act(act_fun=act_fun)
        self.op = nn.Sequential(conv_layer, norm_layer, act_layer)

    def forward(self, data):
        return self.op(data)
class eval_classifier_optimized_graph():
    """Quantize (``args.tune``) and/or benchmark (``args.benchmark``) an
    image-classification graph with Intel Neural Compressor.

    Relies on a module-level ``args`` namespace (input_graph, output_graph,
    dataset_location, batch_size, mode, tune, benchmark) and an ``evaluate``
    helper defined elsewhere in the file.
    """

    def run(self):
        """Run post-training quantization and/or performance/accuracy evaluation."""
        from neural_compressor import set_random_seed
        set_random_seed(9527)
        if args.tune:
            from neural_compressor import quantization
            from neural_compressor.config import PostTrainingQuantConfig
            from neural_compressor.utils.create_obj_from_config import create_dataloader
            # Calibration uses small batches; evaluation uses larger ones.
            calib_dataloader_args = {'batch_size': 10, 'dataset': {'ImageRecord': {'root': args.dataset_location}}, 'transform': {'BilinearImagenet': {'height': 299, 'width': 299}}, 'filter': None}
            calib_dataloader = create_dataloader('tensorflow', calib_dataloader_args)
            eval_dataloader_args = {'batch_size': 32, 'dataset': {'ImageRecord': {'root': args.dataset_location}}, 'transform': {'BilinearImagenet': {'height': 299, 'width': 299}}, 'filter': None}
            eval_dataloader = create_dataloader('tensorflow', eval_dataloader_args)
            conf = PostTrainingQuantConfig(calibration_sampling_size=[50, 100])
            from neural_compressor import Metric
            top1 = Metric(name='topk', k=1)
            q_model = quantization.fit(args.input_graph, conf=conf, calib_dataloader=calib_dataloader, eval_dataloader=eval_dataloader, eval_metric=top1)
            q_model.save(args.output_graph)
        if args.benchmark:
            from neural_compressor.utils.create_obj_from_config import create_dataloader
            dataloader_args = {'batch_size': args.batch_size, 'dataset': {'ImageRecord': {'root': args.dataset_location}}, 'transform': {'BilinearImagenet': {'height': 299, 'width': 299}}, 'filter': None}
            dataloader = create_dataloader('tensorflow', dataloader_args)
            from neural_compressor import METRICS
            metrics = METRICS('tensorflow')
            top1 = metrics['topk']()

            # Closure evaluating a model on the benchmark dataloader (top-1).
            def eval(model):
                return evaluate(model, dataloader, top1)
            if (args.mode == 'performance'):
                from neural_compressor.benchmark import fit
                from neural_compressor.config import BenchmarkConfig
                conf = BenchmarkConfig(warmup=10, iteration=100, cores_per_instance=4, num_of_instance=1)
                fit(args.input_graph, conf, b_dataloader=dataloader)
            elif (args.mode == 'accuracy'):
                acc_result = eval(args.input_graph)
                print(('Batch size = %d' % dataloader.batch_size))
                print(('Accuracy: %.5f' % acc_result))
class CosineWithWarmup(_LRScheduler):
    """Linear warmup for ``warmup_epochs`` epochs, then cosine annealing.

    During warmup the learning rate ramps linearly from
    base_lr / warmup_epochs up to base_lr; afterwards stepping is delegated
    to an internal CosineAnnealingLR over the remaining
    ``T_max - warmup_epochs`` epochs.
    """

    def __init__(self, optimizer, T_max, warmup_epochs, eta_min=0):
        self.cosine_scheduler = CosineAnnealingLR(optimizer, (T_max - warmup_epochs), eta_min)
        self.warmup_epochs = warmup_epochs
        self.finished = False
        super().__init__(optimizer=optimizer)

    def get_lr(self):
        epoch = self.last_epoch
        if epoch == 0:
            return [lr / self.warmup_epochs for lr in self.base_lrs]
        if epoch < self.warmup_epochs:
            # Linear ramp toward the base learning rates.
            return [lr * epoch / self.warmup_epochs for lr in self.base_lrs]
        if epoch == self.warmup_epochs:
            self.finished = True  # subsequent steps go to the cosine schedule
            return self.base_lrs
        return self.cosine_scheduler.get_lr()

    def step(self, epoch=None):
        """Step the warmup schedule, or delegate to cosine once finished."""
        if not self.finished:
            return super(CosineWithWarmup, self).step(epoch)
        if epoch is None:
            self.cosine_scheduler.step(None)
        else:
            self.cosine_scheduler.step(epoch - self.warmup_epochs)
def get_anno_path(split):
    """Map a dataset split name to its configured annotation path.

    Raises:
        ValueError: when the split is not one of the known phases.
    """
    # Attribute access stays lazy: only the matching cfg entry is touched.
    if split == phase.TRAIN.value:
        return cfg.PATH.ANNOTATIONS_TRAIN
    if split == phase.VAL.value:
        return cfg.PATH.ANNOTATIONS_VAL
    if split == phase.TRAINVAL.value:
        return cfg.PATH.ANNOTATIONS_TRAINVAL
    if split == phase.TRAINTESTDEVOT.value:
        return cfg.PATH.ANNOTATIONS_TRAINTESTDEVOT
    raise ValueError('not support %s' % split)
def cal_dtw(shortest_distances, prediction, reference, success=None, threshold=3.0):
    """Dynamic-time-warping metrics between a predicted and a reference path.

    Returns a dict with DTW (raw alignment cost), nDTW (normalized to
    (0, 1]), and SDTW (nDTW gated by success; success defaults to whether
    the final predicted node is within ``threshold`` of the final reference
    node under ``shortest_distances``).
    """
    n_pred, n_ref = len(prediction), len(reference)
    dtw_matrix = np.inf * np.ones((n_pred + 1, n_ref + 1))
    dtw_matrix[0][0] = 0
    for i in range(1, n_pred + 1):
        for j in range(1, n_ref + 1):
            best_previous_cost = min(
                dtw_matrix[i - 1][j], dtw_matrix[i][j - 1], dtw_matrix[i - 1][j - 1]
            )
            cost = shortest_distances[prediction[i - 1]][reference[j - 1]]
            dtw_matrix[i][j] = cost + best_previous_cost
    dtw = dtw_matrix[n_pred][n_ref]
    ndtw = np.exp(-dtw / (threshold * n_ref))
    if success is None:
        success = float(shortest_distances[prediction[-1]][reference[-1]] < threshold)
    return {'DTW': dtw, 'nDTW': ndtw, 'SDTW': success * ndtw}
def save_checkpoint(state, is_best, dirpath, epoch):
    """Persist ``state`` as checkpoint.<epoch>.ckpt; when it is the best so
    far, also copy it to best.ckpt in the same directory."""
    checkpoint_path = os.path.join(dirpath, 'checkpoint.{}.ckpt'.format(epoch))
    torch.save(state, checkpoint_path)
    LOG.info('--- checkpoint saved to %s ---' % checkpoint_path)
    if is_best:
        best_path = os.path.join(dirpath, 'best.ckpt')
        shutil.copyfile(checkpoint_path, best_path)
        LOG.info('--- checkpoint copied to %s ---' % best_path)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=''):
    """Translate a TensorFlow variable name into its PyTorch equivalent.

    Returns ``(pt_name, transpose)`` where ``transpose`` indicates whether
    the weight matrix must be transposed when copied across frameworks.
    """
    name = tf_name.replace(':0', '')  # drop the TF output suffix
    name = re.sub('/[^/]*___([^/]*)/', '/\\1/', name)  # '...___x/' scope names
    name = name.replace('_._', '/')  # block separator emitted by TF layers
    name = re.sub('//+', '/', name)  # collapse duplicated slashes
    parts = name.split('/')
    if len(parts) > 1:
        parts = parts[1:]  # strip the outermost model scope

    # Dense kernels and the *_projs embedding tables are stored transposed.
    transpose = bool(parts[-1] == 'kernel' or 'emb_projs' in parts or 'out_projs' in parts)

    if parts[-1] in ('kernel', 'embeddings', 'gamma'):
        parts[-1] = 'weight'
    elif parts[-1] == 'beta':
        parts[-1] = 'bias'

    pt_name = '.'.join(parts)
    if start_prefix_to_remove:
        pt_name = pt_name.replace(start_prefix_to_remove, '', 1)
    return (pt_name, transpose)
('Please use `bigdl.chronos.data.TSDataset` instead.')
class TimeMergeImputor(BaseImputation):
    """Imputation that snaps timestamps onto a regular ``time_interval``
    (seconds) grid and optionally aggregates rows that collapse onto the
    same timestamp (mode 'max'/'min'/'mean'/'sum'; '' leaves rows as-is)."""

    def __init__(self, time_interval, timestamp_column_name, mode=''):
        self.time_interval = time_interval
        self.timestamp_column_name = timestamp_column_name
        self.mode = mode

    def impute(self, input_df):
        """Return ``input_df`` with timestamps aligned (and aggregated)."""
        import pyspark.sql.functions as f
        ts_col = self.timestamp_column_name
        ori_column_name = ts_col + '_ori'
        df = input_df.withColumnRenamed(ts_col, ori_column_name)
        # Seconds offset that moves each row to the nearest interval multiple.
        seconds_shift = (f.round(f.second(ori_column_name) / self.time_interval) * self.time_interval) - f.second(ori_column_name)
        merged_df = (
            df.withColumn('add_seconds', seconds_shift)
            .withColumn(ts_col, f.from_unixtime(f.unix_timestamp(ori_column_name) + f.col('add_seconds')))
            .drop('add_seconds')
        )
        if self.mode in ('max', 'min', 'mean', 'sum'):
            grouped = merged_df.groupby(ts_col)
            merged_df = getattr(grouped, self.mode)()
        elif self.mode != '':
            from bigdl.nano.utils.common import invalidInputError
            invalidInputError(False, 'Currently only support max/min/mean/sum mode')
        return merged_df
def PGD_perturb(sess, model, gradient, x, y, num_step, step_size, max_perturb):
    """Projected-gradient-descent adversarial perturbation of ``x``.

    Takes ``num_step`` signed-gradient steps of ``step_size``, projecting
    the perturbation into the L-inf ball of radius ``max_perturb`` and
    keeping ``x + perturb`` inside the valid pixel range [0, 1].
    """
    perturb = np.zeros(x.shape)
    for _ in range(num_step):
        grad_val = sess.run(gradient, feed_dict={model.x_input: x + perturb, model.y_input: y})
        perturb += step_size * np.sign(grad_val)
        perturb = np.clip(perturb, -max_perturb, max_perturb)  # L-inf projection
        perturb = np.clip(x + perturb, 0, 1.0) - x  # keep image in range
    return np.clip(x + perturb, 0, 1.0)
def operator_norm_settings(domain, codomain):
    """Map a (domain, codomain) p-norm pair to Lipschitz-norm settings.

    Returns (max_across_input_dims, norm_type) for the supported
    combinations; raises ValueError for anything else.
    """
    inf = float('inf')
    table = {
        (1, 1): (True, 1),
        (1, 2): (True, 2),
        (1, inf): (True, inf),
        (2, inf): (False, 2),
        (inf, inf): (False, 1),
    }
    try:
        return table[(domain, codomain)]
    except (KeyError, TypeError):
        raise ValueError('Unknown combination of domain "{}" and codomain "{}"'.format(domain, codomain)) from None
def test_effvol_uniform_complete_partialsky():
    """Effective volume of a uniform, RA-limited selection should match the
    analytic shell volume scaled by completeness and RA sky fraction."""
    comp = 0.33
    ramin, ramax = 30.0, 120.0
    tsf = gaia_tools.select.tgasSelectUniform(comp=comp, ramin=ramin, ramax=ramax)
    tesf = gaia_tools.select.tgasEffectiveSelect(tsf)
    rmin = 0.0
    # (shell thickness, integration points, log10 tolerance) per case
    for dr, ndists, tol_exp in ((0.1, 251, -2.0), (0.2, 501, -1.9)):
        v = tesf.volume((lambda x, y, z: spher_vol_func(x, y, z, rmin=rmin, rmax=rmin + dr)), xyz=True, ndists=ndists)
        # Sphere volume x completeness x fraction of sky covered in RA.
        v_exp = (4.0 * numpy.pi * dr ** 3.0 / 3.0) * comp * (ramax - ramin) / 360.0
        assert numpy.fabs(v / v_exp - 1.0) < 10.0 ** tol_exp, 'Effective volume for unit completeness is not equal to the volume'
    return None
def _restore_model(file, c):
model = c.MODEL(c.P.d[0], c.P.d[1], c.N_HIDDEN, c.N_LAYERS)
cp = torch.load(file, map_location=torch.device('cpu'))
model.load_state_dict(cp['model_state_dict'])
model.eval()
return model |
def vgg16_bn(cuda=True, model_root=None):
    # Build a VGG backbone and return (model, dataset.get, True).
    # NOTE(review): despite the function name and the printed message, this
    # constructs vgg.vgg19_bn, not vgg.vgg16_bn -- confirm whether that is
    # intentional before relying on the architecture.
    print('Building vgg16_bn parameters')
    from imagenet import vgg
    m = vgg.vgg19_bn(model_root)
    if cuda:
        # Move the model to GPU when requested.
        m = m.cuda()
    return (m, dataset.get, True)
def _get_partition_rules():
    """Return (parameter-path pattern, PartitionSpec) rules for sharding a
    GPT-style transformer across the 'mp' (model-parallel) mesh axis."""
    return [
        # Embedding tables: sharded along the vocab/position dimension.
        (('transformer', 'wpe', 'embedding'), P('mp', None)),
        (('transformer', 'wte', 'embedding'), P('mp', None)),
        # Attention: QKV kernels sharded on output, out_proj on input.
        (('attention', '(q_proj|k_proj|v_proj)', 'kernel'), P(None, 'mp')),
        (('attention', 'out_proj', 'kernel'), P('mp', None)),
        (('attention', 'out_proj', 'bias'), None),
        # MLP: expansion sharded on output, contraction on input.
        (('mlp', 'c_fc', 'kernel'), P(None, 'mp')),
        (('mlp', 'c_fc', 'bias'), P('mp')),
        (('mlp', 'c_proj', 'kernel'), P('mp', None)),
        (('mlp', 'c_proj', 'bias'), None),
        # Layer norms are replicated (no partitioning).
        (('ln_\\d+', 'bias'), None),
        (('\\d+', 'ln_\\d+', 'scale'), None),
        (('ln_f', 'bias'), None),
        (('ln_f', 'scale'), None),
    ]
def has_answer(answers, text, tokenizer) -> bool:
    """Return True if any normalized, tokenized answer occurs as a
    contiguous token subsequence of the normalized, tokenized text."""
    doc_tokens = tokenizer.tokenize(_normalize(text), uncased=True)
    for raw_answer in answers:
        answer_tokens = tokenizer.tokenize(_normalize(raw_answer), uncased=True)
        span = len(answer_tokens)
        # Slide a window of the answer's length across the document tokens.
        if any(doc_tokens[start:start + span] == answer_tokens
               for start in range(len(doc_tokens) - span + 1)):
            return True
    return False
def main(raw_args=None):
    """CLI entry point: load a UniLM PyTorch checkpoint and export it as a
    TensorFlow checkpoint."""
    parser = argparse.ArgumentParser()
    for flag, required, help_text in (
        ('--model_name', True, 'model name e.g. bert-base-uncased'),
        ('--cache_dir', False, 'Directory containing pytorch model'),
        ('--pytorch_model_path', True, '/path/to/<pytorch-model-name>.bin'),
        ('--tf_cache_dir', True, 'Directory in which to save tensorflow model'),
    ):
        parser.add_argument(flag, type=str, required=required, help=help_text)
    args = parser.parse_args(raw_args)
    model = UnilmForLM.from_pretrained(pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir)
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
def compute_fvd(opts, max_real: int, num_gen: int, num_frames: int, subsample_factor: int=1):
    # Compute the Frechet Video Distance between dataset clips and generated
    # clips using features from an external detector network.
    # NOTE(review): the detector URL literal below is truncated/corrupted in
    # this source -- restore the original download URL before running.
    detector_url = '
    detector_kwargs = dict(rescale=True, resize=True, return_features=True)
    # Work on a copy so the caller's opts are not mutated.
    opts = copy.deepcopy(opts)
    opts.dataset_kwargs.nframes = num_frames
    opts.dataset_kwargs.stride = subsample_factor
    opts.dataset_kwargs.return_vid = True
    # Batch size is budgeted by total frames per batch at this resolution.
    batch_size = (NUM_FRAMES_IN_BATCH[opts.dataset_kwargs.resolution] // num_frames)
    (mu_real, sigma_real) = metric_utils.compute_feature_stats_for_dataset(opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real, temporal_detector=True, batch_size=batch_size).get_mean_cov()
    # Generated stats come either from a dataset-like generator wrapper or
    # from sampling the generator directly.
    if opts.generator_as_dataset:
        compute_gen_stats_fn = metric_utils.compute_feature_stats_for_dataset
        gen_opts = metric_utils.rewrite_opts_for_gen_dataset(opts)
        gen_opts.dataset_kwargs.nframes = num_frames
        gen_opts.dataset_kwargs.stride = subsample_factor
        gen_opts.dataset_kwargs.random_offset = False
        gen_opts.dataset_kwargs.return_vid = True
        gen_opts.dataset_kwargs.xflip = False
        gen_kwargs = dict()
    else:
        compute_gen_stats_fn = metric_utils.compute_feature_stats_for_generator
        gen_opts = opts
        gen_kwargs = dict(num_video_frames=num_frames, subsample_factor=subsample_factor)
    (mu_gen, sigma_gen) = compute_gen_stats_fn(opts=gen_opts, detector_url=detector_url, detector_kwargs=detector_kwargs, rel_lo=0, rel_hi=1, capture_mean_cov=True, max_items=num_gen, temporal_detector=True, batch_size=batch_size, **gen_kwargs).get_mean_cov()
    # Only rank 0 reports a value in distributed runs.
    if (opts.rank != 0):
        return float('nan')
    # FID/FVD formula: ||mu_g - mu_r||^2 + Tr(S_r + S_g - 2(S_r S_g)^1/2).
    m = np.square((mu_gen - mu_real)).sum()
    fvd = (m + metric_utils.trace_calculator(sigma_real, sigma_gen))
    return float(fvd)
_args
.parametrize('m', [256])
.parametrize('n', [1024])
.parametrize('k', [512])
.parametrize('blocksize', [128, (- 1)])
.parametrize('compute_type', ['int8', 'bf16', 'fp32'])
.parametrize('weight_type', ['int8', 'int4_clip', 'int4_fullrange', 'nf4', 'fp4_e2m1_bnb', 'fp4_e2m1', 'fp8_e5m2', 'fp8_e4m3'])
.parametrize('scale_type', ['fp32', 'fp8_e8m0'])
.parametrize('asym', [True, False])
.parametrize('transpose', [True, False])
.parametrize('add_bias', [True, False])
.parametrize('src_dt', ['fp32', 'bf16'])
.parametrize('dst_dt', ['fp32', 'bf16'])
def test(m, n, k, blocksize, compute_type, weight_type, scale_type, asym, transpose, add_bias, src_dt, dst_dt, dump_tensor_info=True):
    """Parametrized check that the weight-only-quantized linear kernel
    (woq_linear) matches a float matmul on the dequantized weights within
    3% relative tolerance."""
    # Skip configurations the quantization kernels do not support.
    if ((compute_type not in cmpt_configs[weight_type]) or (scale_type not in scale_configs[weight_type])):
        pytest.skip()
    if (asym and ((weight_type not in asym_configs) or (compute_type == 'int8'))):
        pytest.skip()
    torch.manual_seed(0)
    # Reference activation stays fp32; the target copy may be cast to bf16.
    ref_activation = torch.rand(m, k, dtype=torch.float)
    tar_activation = ref_activation.clone()
    if (src_dt == 'bf16'):
        tar_activation = ref_activation.to(torch.bfloat16)
    wei_row = k
    wei_col = n
    if transpose:
        (wei_row, wei_col) = (wei_col, wei_row)
    raw_wei = torch.rand(wei_row, wei_col, dtype=torch.float)
    if dump_tensor_info:
        print(raw_wei)
    # Quantize then dequantize to obtain the weight the kernel effectively
    # computes with; this becomes the reference matmul operand.
    compress_wei = torch.ops.jblasop.woq_quantize(raw_wei, transpose, blocksize, compute_type, weight_type, scale_type, asym)
    revert_wei = torch.zeros(wei_row, wei_col, dtype=torch.float)
    torch.ops.jblasop.woq_dequantize(compress_wei, revert_wei, transpose, compute_type, weight_type, scale_type)
    bias = (torch.rand(n, dtype=torch.float) * 10)
    if dump_tensor_info:
        print(revert_wei)
    tar_dst = torch.zeros(m, n, dtype=torch.float)
    if (dst_dt == 'bf16'):
        tar_dst = tar_dst.to(torch.bfloat16)
    if transpose:
        revert_wei = torch.transpose(revert_wei, 0, 1)
    ref_dst = torch.matmul(ref_activation, revert_wei)
    torch.ops.jblasop.woq_linear(tar_activation, compress_wei, bias, tar_dst, n, add_bias, compute_type, weight_type, scale_type, asym)
    if (dst_dt == 'bf16'):
        # Compare in fp32 regardless of the destination dtype.
        tar_dst = tar_dst.to(torch.float)
    if add_bias:
        ref_dst += bias
    if dump_tensor_info:
        print(tar_dst)
        print(ref_dst)
    # Loose tolerance: quantization plus low-precision accumulation error.
    assert torch.allclose(tar_dst, ref_dst, rtol=0.03)
def compute_sequence_length(sequence):
    """Return per-example valid lengths of a zero-padded batch.

    A timestep counts as valid when any of its features is non-zero, so the
    input is assumed to be zero-padded along the time axis.
    """
    is_valid_step = tf.sign(tf.reduce_max(tf.abs(sequence), axis=2))
    lengths = tf.reduce_sum(is_valid_step, axis=1)
    return tf.cast(lengths, tf.int32)
class SstProcessor(DataProcessor):
    """Data processor for the SST-2 binary sentiment classification task."""

    def get_train_examples(self, data_dir, segment='train'):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir, segment='dev'):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir, segment='test'):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test', is_test=True)

    def get_labels(self):
        """Return the binary sentiment label set."""
        return ['0', '1']

    def _create_examples(self, lines, set_type, is_test=False):
        """Build InputExamples from TSV rows, skipping the header row.

        Test rows carry the text in column 1 and get a dummy label; train/dev
        rows carry (text, label) in columns 0 and 1.
        """
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # header line
            if is_test:
                text, label = row[1], self.get_labels()[0]
            else:
                text, label = row[0], row[1]
            examples.append(InputExample(guid=('%s-%s' % (set_type, row_idx)), text_a=text, text_b=None, label=label))
        return examples
class FunnelForTokenClassification():
    """Stub whose entry points delegate to ``requires_pytorch`` -- presumably
    the dummy-object pattern used when PyTorch is not installed (confirm
    against the surrounding module)."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): in the usual dummy-object pattern this is a @classmethod;
    # here it is an instance method -- confirm instance-level access is
    # actually intended.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class ResNet_NL(nn.Module):
    """ResNet backbone with Non-Local attention blocks mixed into each stage.

    The forward pass takes 5D clips shaped (batch, time, c, h, w): frames are
    folded into the batch dimension for the 2D conv stages and unfolded back
    to 5D whenever a Non-Local block runs.  ``non_layers[i]`` is the number
    of Non-Local blocks inserted near the end of stage ``i+1``.
    """

    def __init__(self, last_stride=1, block=Bottleneck, layers=[3, 4, 6, 3], non_layers=[0, 2, 3, 0]):
        # NOTE: the list defaults are shared across calls (mutable-default
        # caveat); they are only read here, so this is benign.
        self.inplanes = 64
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # NOTE(review): no ReLU between bn1 and maxpool -- the canonical
        # ResNet stem has one; confirm the omission is intentional.
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        # NL_k_idx holds the residual-block indices after which each
        # Non-Local block fires; they are packed at the end of each stage.
        NL_1 = [NonLocalBlock(self.inplanes, sub_sample=True) for i in range(non_layers[0])]
        self.NL_1 = nn.ModuleList(NL_1)
        self.NL_1_idx = sorted([(layers[0] - (i + 1)) for i in range(non_layers[0])])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        NL_2 = [NonLocalBlock(self.inplanes) for i in range(non_layers[1])]
        self.NL_2 = nn.ModuleList(NL_2)
        self.NL_2_idx = sorted([(layers[1] - (i + 1)) for i in range(non_layers[1])])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        NL_3 = [NonLocalBlock(self.inplanes) for i in range(non_layers[2])]
        self.NL_3 = nn.ModuleList(NL_3)
        self.NL_3_idx = sorted([(layers[2] - (i + 1)) for i in range(non_layers[2])])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
        NL_4 = [NonLocalBlock(self.inplanes) for i in range(non_layers[3])]
        self.NL_4 = nn.ModuleList(NL_4)
        self.NL_4_idx = sorted([(layers[3] - (i + 1)) for i in range(non_layers[3])])

    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one residual stage; returned as a ModuleList (not Sequential)
        # so forward() can interleave Non-Local blocks between members.
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.ModuleList(layers)

    def forward(self, x):
        # x: (batch, time, c, h, w); frames folded into batch for 2D convs.
        (b, t, c, h, w) = x.shape
        x = self.conv1(x.view((b * t), c, h, w))
        x = self.bn1(x)
        x = self.maxpool(x)
        # Per stage: run residual blocks one by one, and at the recorded
        # indices run the matching Non-Local block on the unfolded
        # (b, c, t, h, w) layout.  [-1] is a sentinel so the index compare
        # never matches when the stage has no Non-Local blocks.
        NL1_counter = 0
        if (len(self.NL_1_idx) == 0):
            self.NL_1_idx = [(- 1)]
        for i in range(len(self.layer1)):
            x = self.layer1[i](x)
            if (i == self.NL_1_idx[NL1_counter]):
                (_, c, h, w) = x.shape
                x = self.NL_1[NL1_counter](x.view(b, t, c, h, w).permute(0, 2, 1, 3, 4))
                x = x.permute(0, 2, 1, 3, 4).reshape((b * t), c, h, w)
                NL1_counter += 1
        NL2_counter = 0
        if (len(self.NL_2_idx) == 0):
            self.NL_2_idx = [(- 1)]
        for i in range(len(self.layer2)):
            x = self.layer2[i](x)
            if (i == self.NL_2_idx[NL2_counter]):
                (_, c, h, w) = x.shape
                x = self.NL_2[NL2_counter](x.view(b, t, c, h, w).permute(0, 2, 1, 3, 4))
                x = x.permute(0, 2, 1, 3, 4).reshape((b * t), c, h, w)
                NL2_counter += 1
        NL3_counter = 0
        if (len(self.NL_3_idx) == 0):
            self.NL_3_idx = [(- 1)]
        for i in range(len(self.layer3)):
            x = self.layer3[i](x)
            if (i == self.NL_3_idx[NL3_counter]):
                (_, c, h, w) = x.shape
                x = self.NL_3[NL3_counter](x.view(b, t, c, h, w).permute(0, 2, 1, 3, 4))
                x = x.permute(0, 2, 1, 3, 4).reshape((b * t), c, h, w)
                NL3_counter += 1
        NL4_counter = 0
        if (len(self.NL_4_idx) == 0):
            self.NL_4_idx = [(- 1)]
        for i in range(len(self.layer4)):
            x = self.layer4[i](x)
            if (i == self.NL_4_idx[NL4_counter]):
                (_, c, h, w) = x.shape
                x = self.NL_4[NL4_counter](x.view(b, t, c, h, w).permute(0, 2, 1, 3, 4))
                x = x.permute(0, 2, 1, 3, 4).reshape((b * t), c, h, w)
                NL4_counter += 1
        return x

    def load_param(self, model_path, autoload=None):
        """Copy matching weights from a checkpoint -- or from torchvision's
        pretrained ResNet-50 when autoload == 'r50' -- skipping any key
        containing 'fc'."""
        if (autoload == 'r50'):
            param_dict = models.resnet50(pretrained=True).state_dict()
        else:
            param_dict = torch.load(model_path)
        for i in param_dict:
            if ('fc' in i):
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        # He-style init for convs; unit weight / zero bias for batchnorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class FixedLrUpdaterHook(LrUpdaterHook):
    """LR schedule hook that keeps the learning rate fixed at its base value."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_lr(self, trainer, base_lr):
        """Return the base learning rate unchanged for every step."""
        return base_lr
def process_transformer_cfg(cfg):
    """Build a log-directory tag like "name[value]..." from the config's
    declared critical parameters; return '' when none are declared."""
    if 'critical_params' not in cfg:
        return ''
    names = cfg['critical_params']
    values = [cfg[key] for key in cfg.critical_params]
    return ''.join('{:s}[{:s}]'.format(name, str(value)) for name, value in zip(names, values))
class EpochNumProbe(StatsProbe):
    """Stats probe that records only the epoch counter of the current run."""

    def __init__(self):
        super().__init__()
        self.last_epoch_stats = {}

    def get_last_epoch_stats(self):
        """Return the stats gathered since the last epoch prologue."""
        return self.last_epoch_stats

    def epoch_prologue(self):
        # Start each epoch with a clean slate.
        self.last_epoch_stats = {}

    def add_data(self, **kwargs):
        # Only the number of trained epochs is tracked by this probe.
        self.last_epoch_stats['epoch'] = kwargs['epochs_trained']
def save_obj_data_binary_for_voxelization(model, min_corner, max_corner, corner_size, filename):
    """Write ``model`` as an OBJ file augmented with two corner tetrahedra.

    The tetrahedra mark ``min_corner`` and ``max_corner`` (edge length
    ``corner_size``) so a voxelizer sees the full bounding box.

    model: dict with 'v' (Nx3 vertex array, required non-empty) and
        optionally 'f' (Mx3 zero-based face index array).

    Fixes vs. original: the file is opened in text mode ('w') -- the old
    'wb' mode raises TypeError in Python 3 when writing str -- and the
    vertex count uses integer division instead of float '/'.
    """
    assert ('v' in model) and (model['v'].size != 0)
    with open(filename, 'w') as fp:
        for v in model['v']:
            fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
        # Four vertices of a small tetrahedron at the min corner.
        fp.write('v %f %f %f\n' % (min_corner[0] + corner_size, min_corner[1], min_corner[2]))
        fp.write('v %f %f %f\n' % (min_corner[0], min_corner[1] + corner_size, min_corner[2]))
        fp.write('v %f %f %f\n' % (min_corner[0], min_corner[1], min_corner[2] + corner_size))
        fp.write('v %f %f %f\n' % (min_corner[0], min_corner[1], min_corner[2]))
        # Four vertices of a small tetrahedron at the max corner.
        fp.write('v %f %f %f\n' % (max_corner[0] - corner_size, max_corner[1], max_corner[2]))
        fp.write('v %f %f %f\n' % (max_corner[0], max_corner[1] - corner_size, max_corner[2]))
        fp.write('v %f %f %f\n' % (max_corner[0], max_corner[1], max_corner[2] - corner_size))
        fp.write('v %f %f %f\n' % (max_corner[0], max_corner[1], max_corner[2]))
        if ('f' in model) and (model['f'].size != 0):
            # Model faces: OBJ indices are 1-based, input faces 0-based.
            for f_ in model['f']:
                f = np.copy(f_) + 1
                fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))
            # Faces of the two corner tetrahedra, offset past the model
            # vertices (was a float '/' division in the original).
            vid_start = model['v'].size // 3
            for _ in range(2):
                fp.write('f %d %d %d\n' % (vid_start + 1, vid_start + 2, vid_start + 3))
                fp.write('f %d %d %d\n' % (vid_start + 1, vid_start + 4, vid_start + 2))
                fp.write('f %d %d %d\n' % (vid_start + 1, vid_start + 3, vid_start + 4))
                fp.write('f %d %d %d\n' % (vid_start + 2, vid_start + 4, vid_start + 3))
                vid_start += 4
_function('reciprocal')
class AutogradReciprocal(AutogradFunction):
    """Autograd rule for elementwise reciprocal: d(1/x)/dx = -1/x**2."""

    def forward(ctx, input):
        out = input.reciprocal()
        # Cache the result: backward only needs 1/x, not x itself.
        ctx.save_for_backward(out)
        return out

    def backward(ctx, grad_output):
        (recip,) = ctx.saved_tensors
        grad = grad_output.neg()
        grad = grad.mul_(recip)
        return grad.mul_(recip)
def format_for_str(num_or_list, decimals=3):
    """Round a float, tensor, or (nested) list of floats for display.

    Tensors are converted via ``.tolist()``; floats are rounded to
    ``decimals`` places; anything else (ints, strings, ...) collapses to ''.

    Fix vs. original: ``decimals`` is now propagated through the recursive
    calls instead of silently resetting to the default of 3.
    """
    if isinstance(num_or_list, torch.Tensor):
        return format_for_str(num_or_list.tolist(), decimals)
    if isinstance(num_or_list, list):
        return [format_for_str(n, decimals) for n in num_or_list]
    elif isinstance(num_or_list, float):
        return np.round(num_or_list, decimals)
    else:
        return ''
_materialize('core')
class ReplicatePad(Pad):
    """Pad operator using 'replicate' (edge-value) padding."""

    # Padding takes between 2 and 6 values (one pair per padded dimension).
    num_var_param = _pad_num_var_param(2, max=6)

    def __init__(self, *padding_list):
        super().__init__(padding_list, 'replicate')
        # Input rank must be large enough to host every padded dimension.
        min_rank = len(padding_list) // 2 + 1
        self.inp_ranks = [rank_range(min_rank, 4)]
        self.out_ranks = [rank_range(min_rank, 4)]
class PY():
    """Thin factory turning attribute access into caffe Python layers backed
    by classes from a user-supplied module."""

    def __init__(self, module):
        import importlib
        self.module = module
        self.py_module = importlib.import_module(module)

    def _parse_kwargs(self, layer, kwargs):
        """Fold layer-class attributes found in kwargs into 'param_str' and
        propagate the layer's N_TOP as ntop, when declared."""
        layer_cls = getattr(self.py_module, layer)
        if 'param_str' not in kwargs:
            # Move any kwarg that names a class attribute into param_str.
            py_args = {a: kwargs.pop(a) for a in list(kwargs) if hasattr(layer_cls, a)}
            kwargs['param_str'] = str(py_args)
        if hasattr(layer_cls, 'N_TOP'):
            kwargs['ntop'] = layer_cls.N_TOP
        return kwargs

    def __getattr__(self, name):
        return (lambda *args, **kwargs: caffe.layers.Python(*args, module=self.module, layer=name, **self._parse_kwargs(name, kwargs)))
def test_dense_matrix_from_pair_dictionary_square():
    """A square result must use the union of row/column keys on both axes,
    with NaN everywhere except the provided pairs."""
    pairs = {('a', 'b'): 10, ('b', 'c'): 20}
    (X, rows, columns) = dense_matrix_from_pair_dictionary(pairs, square_result=True)
    eq_(rows, ['a', 'b', 'c'])
    eq_(columns, ['a', 'b', 'c'])
    filled = {(0, 1): 10, (1, 2): 20}
    for i in range(3):
        for j in range(3):
            if (i, j) in filled:
                eq_(X[(i, j)], filled[(i, j)])
            else:
                assert np.isnan(X[(i, j)])
def init_bn_params(bn_module):
    """Reset a BatchNorm module to identity statistics.

    Running mean/var go to 0/1; when the module is affine, weight/bias go
    to 1/0 as well.

    Fix vs. original: the in-place ``fill_`` on the affine parameters is
    wrapped in ``torch.no_grad()`` -- filling a leaf tensor that requires
    grad would otherwise raise a RuntimeError.
    """
    with torch.no_grad():
        bn_module.running_mean.fill_(0)
        bn_module.running_var.fill_(1)
        if bn_module.affine:
            bn_module.weight.fill_(1)
            bn_module.bias.fill_(0)
def read_vgm_txt(vgm_logs_fps):
    """Parse VGM log text files into per-file dicts with soundchip info,
    clock frequency, total length, data/loop start indices, and the raw
    data lines after the 'VGMData' marker.

    Files with zero or multiple active soundchips are also recorded in a
    local error list (collected but not returned, as in the original).

    Fix vs. original: the data/loop index trackers are reset for every
    file; previously they were initialized once before the loop, so a file
    missing a 'VGMData' marker silently reused the previous file's indices.
    The never-used ``loop_end_idx`` variable was dropped.
    """
    all_vgms = []
    error_vgms = []
    for vgm_logs_fp in tqdm(vgm_logs_fps):
        # Per-file parse state; -1 means "marker not found in this file".
        data_start_idx = -1
        loop_start_idx = -1
        vgm = dict()
        vgm['filename'] = vgm_logs_fp
        with vgm_logs_fp.open() as f:
            line = f.readlines()
        soundchips = []
        for (i, l) in enumerate(line):
            sl = l.split('\t')
            if 'Clock' in sl[0]:
                soundchip = sl[0].split(' ')[0]
                freq = int(sl[-1].split(' ')[0])
                if soundchip == 'YM2413':
                    vgm['soundchip'] = soundchip
                    vgm['clock_freq'] = freq
                # A zero clock means the chip is unused in this file.
                if freq != 0:
                    soundchips.append([soundchip, freq])
            elif 'Total Length' in sl[0]:
                vgm['total_sample_length'] = int(sl[-1].split(' ')[0])
            elif 'VGMData' in l:
                data_start_idx = i + 1
                vgm['data_start_index'] = data_start_idx
            elif 'Loop Point' in l:
                loop_start_idx = i + 1
                vgm['loop_start_index'] = loop_start_idx
        if len(soundchips) != 1:
            error_vgms.append(['multiple soundchips', str(vgm_logs_fp), soundchips])
        vgm['data'] = line[data_start_idx:]
        all_vgms.append(vgm)
    return all_vgms
def load_result(path):
    """Load a rollout result from ``path``/rollout.json.

    Returns (score * 100, info dict) or (None, None) when the file is
    missing.

    Fixes vs. original: the JSON file is read through a context manager
    (the old code leaked the handle by passing ``open(...)`` directly) and
    the unused ``suffix`` variable was removed.
    """
    fullpath = os.path.join(path, 'rollout.json')
    if not os.path.exists(fullpath):
        return (None, None)
    with open(fullpath) as f:
        results = json.load(f)
    info = dict(
        returns=results['return'],
        first_value=results['first_value'],
        first_search_value=results['first_search_value'],
        discount_return=results['discount_return'],
        prediction_error=results['prediction_error'],
        step=results['step'],
    )
    return (results['score'] * 100, info)
class SparseWeightedAverage(torch.autograd.Function):
    """Autograd wrapper around the sparse weighted-average kernels, with
    device-specific (CPU/CUDA) forward and backward implementations."""

    avg = {'cpu': sparse_weighted_average_cpu, 'cuda': sparse_weighted_average_cuda}
    avg_backward = {'cpu': sparse_weighted_average_backward_cpu, 'cuda': sparse_weighted_average_backward_cuda}

    def forward(ctx, weights, values, topk):
        """Average ``values`` with sparse ``weights`` restricted to ``topk``."""
        ctx.save_for_backward(weights, values, topk)
        batch, heads, length = weights.shape[:3]
        embed = values.shape[-1]
        out = values.new_zeros(batch, heads, length, embed)
        SparseWeightedAverage.avg[weights.device.type](weights, values, topk, out)
        return out

    def backward(ctx, grad_output):
        weights, values, topk = ctx.saved_tensors
        grad_weights = torch.zeros_like(weights)
        grad_values = torch.zeros_like(values)
        # The kernels expect a unit innermost stride.
        if grad_output.stride()[-1] != 1:
            grad_output = grad_output.contiguous()
        SparseWeightedAverage.avg_backward[weights.device.type](weights, values, topk, grad_output, grad_weights, grad_values)
        return (grad_weights, grad_values, None)
def _integration(dataloader, tmp_path, loss=None, trainer_kwargs=None, **kwargs):
    # End-to-end smoke test helper: fit a tiny TemporalFusionTransformer for
    # two truncated epochs, reload the best checkpoint, and sanity-check the
    # shapes returned by predict().
    train_dataloader = dataloader['train']
    val_dataloader = dataloader['val']
    test_dataloader = dataloader['test']
    early_stop_callback = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=1, verbose=False, mode='min')
    logger = TensorBoardLogger(tmp_path)
    if (trainer_kwargs is None):
        trainer_kwargs = {}
    trainer = pl.Trainer(max_epochs=2, gradient_clip_val=0.1, callbacks=[early_stop_callback], enable_checkpointing=True, default_root_dir=tmp_path, limit_train_batches=2, limit_val_batches=2, limit_test_batches=2, logger=logger, **trainer_kwargs)
    # Exercise the monotonicity constraint when the dataset carries the
    # discount feature; cudnn is disabled in that case (presumably because
    # the constraint needs double backward -- TODO confirm).
    if ('discount_in_percent' in train_dataloader.dataset.reals):
        monotone_constaints = {'discount_in_percent': (+ 1)}
        cuda_context = torch.backends.cudnn.flags(enabled=False)
    else:
        monotone_constaints = {}
        cuda_context = nullcontext()
    kwargs.setdefault('learning_rate', 0.15)
    with cuda_context:
        # Pick a loss matching the target normalizer unless one was given.
        if (loss is not None):
            pass
        elif isinstance(train_dataloader.dataset.target_normalizer, NaNLabelEncoder):
            loss = CrossEntropy()
        elif isinstance(train_dataloader.dataset.target_normalizer, MultiNormalizer):
            loss = MultiLoss([(CrossEntropy() if isinstance(normalizer, NaNLabelEncoder) else QuantileLoss()) for normalizer in train_dataloader.dataset.target_normalizer.normalizers])
        else:
            loss = QuantileLoss()
        net = TemporalFusionTransformer.from_dataset(train_dataloader.dataset, hidden_size=2, hidden_continuous_size=2, attention_head_size=1, dropout=0.2, loss=loss, log_interval=5, log_val_interval=1, log_gradient_flow=True, monotone_constaints=monotone_constaints, **kwargs)
        net.size()
        try:
            trainer.fit(net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader)
            if (not isinstance(net.loss, MQF2DistributionLoss)):
                test_outputs = trainer.test(net, dataloaders=test_dataloader)
                assert (len(test_outputs) > 0)
                # Reload from the checkpoint to verify serialization round-trips.
                net = TemporalFusionTransformer.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
                predictions = net.predict(val_dataloader, return_index=True, return_x=True, return_y=True, fast_dev_run=True, trainer_kwargs=trainer_kwargs)
                pred_len = len(predictions.index)

                # Recursively assert every returned tensor has the prediction
                # length as its leading dimension.
                def check(x):
                    if isinstance(x, (tuple, list)):
                        for xi in x:
                            check(xi)
                    elif isinstance(x, dict):
                        for xi in x.values():
                            check(xi)
                    else:
                        assert (pred_len == x.shape[0]), 'first dimension should be prediction length'
                check(predictions.output)
                if isinstance(predictions.output, torch.Tensor):
                    assert (predictions.output.ndim == 2), 'shape of predictions should be batch_size x timesteps'
                else:
                    assert all(((p.ndim == 2) for p in predictions.output)), 'shape of predictions should be batch_size x timesteps'
                check(predictions.x)
                check(predictions.index)
            # Raw-mode prediction should also run without error.
            net.predict(val_dataloader, return_index=True, return_x=True, fast_dev_run=True, mode='raw', trainer_kwargs=trainer_kwargs)
        finally:
            # Always clean up the temporary logging/checkpoint directory.
            shutil.rmtree(tmp_path, ignore_errors=True)
_module()
class PISASSDHead(SSDHead):
    """SSD head with PISA (Prime Sample Attention): importance-based sample
    reweighting (ISR-P) plus an optional classification-aware regression
    loss (CARL) on top of the standard SSD losses."""

    def loss_by_feat(self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList=None) -> Dict[(str, Union[(List[Tensor], Tensor)])]:
        """Compute classification/box losses from multi-level feature maps.

        Returns a dict with 'loss_cls' and 'loss_bbox' and, when train_cfg
        enables CARL, the extra CARL loss terms.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, batch_img_metas, device=device)
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore, unmap_outputs=False, return_sampling_results=True)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor, sampling_results_list) = cls_reg_targets
        num_images = len(batch_img_metas)
        # Flatten per-level predictions and targets into per-image tensors.
        all_cls_scores = torch.cat([s.permute(0, 2, 3, 1).reshape(num_images, (- 1), self.cls_out_channels) for s in cls_scores], 1)
        all_labels = torch.cat(labels_list, (- 1)).view(num_images, (- 1))
        all_label_weights = torch.cat(label_weights_list, (- 1)).view(num_images, (- 1))
        all_bbox_preds = torch.cat([b.permute(0, 2, 3, 1).reshape(num_images, (- 1), 4) for b in bbox_preds], (- 2))
        all_bbox_targets = torch.cat(bbox_targets_list, (- 2)).view(num_images, (- 1), 4)
        all_bbox_weights = torch.cat(bbox_weights_list, (- 2)).view(num_images, (- 1), 4)
        all_anchors = []
        for i in range(num_images):
            all_anchors.append(torch.cat(anchor_list[i]))
        isr_cfg = self.train_cfg.get('isr', None)
        all_targets = (all_labels.view((- 1)), all_label_weights.view((- 1)), all_bbox_targets.view((- 1), 4), all_bbox_weights.view((- 1), 4))
        # ISR-P: reweight targets by prime-sample importance before the loss.
        if (isr_cfg is not None):
            all_targets = isr_p(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_bbox_preds.view((- 1), 4), all_targets, torch.cat(all_anchors), sampling_results_list, loss_cls=CrossEntropyLoss(), bbox_coder=self.bbox_coder, **self.train_cfg['isr'], num_class=self.num_classes)
            (new_labels, new_label_weights, new_bbox_targets, new_bbox_weights) = all_targets
            all_labels = new_labels.view(all_labels.shape)
            all_label_weights = new_label_weights.view(all_label_weights.shape)
            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
        # CARL: classification-aware regression loss over flattened samples.
        carl_loss_cfg = self.train_cfg.get('carl', None)
        if (carl_loss_cfg is not None):
            loss_carl = carl_loss(all_cls_scores.view((- 1), all_cls_scores.size((- 1))), all_targets[0], all_bbox_preds.view((- 1), 4), all_targets[2], SmoothL1Loss(beta=1.0), **self.train_cfg['carl'], avg_factor=avg_factor, num_class=self.num_classes)
        assert torch.isfinite(all_cls_scores).all().item(), 'classification scores become infinite or NaN!'
        assert torch.isfinite(all_bbox_preds).all().item(), 'bbox predications become infinite or NaN!'
        (losses_cls, losses_bbox) = multi_apply(self.loss_by_feat_single, all_cls_scores, all_bbox_preds, all_anchors, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, avg_factor=avg_factor)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        if (carl_loss_cfg is not None):
            loss_dict.update(loss_carl)
        return loss_dict
def save_tflite():
    """Convert the SavedModel at FLAGS.weights to a TFLite flatbuffer and
    write it to FLAGS.output, applying FLAGS.quantize_mode quantization.

    Fix vs. original: the output file is written through a context manager
    so the handle is flushed and closed deterministically.
    """
    converter = tf.lite.TFLiteConverter.from_saved_model(FLAGS.weights)
    if (FLAGS.quantize_mode == 'float16'):
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_types = [tf.compat.v1.lite.constants.FLOAT16]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
    elif (FLAGS.quantize_mode == 'int8'):
        # NOTE(review): this first assignment is immediately overwritten by
        # the one two lines below, so TFLITE_BUILTINS_INT8 never takes
        # effect -- confirm which op set full-int8 conversion should use.
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
        converter.allow_custom_ops = True
        converter.representative_dataset = representative_data_gen
    tflite_model = converter.convert()
    with open(FLAGS.output, 'wb') as f:
        f.write(tflite_model)
    logging.info('model saved to: {}'.format(FLAGS.output))
_registry(algorithm_type='smooth_quant', location='pre_quantization')
class SmoothQuant(Algorithm):
    """Pre-quantization SmoothQuant pass: migrates activation outliers into
    the weights via the adaptor's smooth_quant hook."""

    def __init__(self, alpha=0.5):
        # alpha: balance between activation and weight scaling.
        self.alpha = alpha
        self.folding = False
        # The remaining knobs are optional; None means "use adaptor default".
        self.percentile = None
        self.op_types = None
        self.scales_per_op = None
        self.tune_cfg = None
        self.weight_clip = None
        self.auto_alpha_args = None
        self.default_alpha = None

    def __call__(self, origin_model, q_model, adaptor, dataloader, calib_iter):
        """Run smooth quantization on ``origin_model`` and return the result."""
        extra = {}
        # Forward only the optional knobs that were explicitly configured.
        for key, value in (('op_types', self.op_types),
                           ('percentile', self.percentile),
                           ('scales_per_op', self.scales_per_op)):
            if value is not None:
                extra[key] = value
        extra['folding'] = self.folding
        extra['record_max_info'] = True
        extra['weight_clip'] = self.weight_clip
        extra['auto_alpha_args'] = self.auto_alpha_args
        extra['default_alpha'] = self.default_alpha
        return adaptor.smooth_quant(origin_model, dataloader, calib_iter, alpha=self.alpha, **extra)
class Meteor():
    """METEOR metric computed by driving the METEOR jar over a persistent
    stdin/stdout pipe using its '-stdio' protocol."""

    def __init__(self):
        self.env = os.environ
        # NOTE(review): 'en_US.UTF_8' (underscore before 8) looks like a typo
        # for 'en_US.UTF-8', but it is runtime-visible state -- left as-is.
        self.env['LC_ALL'] = 'en_US.UTF_8'
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, '-', '-', '-stdio', '-l', 'en', '-norm']
        # Line-buffered text pipes; the jar stays alive for this object's
        # lifetime and answers one line per request.
        self.meteor_p = subprocess.Popen(self.meteor_cmd, cwd=os.path.dirname(os.path.abspath(__file__)), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.env, universal_newlines=True, bufsize=1)
        # Serialize pipe access: the wire protocol is stateful request/reply.
        self.lock = threading.Lock()

    def compute_score(self, gts, res):
        """Score hypotheses ``res`` against references ``gts``.

        Returns (corpus-level METEOR score, per-segment score list).
        """
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        # First collect per-segment statistics, then submit one EVAL line.
        for (re, gt) in zip(res, gts):
            stat = self._stat(re, gt)
            eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write((eval_line + '\n'))
        # The jar replies with one score per segment followed by the total.
        for i in range(len(gts)):
            score = float(self.meteor_p.stdout.readline().strip())
            scores.append(score)
        final_score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return (final_score, scores)

    def method(self):
        """Return the metric's name."""
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        # ' ||| ' is the protocol's field separator, so scrub it from the
        # hypothesis text before sending the SCORE request.
        hypothesis_str = hypothesis_str.replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write((score_line + '\n'))
        return self.meteor_p.stdout.readline().strip()

    def __del__(self):
        # Tear down the subprocess under the lock so no in-flight scoring
        # races with shutdown.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.kill()
        self.meteor_p.wait()
        self.lock.release()
class Standardize(torch.nn.Module):
    """Normalize inputs with fixed constants: (x - mean) / std."""

    def __init__(self, mean=0.369, std=0.0255, device='cpu'):
        super().__init__()
        # Defaults are presumably dataset statistics -- confirm upstream.
        self.mean = mean
        self.std = std
        self.device = device  # kept for API compatibility; unused here

    def forward(self, x):
        centered = x - self.mean
        return centered / self.std
def archive_model(serialization_dir: str, weights: str=_DEFAULT_WEIGHTS):
    """Bundle config, weights, and vocabulary into model.tar.gz inside
    ``serialization_dir``.

    Logs an error and returns without archiving when either the weights or
    the config file is missing.

    Fix vs. original: the missing-config branch now returns early, matching
    the missing-weights branch; previously it only logged and then crashed
    trying to tar the nonexistent file.
    """
    weights_file = os.path.join(serialization_dir, weights)
    if not os.path.exists(weights_file):
        logger.error('weights file %s does not exist, unable to archive model', weights_file)
        return
    config_file = os.path.join(serialization_dir, CONFIG_NAME)
    if not os.path.exists(config_file):
        logger.error('config file %s does not exist, unable to archive model', config_file)
        return
    archive_file = os.path.join(serialization_dir, 'model.tar.gz')
    logger.info('archiving weights and vocabulary to %s', archive_file)
    with tarfile.open(archive_file, 'w:gz') as archive:
        archive.add(config_file, arcname=CONFIG_NAME)
        archive.add(weights_file, arcname=_WEIGHTS_NAME)
        archive.add(os.path.join(serialization_dir, 'vocabulary'), arcname='vocabulary')
def shards(state, shard_size, eval=False):
    # Yield shards of `state` so the caller can compute the loss shard by
    # shard without materializing the full graph at once.
    if eval:
        # Single shard, no backprop bookkeeping (filter_shard_state's flag
        # semantics are defined elsewhere in this module).
        (yield filter_shard_state(state, False, True))
    else:
        non_none = dict(filter_shard_state(state))
        # keys: tensor names; values: per-key tuples of shard tensors.
        (keys, values) = zip(*((k, torch.split(v, shard_size)) for (k, v) in non_none.items()))
        # Re-zip so each yielded dict carries one shard of every tensor.
        for shard_tensors in zip(*values):
            (yield dict(zip(keys, shard_tensors)))
        # After the caller has backpropped through every shard, push the
        # gradients accumulated on the shard variables back through the
        # original (pre-split) state in a single autograd call.
        variables = ((state[k], v.grad.data) for (k, v) in non_none.items() if (isinstance(v, Variable) and (v.grad is not None)))
        (inputs, grads) = zip(*variables)
        torch.autograd.backward(inputs, grads)
def rotate(obj: Union[(bpy.types.Object, str)], rotation: Union[(Tuple[float], mathutils.Euler)]=(0.0, 0.0, 0.0), axis_order: str='XYZ') -> None:
    """Rotate a Blender object by ``rotation`` (Euler radians), composed
    with the object's current rotation, using the given Euler axis order.

    Fix vs. original: the matrix-composition expression below was missing
    its operator; mathutils composes rotation matrices with ``@``.
    """
    obj = verify(obj)
    view_layer = zpy.blender.verify_view_layer()
    select(obj)
    log.info(f'Rotating object {obj.name} by {rotation} radians in {axis_order}. ')
    log.debug(f'''Before - obj.matrix_world
{obj.matrix_world}''')
    if (not isinstance(rotation, mathutils.Euler)):
        rotation = mathutils.Euler(rotation)
    # Compose the requested rotation with the object's existing rotation.
    new_rotation_mat = (rotation.to_matrix() @ obj.rotation_euler.to_matrix())
    new_rotation = new_rotation_mat.to_euler(axis_order)
    obj.rotation_euler = mathutils.Euler(new_rotation, axis_order)
    view_layer.update()
    log.debug(f'''After - obj.matrix_world
{obj.matrix_world}''')
def main():
    """Benchmark forecaster inference with the accelerator chosen on the CLI
    and print the timing as a JSON payload."""
    args = parser.parse_args()
    train_data, val_data, test_data = create_data()
    forecaster = get_trained_forecaster(train_data, val_data)
    # Build the accelerated graph first when one was requested.
    if args.accelerator == 'onnx':
        forecaster.build_onnx()
    elif args.accelerator == 'openvino':
        forecaster.build_openvino()
    predict_fn = {
        'pytorch': forecaster.predict,
        'onnx': forecaster.predict_with_onnx,
        'openvino': forecaster.predict_with_openvino,
    }[args.accelerator]
    x_test, _ = test_data.to_numpy()
    start = time.time()
    for _ in range(100):
        predict_fn(x_test)
    elapsed = time.time() - start
    output = json.dumps({'config': args.name, 'inference_time': elapsed})
    print(f'>>>{output}<<<')
def preprocess_input_with_single_transformation(data, device, non_blocking=True):
    """Move one batch onto ``device`` and normalize its layout.

    Two input layouts are supported:
      * ``data[0]`` has two elements: both are moved to the device and
        returned together with ``data[1]`` and the unpacked ``data[2]``.
      * otherwise ``data[0]`` is ``(t2, dw, target)``: ``t2`` and ``dw`` are
        concatenated along the channel dimension after transfer.
    """
    sample, filename = data[0], data[1]
    if len(sample) == 2:
        first, second = sample
        return (first.to(device, non_blocking=non_blocking),
                second.to(device, non_blocking=non_blocking),
                filename, *data[2])
    (t2, dw, target) = to_device(sample, device, non_blocking)
    partition_list, group_list = data[2]
    # Fuse the two input tensors channel-wise for the model.
    return (torch.cat([t2, dw], dim=1), target, filename, partition_list, group_list)
def valid_mean(tensor, valid=None, dim=None):
    """Mean of ``tensor`` over ``dim``, optionally restricted to a validity mask.

    When ``valid`` is given, invalid entries contribute neither to the sum
    nor to the divisor (weighted mean with 0/1 weights).
    """
    if dim is None:
        dim = ()
    if valid is None:
        return tensor.mean(dim=dim)
    # Cast the mask to the tensor dtype so it acts as 0/1 weights.
    mask = valid.type(tensor.dtype)
    return (tensor * mask).sum(dim=dim) / mask.sum(dim=dim)
def convert_basic_c2_names(original_keys):
    """Translate caffe2-style parameter names to detectron2-style names.

    Handles the shared renames: '_' scope separators become '.', weight/bias
    suffixes, BatchNorm/GroupNorm statistics, stem and residual-branch
    renames, and DensePose head outputs.

    Args:
        original_keys: list of caffe2 parameter name strings.

    Returns:
        New list of converted names, same order as the input.
    """
    keys = copy.deepcopy(original_keys)
    # Rename final-prediction tensors first, while names are still underscored.
    keys = [{'pred_b': 'linear_b', 'pred_w': 'linear_w'}.get(k, k) for k in keys]
    # caffe2 uses '_' as the scope separator; detectron2 uses '.'.
    keys = [k.replace('_', '.') for k in keys]
    # Ordered (pattern, replacement) pipeline. Order matters: later rules
    # match text produced by earlier ones (e.g. branch renames run after the
    # bn -> norm rewrites).
    rules = [
        ('\\.b$', '.bias'),
        ('\\.w$', '.weight'),
        ('bn\\.s$', 'norm.weight'),
        ('bn\\.bias$', 'norm.bias'),
        ('bn\\.rm', 'norm.running_mean'),
        ('bn\\.running.mean$', 'norm.running_mean'),
        ('bn\\.riv$', 'norm.running_var'),
        ('bn\\.running.var$', 'norm.running_var'),
        ('bn\\.gamma$', 'norm.weight'),
        ('bn\\.beta$', 'norm.bias'),
        ('gn\\.s$', 'norm.weight'),
        ('gn\\.bias$', 'norm.bias'),
        # Stem renames.
        ('^res\\.conv1\\.norm\\.', 'conv1.norm.'),
        ('^conv1\\.', 'stem.conv1.'),
        # Residual-block branch renames (literal substrings).
        (re.escape('.branch1.'), '.shortcut.'),
        (re.escape('.branch2a.'), '.conv1.'),
        (re.escape('.branch2b.'), '.conv2.'),
        (re.escape('.branch2c.'), '.conv3.'),
        # DensePose head.
        ('^body.conv.fcn', 'body_conv_fcn'),
        (re.escape('AnnIndex.lowres'), 'ann_index_lowres'),
        (re.escape('Index.UV.lowres'), 'index_uv_lowres'),
        (re.escape('U.lowres'), 'u_lowres'),
        (re.escape('V.lowres'), 'v_lowres'),
    ]
    for pattern, replacement in rules:
        keys = [re.sub(pattern, replacement, k) for k in keys]
    return keys
def new_full(g, self, size, fill_value, dtype, layout, device, pin_memory=False):
    """ONNX symbolic for ``Tensor.new_full``.

    If no dtype was given and the source tensor's type is fully known,
    the new tensor inherits the source's scalar type (mapped to its ONNX
    type index); otherwise the work is delegated unchanged to opset 9's
    ``full``.
    """
    from torch.onnx.symbolic_opset9 import full
    if dtype is None and self.isCompleteTensor():
        # Map the traced tensor's scalar type to the ONNX dtype index.
        scalar_name = self.type().scalarType()
        dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[scalar_name])
    return full(g, size, fill_value, dtype, layout, device, pin_memory)
def ResNeXt101(input_shape=None, input_tensor=None, weights=None, classes=1000, include_top=False, stride_size=2, repetitions=(3, 4, 23, 3), **kwargs):
    """Build a ResNeXt-101 model from the shared ResNeXt constructor.

    Thin wrapper that selects the 'resnext101' parameter preset and forwards
    every option (plus any extra keyword arguments) unchanged.
    """
    options = dict(
        input_shape=input_shape,
        input_tensor=input_tensor,
        include_top=include_top,
        classes=classes,
        weights=weights,
        stride_size=stride_size,
        repetitions=repetitions,
    )
    options.update(kwargs)
    return ResNeXt(MODELS_PARAMS['resnext101'], **options)
def test_custom(msg):
    """End-to-end checks of pybind11 C++ -> Python exception translation.

    ``m`` is the compiled test extension module; ``msg`` is a fixture that
    extracts the message text from an exception value. Each case asserts
    that a C++ throw surfaces in Python as the expected exception type with
    the expected message.
    """
    # C++ exception registered with a custom Python exception type.
    with pytest.raises(m.MyException) as excinfo:
        m.throws1()
    assert (msg(excinfo.value) == 'this error should go to a custom type')
    # Unregistered C++ exception falls back to RuntimeError.
    with pytest.raises(RuntimeError) as excinfo:
        m.throws2()
    assert (msg(excinfo.value) == 'this error should go to a standard Python exception')
    # Non-std exception object: caught by the catch-all translator.
    with pytest.raises(RuntimeError) as excinfo:
        m.throws3()
    assert (msg(excinfo.value) == 'Caught an unknown exception!')
    # Exception rethrown inside C++ keeps its registered mapping.
    with pytest.raises(m.MyException) as excinfo:
        m.throws4()
    assert (msg(excinfo.value) == 'this error is rethrown')
    with pytest.raises(RuntimeError) as excinfo:
        m.throws_logic_error()
    assert (msg(excinfo.value) == 'this error should fall through to the standard handler')
    with pytest.raises(OverflowError) as excinfo:
        m.throws_overflow_error()
    # Helper-defined translator produces MyException5.
    with pytest.raises(m.MyException5) as excinfo:
        m.throws5()
    assert (msg(excinfo.value) == 'this is a helper-defined translated exception')
    # A subclass instance must be catchable as the parent type...
    with pytest.raises(m.MyException5) as excinfo:
        m.throws5_1()
    assert (msg(excinfo.value) == 'MyException5 subclass')
    assert isinstance(excinfo.value, m.MyException5_1)
    # ...and as its exact type.
    with pytest.raises(m.MyException5_1) as excinfo:
        m.throws5_1()
    assert (msg(excinfo.value) == 'MyException5 subclass')
    # Conversely, catching the child type must NOT intercept a parent throw.
    with pytest.raises(m.MyException5) as excinfo:
        try:
            m.throws5()
        except m.MyException5_1 as err:
            raise RuntimeError('Exception error: caught child from parent') from err
    assert (msg(excinfo.value) == 'this is a helper-defined translated exception')
class TestTorchOP(unittest.TestCase):
    """Round-trip test: TorchScript trace -> compiled graph -> saved/reloaded graph.

    Verifies that a traced `Net` compiled into the engine's `Graph` format
    produces (after save + reload) outputs matching the traced model.
    """

    @classmethod
    def setUpClass(cls):
        # unittest invokes this as cls.setUpClass(); without @classmethod the
        # framework call raised TypeError (missing positional argument).
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_1(self):
        """Trace, compile, save, reload, and compare outputs numerically."""
        n = Net()
        example_in = torch.ones(1, 32).long()
        traced_model = torch.jit.trace(n, example_in)
        # `file_name` is a module-level constant naming the artifact paths.
        torch.jit.save(traced_model, '{}.pt'.format(file_name))
        ref_out = traced_model(example_in).squeeze(0).detach().numpy()
        graph = compile('{}.pt'.format(file_name))
        graph.save(file_name)
        # Reload from the serialized config/weights and run inference.
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'))
        out = newgraph.inference([example_in.numpy()])
        np.testing.assert_almost_equal(ref_out, [*out.values()][0], decimal=5)
        # Clean up the generated artifacts.
        os.remove('{}.pt'.format(file_name))
        shutil.rmtree(file_name)
def gen_iterator(out_path, dataset, gen_p):
    """Generate and export meshes for every sample in ``dataset``.

    Args:
        out_path: root output directory; meshes land under
            ``<out_path>/generation/<class>/<instance>/``.
        dataset: dataset object exposing ``get_loader(shuffle=...)``.
        gen_p: mesh generator exposing ``generate_mesh(data)``.

    Side effect: publishes ``gen_p`` as the module-level global ``gen`` so
    helpers (e.g. ``create_meshes`` workers) can reach the generator.
    Errors on individual samples are logged and skipped (best-effort batch job).
    """
    global gen
    gen = gen_p
    if (not os.path.exists(out_path)):
        os.makedirs(out_path)
    print(out_path)
    loader = dataset.get_loader(shuffle=True)
    # Buffer of (logits, data, out_path) tuples, flushed in batches.
    data_tupels = []
    for (i, data) in tqdm(enumerate(loader)):
        path = os.path.normpath(data['path'][0])
        # Output layout mirrors the last two path components of the sample.
        export_path = (out_path + '/generation/{}/{}/'.format(path.split(os.sep)[(- 2)], path.split(os.sep)[(- 1)]))
        if os.path.exists(export_path):
            # Resumable: a sample already exported in a previous run is skipped.
            print('Path exists - skip! {}'.format(export_path))
            continue
        try:
            # Flush accumulated tuples once the buffer exceeds ~20 entries.
            if (len(data_tupels) > 20):
                create_meshes(data_tupels)
                data_tupels = []
            logits = gen.generate_mesh(data)
            data_tupels.append((logits, data, out_path))
        except Exception as err:
            # Best-effort: log the traceback and move on to the next sample.
            print('Error with {}: {}'.format(data['path'][0], traceback.format_exc()))
    try:
        # Final flush of whatever is still buffered after the loop.
        create_meshes(data_tupels)
        data_tupels = []
        # NOTE(review): the two lines below re-generate the loop's last
        # `data` a second time and leave it unexported in the cleared
        # buffer — looks like copy-paste leftover; confirm intent.
        logits = gen.generate_mesh(data)
        data_tupels.append((logits, data, out_path))
    except Exception as err:
        print('Error with {}: {}'.format(data['path'][0], traceback.format_exc()))
def main():
    """Evaluate a baseline agent on a Habitat benchmark from CLI options.

    Builds the agent named by --agent-class, runs the benchmark defined by
    --task-config, and logs every resulting metric.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--success-distance', type=float, default=0.2)
    arg_parser.add_argument('--task-config', type=str, default='configs/tasks/pointnav.yaml')
    arg_parser.add_argument('--agent-class', type=str, default='GoalFollower')
    args = arg_parser.parse_args()
    config = get_config(args.task_config)
    # Resolve the agent class by name and instantiate it with the task's
    # goal sensor uuid so observations are read from the right sensor.
    agent_cls = get_agent_cls(args.agent_class)
    agent = agent_cls(success_distance=args.success_distance, goal_sensor_uuid=config.TASK.GOAL_SENSOR_UUID)
    benchmark = habitat.Benchmark(config_paths=args.task_config)
    metrics = benchmark.evaluate(agent)
    for metric_name, metric_value in metrics.items():
        habitat.logger.info('{}: {:.3f}'.format(metric_name, metric_value))
def generate_random_data_sample(T, B=1, D=80):
    """Build a dummy fairseq-style sample of shape (B, T, D).

    Assumes `random_number_generator` is a module-level numpy RNG
    (exposes randn) — TODO confirm against the enclosing file.
    """
    src_tokens = torch.tensor(random_number_generator.randn(B, T, D)).float()
    src_lengths = torch.tensor([T])
    return {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}
class WikipediaNetwork(InMemoryDataset):
    """The chameleon/squirrel Wikipedia page-page networks.

    Node features and labels are read from ``out1_node_feature_label.txt``
    and edges from ``out1_graph_edges.txt`` (tab-separated, one header line).
    Raw files must be placed manually (``download`` is a no-op).
    """

    def __init__(self, root: str, name: str, transform: Optional[Callable]=None, pre_transform: Optional[Callable]=None):
        self.name = name.lower()
        assert (self.name in ['chameleon', 'squirrel'])
        super().__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    # The InMemoryDataset API expects the four accessors below to be
    # properties; as plain methods the base class path machinery would
    # try to os.path.join bound methods and fail.
    @property
    def raw_dir(self) -> str:
        return osp.join(self.root, self.name, 'raw')

    @property
    def processed_dir(self) -> str:
        return osp.join(self.root, self.name, 'processed')

    @property
    def raw_file_names(self) -> Union[(str, List[str])]:
        return ['out1_node_feature_label.txt', 'out1_graph_edges.txt']

    @property
    def processed_file_names(self) -> str:
        return 'data.pt'

    def download(self):
        # Raw files are expected to already exist on disk; nothing to fetch.
        pass

    def process(self):
        """Parse the raw text files into one `Data` object and cache it."""
        with open(self.raw_paths[0], 'r') as f:
            # Drop the header line and the trailing empty line.
            data = f.read().split('\n')[1:(- 1)]
        # Column 1: comma-separated float features; column 2: integer label.
        x = [[float(v) for v in r.split('\t')[1].split(',')] for r in data]
        x = torch.tensor(x, dtype=torch.float)
        y = [int(r.split('\t')[2]) for r in data]
        y = torch.tensor(y, dtype=torch.long)
        with open(self.raw_paths[1], 'r') as f:
            data = f.read().split('\n')[1:(- 1)]
        data = [[int(v) for v in r.split('\t')] for r in data]
        edge_index = torch.tensor(data, dtype=torch.long).t().contiguous()
        # Clean the graph: no self loops, undirected, deduplicated edges.
        (edge_index, _) = remove_self_loops(edge_index)
        edge_index = to_undirected(edge_index)
        (edge_index, _) = coalesce(edge_index, None, x.size(0), x.size(0))
        data = Data(x=x, edge_index=edge_index, y=y)
        if (self.pre_transform is not None):
            data = self.pre_transform(data)
        torch.save(self.collate([data]), self.processed_paths[0])
def get_article_recommendations(user_id):
    """Fetch all article recommendations for a user, joined with feedback.

    Each row pairs a system recommendation (article_recommendations) with any
    user feedback (article_feedback) for the same (article, user, system)
    triple; feedback columns are NULL when no feedback exists. Rows are
    ordered newest-first, then by system name and system score descending.

    Args:
        user_id: id filtering article_recommendations (passed as a bound
            query parameter, not interpolated — safe against SQL injection).

    Returns:
        List of dict rows (the cursor is created with dictionary=True).
    """
    cur = getDb().cursor(dictionary=True)
    sql = 'SELECT sr.article_id, s.system_name, sr.explanation, \n sr.score AS system_score, ur.score AS recommendation_order,\n ur.seen_email, ur.seen_web, ur.clicked_email, ur.clicked_web,\n ur.saved, sr.recommendation_date\n FROM article_recommendations sr \n NATURAL JOIN systems s\n LEFT JOIN article_feedback ur \n ON sr.article_id = ur.article_id \n AND sr.user_id = ur.user_id\n AND sr.system_id = ur.system_id\n WHERE sr.user_id = %s\n ORDER BY sr.recommendation_date desc,\n s.system_name desc, sr.score desc'
    cur.execute(sql, (user_id,))
    return cur.fetchall()
def log_mixture_discretized_normal(x, mean, logvar, pi, inverse_bin_width):
    """Log-likelihood of x under a discretized mixture of normals.

    Each value is treated as a bin of width 1/inverse_bin_width; the bin's
    probability mass is the CDF difference at the bin edges, mixed over
    components with weights ``pi`` along the trailing dimension.
    """
    std = torch.exp(0.5 * logvar)
    # Add a trailing component axis so x broadcasts against per-component stats.
    x = x.view(x.size(0), x.size(1), x.size(2), x.size(3), 1)
    half_bin = 0.5 / inverse_bin_width
    bin_mass = normal_cdf(x + half_bin, mean, std) - normal_cdf(x - half_bin, mean, std)
    mixture_mass = torch.sum(bin_mass * pi, dim=(-1))
    # Small epsilon keeps the log finite for (numerically) zero-mass bins.
    return torch.log(mixture_mass + 1e-08)
class DPRContextEncoderTokenizerFast(metaclass=DummyObject):
    """Placeholder used when the `tokenizers` backend is not installed.

    Part of the auto-generated dummy-object pattern: importing the name
    succeeds, but instantiating it raises a helpful ImportError via
    ``requires_backends`` instead of failing at import time.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
assert (0.0 < round_up_bias < 1.0)
new_val = max(divisor, ((int((val + (divisor / 2))) // divisor) * divisor))
return (new_val if (new_val >= (round_up_bias * val)) else (new_val + divisor)) |
class EnsembleNet(BaseModule, SlimmableMixin):
    """Emulate a slimmable network as an ensemble of small "atom" subnets.

    The full width is partitioned into ``num_ens`` subnets, each built at
    width ``width_scale * atom_slim_ratio``. At forward time a contiguous
    slice of subnets (selected by ``slim_bias_idx`` and ``slim_ratio``) is
    evaluated and their logits are averaged. Subnets are registered as child
    modules named "0", "1", ...; list-like access is provided by
    ``__getitem__`` / ``__len__``.
    """

    def __init__(self, base_net: Type, num_classes=10, track_running_stats=True, bn_type='bn', share_affine=False, width_scale=1, atom_slim_ratio=0.125, slimmable_ratios=None, **kwargs):
        super(EnsembleNet, self).__init__()
        self._set_slimmabe_ratios(slimmable_ratios)
        self.atom_slim_ratio = atom_slim_ratio
        # Number of atom subnets needed to cover the widest supported ratio.
        num_ens = int((max(self.slimmable_ratios) / atom_slim_ratio))
        self.base_net = base_net
        self.bn_type = bn_type
        for i in range(num_ens):
            # Each atom subnet is the base architecture at a fraction of the width.
            self.add_module(str(i), self.base_net(num_classes=num_classes, track_running_stats=track_running_stats, bn_type=bn_type, share_affine=share_affine, width_scale=(width_scale * atom_slim_ratio), **kwargs))
        # Offset of the first subnet in the active slice.
        self.slim_bias_idx = 0
        # Width ratio currently active (starts at the maximum).
        self.slim_ratio = max(self.slimmable_ratios)
        if (width_scale != 1.0):
            # The ratio rescaling below is only validated for width_scale == 1.
            raise NotImplementedError()
        self.slimmable_ratios = [(r / width_scale) for r in self.slimmable_ratios]
        self._max_total_slim_ratio = 1.0
        # NOTE(review): a ratio is assigned to an *_idx attribute here —
        # looks inconsistent with slim_bias_idx; confirm intent.
        self.out_slim_bias_idx = self.slim_ratio
        self.mix_forward_num = 0
        self.base_idxs = list(range(num_ens))

    def from_slimmable(cls, base_net: Type, atom_slim_ratio=0.125, **kwargs):
        """Build an EnsembleNet whose atoms match a slimmable model's slices.

        NOTE(review): first parameter is named ``cls`` — presumably a
        stripped @classmethod decorator; confirm against call sites.
        """
        from .models import DigitModel
        if issubclass(base_net, DigitModel):
            ens_model = cls(base_net, atom_slim_ratio=atom_slim_ratio, **kwargs)
            smodel = SlimmableDigitModel(**kwargs)
            with torch.no_grad():
                # Iterate atom subnets (via __getitem__/__len__) and sync each
                # with the corresponding slice of the slimmable model.
                for (i_base, base) in enumerate(ens_model):
                    smodel.switch_slim_mode(atom_slim_ratio, slim_bias_idx=i_base)
                    base_state_dict = base.state_dict()
                    # Copy the slim slice's parameters into the atom subnet.
                    for (k, v) in smodel.state_dict().items():
                        base_state_dict[k].data.copy_(v.data)
                    # Sanity check: atom and slim slice must agree on random input.
                    inp_shape = copy.deepcopy(base_net.input_shape)
                    inp_shape[0] = 128
                    x = torch.rand(*inp_shape)
                    s_logits = smodel(x)
                    logits = base(x)
                    assert torch.all((s_logits == logits)), f'{torch.mean(torch.abs(s_logits))}, {torch.mean(torch.abs(logits))}'
        else:
            raise NotImplementedError(f'base_net: {base_net.__name__}')
        return ens_model

    def input_shape(self):
        """Input shape of the (identical) atom subnets.

        NOTE(review): reads like a stripped @property; confirm.
        """
        return self._modules['0'].input_shape

    def _get_abs_string_index(self, idx):
        """Normalize a possibly-negative integer index into a module-dict key."""
        idx = operator.index(idx)
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError('index {} is out of range'.format(idx))
        if (idx < 0):
            idx += len(self)
        return str(idx)

    def __getitem__(self, idx: int):
        """Return the idx-th atom subnet (or a new EnsembleNet for a slice)."""
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]

    def __setitem__(self, idx: int, module) -> None:
        """Replace the idx-th atom subnet."""
        idx = self._get_abs_string_index(idx)
        return setattr(self, str(idx), module)

    def __len__(self) -> int:
        # One entry per registered atom subnet.
        return len(self._modules)

    def forward(self, x):
        """Average the logits of the atom subnets in the current slice."""
        base_idxs = self.current_slice()
        logits = [self[i](x) for i in base_idxs]
        if (len(base_idxs) > 1):
            logits = torch.mean(torch.stack(logits, dim=(- 1)), dim=(- 1))
        else:
            logits = logits[0]
        return logits

    def current_slice(self):
        """Indices of the atom subnets selected by slim_bias_idx/slim_ratio."""
        start = self.slim_bias_idx
        end = (start + int((self.slim_ratio / self.atom_slim_ratio)))
        assert (end <= len(self)), f'Invalid slim_ratio. Too many subnets required. Have {len(self)} but require {end}-{start}={(end - start)}'
        return self.base_idxs[start:end]

    def full_net(self):
        """The ensemble itself already represents the full-width network."""
        return self

    def set_total_slim_ratio(self, r):
        """Set the active width ratio (bounded by _max_total_slim_ratio)."""
        assert (r <= self._max_total_slim_ratio), f'try to set total_slim_ratio as {r}, but the max value should be {self._max_total_slim_ratio}'
        self.slim_ratio = r

    def state_dict(self, full_size=False, destination=None, prefix='', keep_vars=False):
        """Slice-aware state_dict.

        By default only the subnets in the *current* slice are serialized;
        pass full_size=True for the standard full serialization.
        """
        if full_size:
            return super(EnsembleNet, self).state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)
        if (destination is None):
            destination = OrderedDict()
            destination._metadata = OrderedDict()
        destination._metadata[prefix[:(- 1)]] = local_metadata = dict(version=self._version)
        self._save_to_state_dict(destination, prefix, keep_vars)
        # Serialize only the currently active subnets, keyed by their index name.
        base_idxs = self.current_slice()
        for idx in base_idxs:
            name = self._get_abs_string_index(idx)
            module = self._modules[name]
            if (module is not None):
                module.state_dict(destination, ((prefix + name) + '.'), keep_vars=keep_vars)
        # Honor any registered state-dict hooks, mirroring nn.Module.state_dict.
        for hook in self._state_dict_hooks.values():
            hook_result = hook(self, destination, prefix, local_metadata)
            if (hook_result is not None):
                destination = hook_result
        return destination
def load_all_data():
    """Read every (dataset, partition) task file into a nested dict.

    Returns:
        ``{dataset: {partition: read_task_files(dataset, partition)}}`` for
        every dataset in DATASETS and partition in PARTITIONS.
    """
    return {
        dataset: {partition: read_task_files(dataset, partition) for partition in PARTITIONS}
        for dataset in DATASETS
    }
class BaseChecker(ABC):
    """Soft-assertion helper: failed checks are reported via ``handler``.

    Subclasses override ``handler`` to decide what a failure means
    (log, raise, collect, ...). All checks are classmethods — the methods
    took ``cls`` but lacked @classmethod, so class-level calls such as
    ``Checker.eq(a, b)`` mis-bound ``a`` to ``cls``; decorators restored.
    The recurring "asertion" typo in the messages is also fixed.
    """

    @classmethod
    def handler(cls, msg):
        """Receive a failure message; subclasses decide how to report it."""
        pass

    @classmethod
    def eq(cls, lhs, rhs, msg=''):
        if (lhs != rhs):
            cls.handler(f'Failed assertion :: {msg} | {lhs} != {rhs}')

    @classmethod
    def gt(cls, lhs, rhs, msg=''):
        if (lhs <= rhs):
            cls.handler(f'Failed assertion :: {msg} | {lhs} <= {rhs}')

    @classmethod
    def ge(cls, lhs, rhs, msg=''):
        if (lhs < rhs):
            cls.handler(f'Failed assertion :: {msg} | {lhs} < {rhs}')

    @classmethod
    def lt(cls, lhs, rhs, msg=''):
        if (lhs >= rhs):
            cls.handler(f'Failed assertion :: {msg} | {lhs} >= {rhs}')

    @classmethod
    def le(cls, lhs, rhs, msg=''):
        if (lhs > rhs):
            cls.handler(f'Failed assertion :: {msg} | {lhs} > {rhs}')

    @classmethod
    def none(cls, obj, msg=''):
        if (obj is not None):
            cls.handler(f'Failed assertion :: {msg} | expr is not None')

    @classmethod
    def not_none(cls, obj, msg=''):
        if (obj is None):
            cls.handler(f'Failed assertion :: {msg} | expr is None')

    @classmethod
    def true(cls, cond, msg=''):
        if (not cond):
            cls.handler(f'Failed assertion :: {msg} | condition is not True')

    @classmethod
    def false(cls, cond, msg=''):
        if cond:
            cls.handler(f'Failed assertion :: {msg} | condition is not False')
@_criterion('span_bert_loss')
class NoNSPPairLoss(FairseqCriterion):
    """Masked-LM loss plus an auxiliary span pair loss (no NSP term).

    The '@' on the registration decorator was missing — the bare call
    discarded its result, so the criterion was never registered under
    'span_bert_loss'; restored here.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        self.args = args
        # Weight of the auxiliary pair loss; 0 disables it.
        self.aux_loss_weight = getattr(args, 'pair_loss_weight', 0)

    def forward(self, model, sample, reduce=True):
        """Compute the combined loss for one sample.

        Returns:
            (loss, sample_size, logging_output) as fairseq expects.
        """
        net_output = model(**sample['net_input'])
        # Flatten LM logits/targets to (N, vocab) / (N,) for cross entropy.
        lm_targets = sample['lm_target'].view(-1)
        lm_logits = net_output[0]
        lm_logits = lm_logits.view(-1, lm_logits.size(-1))
        lm_loss = F.cross_entropy(lm_logits, lm_targets, size_average=False, ignore_index=self.padding_idx, reduce=reduce)
        # Auxiliary pair (span boundary) prediction loss.
        pair_target_logits = net_output[2]
        pair_target_logits = pair_target_logits.view(-1, pair_target_logits.size(-1))
        pair_targets = sample['pair_targets'].view(-1)
        pair_loss = F.cross_entropy(pair_target_logits, pair_targets, size_average=False, ignore_index=self.padding_idx, reduce=reduce)
        nsentences = sample['lm_target'].size(0)
        ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
        # +1 guards against division by zero when a batch has no pairs.
        npairs = (utils.strip_pad(pair_targets, self.padding_idx).numel() + 1)
        sample_size = (nsentences if self.args.sentence_avg else ntokens)
        loss = ((lm_loss / ntokens) + ((self.aux_loss_weight * pair_loss) / npairs))
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'lm_loss': (utils.item(lm_loss.data) if reduce else lm_loss.data), 'pair_loss': (utils.item(pair_loss.data) if reduce else pair_loss.data), 'ntokens': ntokens, 'npairs': npairs, 'nsentences': nsentences, 'sample_size': sample_size, 'aux_loss_weight': self.aux_loss_weight}
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs (losses reported in bits).

        Marked @staticmethod: it takes no self/cls and fairseq invokes it on
        the criterion class.
        """
        lm_loss_sum = sum((log.get('lm_loss', 0) for log in logging_outputs))
        pair_loss_sum = sum((log.get('pair_loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        npairs = sum((log.get('npairs', 0) for log in logging_outputs))
        aux_loss_weight = max((log.get('aux_loss_weight', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Divide by log(2) to convert nats to bits for reporting.
        agg_loss = (((lm_loss_sum / ntokens) / math.log(2)) + (((aux_loss_weight * pair_loss_sum) / npairs) / math.log(2)))
        agg_output = {'loss': agg_loss, 'lm_loss': ((lm_loss_sum / ntokens) / math.log(2)), 'pair_loss': (((aux_loss_weight * pair_loss_sum) / npairs) / math.log(2)), 'nll_loss': ((lm_loss_sum / ntokens) / math.log(2)), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return agg_output
# Decorator restored: the original line was a bare tuple
# ('disconnect', namespace='/tms'), i.e. a stripped event-handler
# registration. socketio is the Flask-SocketIO server used below —
# confirm the decorator object name against the file's imports.
@socketio.on('disconnect', namespace='/tms')
def ws_disconn():
    """Handle a client disconnect on /tms: decrement the user counter and
    broadcast the new count to the namespace."""
    c = db.decr('user_count')
    socketio.emit('msg', {'count': c}, namespace='/tms')
class OnnxProfilingParser(ProfilingParser):
    """Parse onnxruntime JSON profiling traces into per-op summaries."""

    def process(self) -> List[dict]:
        """Aggregate trace events from every log file, grouped by op name.

        Events missing a category, name, or op_name are skipped. Durations
        are accumulated into total time plus a CPU ('Node' category) or
        accelerator ('kernel' category) bucket.

        Returns:
            The serialized list of ProfilingResult rows.
        """
        # One accumulator row per op name, all counters starting at zero.
        summarized = defaultdict((lambda : {'node_name': '', 'total_execution_time': 0, 'accelerator_execution_time': 0, 'cpu_execution_time': 0, 'op_run': 0, 'op_defined': 0}))
        for log_file in self._logs:
            with open(log_file) as f:
                data = json.load(f)
            for node in data:
                category = node.get('cat')
                op_name = node.get('args', {}).get('op_name')
                # A missing/None duration counts as zero.
                dur = int((node.get('dur') or 0))
                if ((category is None) or (node.get('name') is None) or (op_name is None)):
                    continue
                summarized[op_name]['node_name'] = op_name
                summarized[op_name]['total_execution_time'] += dur
                if (category == 'Node'):
                    # 'Node' events accumulate into the CPU-time bucket.
                    summarized[op_name]['cpu_execution_time'] += dur
                elif (category == 'kernel'):
                    # 'kernel' events accumulate into the accelerator bucket.
                    summarized[op_name]['accelerator_execution_time'] += dur
                summarized[op_name]['op_defined'] += 1
                # An op counts as "run" when it has a nonzero duration or
                # carries thread-scheduling stats.
                summarized[op_name]['op_run'] += (0 if (not (dur or node.get('args', {}).get('thread_scheduling_stats'))) else 1)
        for node in summarized.values():
            self.add_result(ProfilingResult(**node))
        return self._serialize_results()
def tsv_mv(src_file, dst_file):
    """Move a TSV file together with its companion .lineidx index file.

    The index (same stem, '.lineidx' extension) is moved alongside the data
    file when it exists; otherwise only the TSV itself is moved.
    """
    shutil.move(src_file, dst_file)
    src_idx = op.splitext(src_file)[0] + '.lineidx'
    dst_idx = op.splitext(dst_file)[0] + '.lineidx'
    if op.isfile(src_idx):
        shutil.move(src_idx, dst_idx)
class Discriminator_wgan_noproj(torch.nn.Module):
    """DCGAN-style convolutional critic for WGAN, without a projection head.

    A stack of stride-2 4x4 convolutions (LeakyReLU, optional BatchNorm from
    the second layer on) followed by a single 4x4 valid convolution producing
    the critic output. Weights are initialized N(0, 0.02), biases to 0, and
    an optimizer over all parameters is created at construction time.
    """

    def __init__(self, input_dim, num_filters, output_dim, optimizer, lr, betas, batch_norm=False):
        super(Discriminator_wgan_noproj, self).__init__()
        self.hidden_layer = torch.nn.Sequential()
        # Channel progression: input_dim -> num_filters[0] -> ... -> num_filters[-1]
        channels = [input_dim] + list(num_filters)
        for i in range(len(num_filters)):
            conv = nn.Conv2d(channels[i], channels[i + 1], kernel_size=4, stride=2, padding=1)
            self.hidden_layer.add_module('conv' + str(i + 1), conv)
            nn.init.normal_(conv.weight, mean=0.0, std=0.02)
            nn.init.constant_(conv.bias, 0.0)
            # No BatchNorm on the first layer (standard DCGAN critic practice).
            if i != 0 and batch_norm:
                self.hidden_layer.add_module('bn' + str(i + 1), torch.nn.BatchNorm2d(num_filters[i]))
            self.hidden_layer.add_module('act' + str(i + 1), torch.nn.LeakyReLU(0.2))
        # Output head: 4x4 valid conv collapsing the remaining spatial extent.
        self.output_layer = torch.nn.Sequential()
        out = nn.Conv2d(num_filters[-1], output_dim, kernel_size=4, stride=1, padding=0)
        self.output_layer.add_module('out', out)
        nn.init.normal_(out.weight, mean=0.0, std=0.02)
        nn.init.constant_(out.bias, 0.0)
        params = list(self.hidden_layer.parameters()) + list(self.output_layer.parameters())
        self.optimizer = optimizer(params, lr=lr, betas=betas)

    def forward(self, x):
        """Run the critic: hidden conv stack followed by the output conv."""
        return self.output_layer(self.hidden_layer(x))
class Up(nn.Module):
    """U-Net decoder stage: upsample, pad to match the skip, concat, double-conv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear interpolation keeps channel count; the transposed conv
        # alternative halves it while learning the upsampling.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            half = in_channels // 2
            self.up = nn.ConvTranspose2d(half, half, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample x1, pad it to x2's spatial size, concat, and convolve."""
        x1 = self.up(x1)
        # Padding handles odd input sizes where 2x upsampling undershoots x2.
        diff_h = x2.size()[2] - x1.size()[2]
        diff_w = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2])
        return self.conv(torch.cat([x2, x1], dim=1))
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int=(- 1)):
    """Create an LR schedule that is constant after a linear warmup.

    Args:
        optimizer: wrapped optimizer whose learning rate is scheduled.
        num_warmup_steps: steps over which the LR ramps up to the optimizer's LR.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).

    Returns:
        A ``LambdaLR`` applying the warmup-then-constant multiplier.
    """
    # functools.partial (rather than a lambda) keeps the schedule picklable.
    lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def conv3x3(in_planes, out_planes, dilation=1):
    """3x3 convolution preserving spatial size (padding equals dilation)."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=dilation,
        dilation=dilation,
    )
    return layer
def parse_args():
    """Parse CLI options for SNLI training; returns the argparse namespace."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--exp_dir', default='models/snli/')
    parser.add_argument('--model_type', default='bowman', choices=['bowman', 'minimal'])
    # Integer hyperparameters share the same shape; register them in one pass.
    for name, default in (('--save_every', 1), ('--epochs', 50), ('--embedding_dim', 300), ('--hidden_dim', 512)):
        parser.add_argument(name, default=default, type=int)
    # Boolean switches.
    for flag in ('--debug', '--cuda'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()
def eval_tracking():
    """CLI entry point: evaluate a tracking result JSON on the 'val' split.

    Reads <work_dir>/tracking_result.json and passes it to the project's
    evaluation routine. NOTE(review): ``eval`` here is a project helper that
    shadows the builtin — confirm it is imported in this module.
    """
    args = parse_args()
    eval(os.path.join(args.work_dir, 'tracking_result.json'), 'val', args.work_dir, args.root)
def test_numpy_subscripting():
    """Staleness-detection check for numpy element subscripts.

    Uses the notebook-simulation helpers: each ``run_cell`` executes one
    virtual cell; ``assert_detected`` asserts the tracker flagged the last
    cell as depending on a stale value. ``y`` is derived from ``x[3]``,
    ``x[3]`` is then overwritten, so reading ``y`` must be reported stale.
    """
    run_cell('import numpy as np')
    run_cell('x = np.zeros(5)')
    run_cell('y = x[3] + 5')
    # Mutating the element y was computed from makes y stale.
    run_cell('x[3] = 2')
    run_cell('logging.info(y)')
    assert_detected('y depends on stale x[3]')
class TestFairseqDecoderBase(unittest.TestCase):
    """Reusable base for FairseqDecoder tests; subclasses supply the decoder.

    The base class itself is skipped — only concrete subclasses run.
    """

    @classmethod
    def setUpClass(cls):
        # @classmethod restored: unittest invokes this as cls.setUpClass()
        # and the body both reads `cls` and calls super().setUpClass(),
        # neither of which works on an undecorated instance method.
        if (cls is TestFairseqDecoderBase):
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpDecoder(self, decoder):
        """Register the decoder under test (must be a FairseqDecoder)."""
        self.assertTrue(isinstance(decoder, FairseqDecoder), msg='This class is only used for test FairseqDecoder')
        self.decoder = decoder

    def setUpInput(self, input=None):
        """Set the encoder output fed to the decoder (dummy by default)."""
        self.forward_input = (get_dummy_encoder_output() if (input is None) else input)

    def setUpPrevOutputTokens(self, tokens=None):
        """Set previous output tokens (taken from a dummy input by default)."""
        if (tokens is None):
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input['prev_output_tokens']
        else:
            self.prev_output_tokens = tokens

    def setUp(self):
        # Subclasses populate these via the setUp* helpers above; test_forward
        # is a no-op until all three are set.
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None

    def test_forward(self):
        """Run one decoder forward pass and validate its output structure."""
        if ((self.decoder is not None) and (self.forward_input is not None) and (self.prev_output_tokens is not None)):
            forward_output = self.decoder.forward(prev_output_tokens=self.prev_output_tokens, encoder_out=self.forward_input)
            (succ, msg) = check_decoder_output(forward_output)
            if (not succ):
                self.assertTrue(succ, msg=msg)
            # NOTE(review): storing the decoder output back into
            # forward_input mirrors the original code — confirm intent.
            self.forward_input = forward_output
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.