code stringlengths 101 5.91M |
|---|
def log_graph(graph, outdir, filename, identify_self=False, nodecolor='tag', fig_size=(4, 3), dpi=300, label_node_feat=True, edge_vmax=None, args=None, eps=1e-06):
    """Render a networkx graph colored by node tag/feature and save it.

    Writes a PDF rendering to ``outdir/filename`` plus a ``.gpickle`` copy of
    the graph next to it. No-op for graphs without edges.

    Args:
        graph: networkx graph; nodes may carry 'self'/'tag'/'feat' attributes
            and edges an optional 'weight' (default 1).
        outdir: output directory (created if missing).
        filename: output PDF file name.
        identify_self: color nodes flagged with a 'self' attribute distinctly.
        nodecolor: 'tag' or 'feat' -- which node attribute drives node colors.
        fig_size, dpi: matplotlib figure geometry.
        label_node_feat: attach tag/feat labels to nodes when True.
        edge_vmax: upper bound for the edge colormap; defaults to the median
            edge weight.
        args: unused; kept for interface compatibility.
        eps: slack subtracted from edge_vmin to avoid a degenerate color range.
    """
    if len(graph.edges) == 0:
        return
    import matplotlib.pyplot as plt
    plt.switch_backend('agg')  # headless rendering (was called three times)
    cmap = plt.get_cmap('tab20')
    # Assign a color index (and optional label) per node.
    node_colors = []
    feat_labels = {}
    for i in graph.nodes():
        if identify_self and ('self' in graph.nodes[i]):
            node_colors.append(0)
        elif (nodecolor == 'tag') and ('tag' in graph.nodes[i]):
            node_colors.append(graph.nodes[i]['tag'])
            feat_labels[i] = graph.nodes[i]['tag']
        elif (nodecolor == 'feat') and ('feat' in graph.nodes[i]):
            # Use the index of the first one-hot feature entry as the class.
            feat = graph.nodes[i]['feat'].detach().numpy()
            feat_class = 0
            for j in range(len(feat)):
                if feat[j] == 1:
                    feat_class = j
                    break
            node_colors.append(feat_class)
            feat_labels[i] = feat_class
        else:
            node_colors.append(1)
    if not label_node_feat:
        feat_labels = None
    # Create the figure once (the original built and leaked a duplicate).
    fig = plt.figure(figsize=fig_size, dpi=dpi)
    # Original had identical branches for >20 and <=20 nodes; collapsed.
    pos_layout = nx.kamada_kawai_layout(graph, weight=None)
    # Edge weights drive the edge colormap; compute the list exactly once
    # (original rebuilt it four times).
    edge_colors = [w for (_, _, w) in graph.edges.data('weight', default=1)]
    if edge_vmax is None:
        edge_vmax = statistics.median_high(edge_colors)
    # Mirror the minimum weight about edge_vmax so the median sits mid-range.
    edge_vmin = 2 * min(edge_colors) - edge_vmax
    vmax = 19  # tab20 has 20 discrete colors (indices 0..19)
    nx.draw(graph, pos=pos_layout, with_labels=False, font_size=4, labels=feat_labels, node_color=node_colors, vmin=0, vmax=vmax, cmap=cmap, edge_color=edge_colors, edge_cmap=plt.get_cmap('Greys'), edge_vmin=(edge_vmin - eps), edge_vmax=edge_vmax, width=1.3, node_size=100, alpha=0.9, arrows=False)
    fig.axes[0].xaxis.set_visible(False)
    fig.canvas.draw()
    save_path = os.path.join(outdir, filename)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    nx.write_gpickle(graph, os.path.splitext(save_path)[0] + '.gpickle')
    plt.savefig(save_path, format='pdf')
class DQNAgent():
    """Deep Q-Network agent for environments with flat (discrete) actions.

    Trains an online DQN against a periodically synced target network using
    an epsilon-greedy policy with a linear exploration schedule; metrics are
    logged via a TensorBoard SummaryWriter.
    """

    def __init__(self, env, seed=None, lr=0.001, training_steps=20000, batch_size=32, replay_size=10000, final_epsilon=0.05, exploration_steps=10000, gamma=0.99, hidden_sizes=[64, 64], target_update_freq=1000, verbose=True, **kwargs):
        # NOTE(review): mutable default `hidden_sizes=[64, 64]` is shared
        # across calls -- harmless here since it is never mutated.
        assert env.flat_actions
        self.verbose = verbose
        if self.verbose:
            print(f'''
Running DQN with config:''')
            pprint(locals())
        self.seed = seed
        if (self.seed is not None):
            np.random.seed(self.seed)
        # Environment and problem dimensions.
        self.env = env
        self.num_actions = self.env.action_space.n
        self.obs_dim = self.env.observation_space.shape
        self.logger = SummaryWriter()
        # Hyperparameters.
        self.lr = lr
        self.exploration_steps = exploration_steps
        self.final_epsilon = final_epsilon
        # Linear decay from 1.0 down to final_epsilon over exploration_steps.
        self.epsilon_schedule = np.linspace(1.0, self.final_epsilon, self.exploration_steps)
        self.batch_size = batch_size
        self.discount = gamma
        self.training_steps = training_steps
        self.steps_done = 0
        # Online and target networks (target is synced, never trained directly).
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        self.dqn = DQN(self.obs_dim, hidden_sizes, self.num_actions).to(self.device)
        if self.verbose:
            print(f'''
Using Neural Network running on device={self.device}:''')
            print(self.dqn)
        self.target_dqn = DQN(self.obs_dim, hidden_sizes, self.num_actions).to(self.device)
        self.target_update_freq = target_update_freq
        self.optimizer = optim.Adam(self.dqn.parameters(), lr=self.lr)
        self.loss_fn = nn.SmoothL1Loss()
        self.replay = ReplayMemory(replay_size, self.obs_dim, self.device)

    def save(self, save_path):
        """Save the online network's weights to save_path."""
        self.dqn.save_DQN(save_path)

    def load(self, load_path):
        """Load weights for the online network from load_path."""
        self.dqn.load_DQN(load_path)

    def get_epsilon(self):
        """Return the current epsilon: scheduled while exploring, then final."""
        if (self.steps_done < self.exploration_steps):
            return self.epsilon_schedule[self.steps_done]
        return self.final_epsilon

    def get_egreedy_action(self, o, epsilon):
        """Epsilon-greedy action for observation `o` (numpy array)."""
        if (random.random() > epsilon):
            o = torch.from_numpy(o).float().to(self.device)
            return self.dqn.get_action(o).cpu().item()
        return random.randint(0, (self.num_actions - 1))

    def optimize(self):
        """Perform one gradient step on a sampled replay batch.

        Returns:
            (loss, mean_v): scalar TD loss and mean max-Q over the batch.
        """
        batch = self.replay.sample_batch(self.batch_size)
        (s_batch, a_batch, next_s_batch, r_batch, d_batch) = batch
        # Q(s, a) for the actions actually taken.
        q_vals_raw = self.dqn(s_batch)
        q_vals = q_vals_raw.gather(1, a_batch).squeeze()
        # Bootstrapped TD target computed from the frozen target network;
        # (1 - d_batch) zeroes the bootstrap for terminal transitions.
        with torch.no_grad():
            target_q_val_raw = self.target_dqn(next_s_batch)
            target_q_val = target_q_val_raw.max(1)[0]
            target = (r_batch + ((self.discount * (1 - d_batch)) * target_q_val))
        loss = self.loss_fn(q_vals, target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Periodically sync the target network with the online network.
        if ((self.steps_done % self.target_update_freq) == 0):
            self.target_dqn.load_state_dict(self.dqn.state_dict())
        q_vals_max = q_vals_raw.max(1)[0]
        mean_v = q_vals_max.mean().item()
        return (loss.item(), mean_v)

    def train(self):
        """Run the training loop until `training_steps` env steps are done."""
        if self.verbose:
            print('\nStarting training')
        num_episodes = 0
        training_steps_remaining = self.training_steps
        # NOTE(review): if training_steps is 0 no episode runs and the final
        # verbose prints below would reference unbound `ep_return`/`goal`.
        while (self.steps_done < self.training_steps):
            ep_results = self.run_train_episode(training_steps_remaining)
            (ep_return, ep_steps, goal) = ep_results
            num_episodes += 1
            training_steps_remaining -= ep_steps
            # Per-episode metrics, keyed by the global step count.
            self.logger.add_scalar('episode', num_episodes, self.steps_done)
            self.logger.add_scalar('epsilon', self.get_epsilon(), self.steps_done)
            self.logger.add_scalar('episode_return', ep_return, self.steps_done)
            self.logger.add_scalar('episode_steps', ep_steps, self.steps_done)
            self.logger.add_scalar('episode_goal_reached', int(goal), self.steps_done)
            if (((num_episodes % 10) == 0) and self.verbose):
                print(f'''
Episode {num_episodes}:''')
                print(f' steps done = {self.steps_done} / {self.training_steps}')
                print(f' return = {ep_return}')
                print(f' goal = {goal}')
        self.logger.close()
        if self.verbose:
            print('Training complete')
            print(f'''
Episode {num_episodes}:''')
            print(f' steps done = {self.steps_done} / {self.training_steps}')
            print(f' return = {ep_return}')
            print(f' goal = {goal}')

    def run_train_episode(self, step_limit):
        """Run one training episode (capped at step_limit env steps).

        Returns:
            (episode_return, steps, goal_reached)
        """
        o = self.env.reset()
        done = False
        steps = 0
        episode_return = 0
        while ((not done) and (steps < step_limit)):
            a = self.get_egreedy_action(o, self.get_epsilon())
            (next_o, r, done, _) = self.env.step(a)
            self.replay.store(o, a, next_o, r, done)
            self.steps_done += 1
            # One optimization step per environment step.
            (loss, mean_v) = self.optimize()
            self.logger.add_scalar('loss', loss, self.steps_done)
            self.logger.add_scalar('mean_v', mean_v, self.steps_done)
            o = next_o
            episode_return += r
            steps += 1
        return (episode_return, steps, self.env.goal_reached())

    def run_eval_episode(self, env=None, render=False, eval_epsilon=0.05, render_mode='readable'):
        """Run one evaluation episode with a fixed (small) epsilon.

        Interactive rendering pauses for user input between steps when
        `render` is True. Returns (episode_return, steps, goal_reached).
        """
        if (env is None):
            env = self.env
        o = env.reset()
        done = False
        steps = 0
        episode_return = 0
        line_break = ('=' * 60)
        if render:
            print(('\n' + line_break))
            print(f'Running EVALUATION using epsilon = {eval_epsilon:.4f}')
            print(line_break)
            env.render(render_mode)
            input('Initial state. Press enter to continue..')
        while (not done):
            a = self.get_egreedy_action(o, eval_epsilon)
            (next_o, r, done, _) = env.step(a)
            o = next_o
            episode_return += r
            steps += 1
            if render:
                print(('\n' + line_break))
                print(f'Step {steps}')
                print(line_break)
                print(f'Action Performed = {env.action_space.get_action(a)}')
                env.render(render_mode)
                print(f'Reward = {r}')
                print(f'Done = {done}')
                input('Press enter to continue..')
                if done:
                    print(('\n' + line_break))
                    print('EPISODE FINISHED')
                    print(line_break)
                    print(f'Goal reached = {env.goal_reached()}')
                    print(f'Total steps = {steps}')
                    print(f'Total reward = {episode_return}')
        return (episode_return, steps, env.goal_reached())
class Add(Layer):
    """Thin Python wrapper around the backend 'Add' (bias-add) layer.

    Construction and configuration are delegated to the backend via the
    inherited `Layer` machinery and `callBigDlFunc`.
    """

    def __init__(self, input_size, bigdl_type='float'):
        super(Add, self).__init__(None, bigdl_type, input_size)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Configure weight/bias initialisation on the backing layer.

        Returns self to allow fluent chaining.
        """
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
class ConcolicGen(BaseGen):
    """Concolic graph generator: concrete tensors with symbolic (z3) shapes.

    Each insertion attempt builds a fresh z3 solver over the candidate op's
    shape/dtype constraints and concretizes the op from the model on success.
    """

    def __init__(self, opset, seed=None, init_fp=False, **kwargs):
        super().__init__(opset, seed, **kwargs)
        if (seed is not None):
            set_z3_state(seed)
        # Seed the graph with one concrete placeholder input
        # (forced to float32 when init_fp is set).
        self.insert_init_ph_node(self.make_random_concrete_placeholder(self.random_rank(), dtype=(DType.float32 if init_fp else None)))

    def try_forward_insert_at(self, node: AbsOpBase, input_vars: List[str], force_positive_dim=False) -> bool:
        """Try to append `node` consuming `input_vars`; return True on success."""
        solver = z3.Solver()
        itensors = [self.ir.vars[vname] for vname in input_vars]
        constraints = node.checked_requires(itensors)
        if (SMT_LOG.getEffectiveLevel() <= logging.DEBUG):
            SMT_LOG.debug(f'---> Trying to solve: {node} ~ {constraints}')
        if force_positive_dim:
            # NOTE(review): bails out when an input still yields positivity
            # constraints -- confirm gt_zero() returns a (truthy) constraint
            # list for symbolic tensors.
            for aten in itensors:
                if aten.gt_zero():
                    return False
        otensors = node.checked_type_transfer(itensors)
        # Constrain output dims: strictly positive when forced, otherwise
        # symbolic > 0 / concrete >= 0.
        if force_positive_dim:
            for aten in otensors:
                for c in aten.gt_zero():
                    constraints.append(c)
        else:
            for aten in otensors:
                for c in aten.sym_gt_conc_ge_zero():
                    constraints.append(c)
        check_res = self.base_check_sat(solver, *constraints)
        if (check_res != z3.sat):
            return False
        # Concretize the op using the satisfying model, then re-infer the
        # (now concrete) output types.
        node = concretize_op_autoinf(node, solver.model())
        otensors = node.checked_type_transfer(itensors)
        if (MGEN_LOG.getEffectiveLevel() <= logging.DEBUG):
            MGEN_LOG.debug(f'>> Forward insert: {node}')
            MGEN_LOG.debug(f' inputs: {itensors}')
            MGEN_LOG.debug(f' outputs: {otensors}')
        # Reject outputs that violate the generator's global type constraints.
        for (i, ten) in enumerate(otensors):
            if (not all(self.tensor_type_constraints(ten))):
                MGEN_LOG.debug(f'{i}-th output type constraint failed: {ten}')
                return False
        node.bind_input_like(itensors)
        node.bind_output_like(otensors)
        self.forward_insert_node(node, input_vars)
        return True

    def try_occupy_placeholder(self, node: AbsOpBase, phvars: List[str], force_positive_dim=False) -> bool:
        """Try to replace the placeholders in `phvars` with `node`'s outputs.

        Inserts fresh symbolic placeholders as the op's inputs (backward
        insertion); returns True on success.
        """
        if (MGEN_LOG.getEffectiveLevel() <= logging.DEBUG):
            MGEN_LOG.debug(f'---> Trying to occupy placeholder: {phvars} for node {node}')
        solver = z3.Solver()
        otensors = [self.ir.vars[name] for name in phvars]
        phs_as_op_inputs: List[Placeholder] = []
        constraints = []
        # Create one symbolic placeholder per required input; rank -1 means
        # "any rank", so a random one is drawn.
        for (rank, dtype) in node.deduct_inp_ranks_and_dtype(otensors):
            ph = self.make_symbolic_placeholder((rank if (rank != (- 1)) else self.random_rank()), dtype=dtype)
            phs_as_op_inputs.append(ph)
            constraints.extend((ph.ttype.gt_zero() if force_positive_dim else ph.ttype.sym_gt_conc_ge_zero()))
        itensors = [p.ttype for p in phs_as_op_inputs]
        constraints.extend(node.checked_requires(itensors))
        # The op's inferred outputs must match the occupied placeholders.
        inferred_otensors = node.checked_type_transfer(itensors)
        for (i, shape) in enumerate(inferred_otensors):
            constraints.extend(shape.eq(otensors[i]))
            constraints.extend((shape.gt_zero() if force_positive_dim else shape.sym_gt_conc_ge_zero()))
        check_res = self.base_check_sat(solver, *constraints)
        if (check_res != z3.sat):
            return False
        if (MGEN_LOG.getEffectiveLevel() <= logging.DEBUG):
            MGEN_LOG.debug(f'>> Backward insert: {node}')
            MGEN_LOG.debug(f' inputs: {phs_as_op_inputs}')
        # Concretize the input placeholders from the model.
        model = solver.model()
        itensors = []
        for (i, ph) in enumerate(phs_as_op_inputs):
            phs_as_op_inputs[i] = concretize_op_autoinf(ph, model)
            itensors.append(phs_as_op_inputs[i].ttype)
        # Reject inputs that violate the generator's global type constraints.
        for (i, ten) in enumerate(itensors):
            if (not all(self.tensor_type_constraints(ten))):
                MGEN_LOG.debug(f'{i}-th input type constraint failed: {ten}')
                return False
        node = concretize_op_autoinf(node, model)
        node.bind_input_like(itensors)
        node.bind_output_like(otensors)
        # Materialize the new placeholders, then splice the node in backward.
        input_vars = []
        for ph in phs_as_op_inputs:
            inst = self.forward_insert_node(ph, [])
            input_vars.append(inst.retval())
        self.backward_insert_node(node, input_vars, phvars)
        return True

    def assume(self, c: bool):
        """Assert a (concrete) assumption; raises via ConstraintCheck if false."""
        ConstraintCheck.true(c, 'Assumption failed')

    def make_concrete(self) -> GraphIR:
        """Return the generated IR (already concrete in concolic mode)."""
        return self.ir
class Server():
    """Central server for grouped/clustered federated learning.

    Maintains one parameter set per client group, dispatches training to
    selected clients (each assigned to its best-fitting group), and
    aggregates their sample-weighted updates per group (FedAvg).
    """

    def __init__(self, client_models):
        self.client_models = client_models
        # One parameter set per group, initialised from each client model.
        self.models = [client_model.get_params() for client_model in client_models]
        self.num_groups = len(client_models)
        self.selected_clients = []
        self.updates = []              # buffered (num_samples, update) pairs
        self.updates_group_infos = []  # group index aligned with self.updates

    def select_clients(self, my_round, possible_clients, num_clients=20):
        """Sample up to `num_clients` clients for this round.

        Seeds numpy's global RNG with the round number so selection is
        reproducible. Returns (num_train_samples, num_test_samples) pairs.
        """
        num_clients = min(num_clients, len(possible_clients))
        np.random.seed(my_round)
        self.selected_clients = np.random.choice(possible_clients, num_clients, replace=False)
        return [(c.num_train_samples, c.num_test_samples) for c in self.selected_clients]

    def train_model(self, num_epochs=1, batch_size=10, minibatch=None, clients=None, force_group_dict=None):
        """Train each client on its group's model and buffer the updates.

        Each client's group is the one whose model minimizes its train loss
        (via test_model_group_info), unless overridden by force_group_dict.
        Returns per-client system metrics (bytes and local computations).
        """
        if clients is None:
            clients = self.selected_clients
        sys_metrics = {c.id: {BYTES_WRITTEN_KEY: 0, BYTES_READ_KEY: 0, LOCAL_COMPUTATIONS_KEY: 0} for c in clients}
        client_test_metrics = self.test_model_group_info(clients, 'train')
        for c in clients:
            group_index = client_test_metrics[c.id]['group_index']
            if force_group_dict is not None:
                # Explicit group assignment overrides the loss-based choice.
                group_index = force_group_dict[c.id]
            c.model.set_params(self.models[group_index])
            (comp, num_samples, update) = c.train(num_epochs, batch_size, minibatch)
            sys_metrics[c.id][BYTES_READ_KEY] += c.model.size
            sys_metrics[c.id][BYTES_WRITTEN_KEY] += c.model.size
            sys_metrics[c.id][LOCAL_COMPUTATIONS_KEY] = comp
            self.updates.append((num_samples, update))
            self.updates_group_infos.append(group_index)
        return sys_metrics

    def update_model(self):
        """Aggregate buffered updates into each group's model (weighted FedAvg).

        Groups with no updates keep their current model. Clears the update
        buffers afterwards.
        """
        updates_by_group = [[] for _ in range(self.num_groups)]
        for (i, g_i) in enumerate(self.updates_group_infos):
            updates_by_group[g_i].append(self.updates[i])
        for (g_i, updates) in enumerate(updates_by_group):
            if len(updates) == 0:
                continue
            total_weight = 0.0
            base = [0] * len(updates[0][1])
            for (client_samples, client_model) in updates:
                total_weight += client_samples
                for (i, v) in enumerate(client_model):
                    # Accumulate in float64 for numerical stability.
                    base[i] += (client_samples * v.astype(np.float64))
            averaged_soln = [(v / total_weight) for v in base]
            self.models[g_i] = averaged_soln
        self.updates = []
        self.updates_group_infos = []

    def test_model(self, clients_to_test, set_to_use='test'):
        """Evaluate clients on `set_to_use`; falls back to selected clients."""
        if clients_to_test is None:
            clients_to_test = self.selected_clients
        metrics = self.test_model_group_info(clients_to_test, set_to_use)
        return metrics

    def test_model_group_info(self, clients_to_test, set_to_use='test'):
        """Evaluate clients and record/use their best group assignment.

        With 'train': each client is evaluated against every group model and
        assigned the group with minimum loss (cached in train_group_index).
        Otherwise: the cached assignment is used, then the cache is cleared.
        """
        metrics = {}
        if (set_to_use == 'train'):
            self.train_group_index = {}
            for client in clients_to_test:
                tmp_c_metrics = []
                for g_i in range(self.num_groups):
                    client.model.set_params(self.models[g_i])
                    c_metrics = client.test(set_to_use)
                    tmp_c_metrics.append(c_metrics)
                best_g_i = np.argmin([el['loss'] for el in tmp_c_metrics])
                c_metrics = tmp_c_metrics[best_g_i]
                c_metrics['group_index'] = best_g_i
                metrics[client.id] = c_metrics
                self.train_group_index[client.id] = best_g_i
        else:
            for client in clients_to_test:
                g_i = self.train_group_index[client.id]
                client.model.set_params(self.models[g_i])
                c_metrics = client.test(set_to_use)
                c_metrics['group_index'] = g_i
                metrics[client.id] = c_metrics
            # Assignments are single-use: reset after the evaluation pass.
            self.train_group_index = {}
        return metrics

    def get_clients_info(self, clients):
        """Return (ids, groups, num_samples) for `clients` (or the selection)."""
        if clients is None:
            clients = self.selected_clients
        ids = [c.id for c in clients]
        groups = {c.id: c.group for c in clients}
        num_samples = {c.id: c.num_samples for c in clients}
        return (ids, groups, num_samples)

    def save_model(self, path):
        """Persist the first group's model via the first client model's saver.

        BUG FIX: previously read `self.model[0]` -- the attribute is `models`.
        """
        self.client_models[0].set_params(self.models[0])
        model_sess = self.client_models[0].sess
        return self.client_models[0].saver.save(model_sess, path)

    def close_model(self):
        """Release every client model's resources."""
        for client_model in self.client_models:
            client_model.close()
def train(args, model, device, train_loader, optimizer, epoch, logger):
    """Run one training epoch, supporting both CurveBall and standard optimizers.

    Per batch: computes loss and accuracy, updates the model, and reports
    stats through `logger` (or prints them when no logger is given).
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time()
        data = data.to(device)
        target = target.to(device)

        # Closures over the current batch, as required by CurveBall's step().
        def model_fn():
            return model(data)

        def loss_fn(pred):
            return F.cross_entropy(pred, target)

        if isinstance(optimizer, CurveBall):
            # CurveBall performs forward + backward internally.
            loss, predictions = optimizer.step(model_fn, loss_fn)
        else:
            optimizer.zero_grad()
            predictions = model_fn()
            loss = loss_fn(predictions)
            loss.backward()
            optimizer.step()

        # Top-1 accuracy for this batch.
        pred = predictions.max(1, keepdim=True)[1]
        correct = pred.eq(target.view_as(pred))
        accuracy = correct.double().mean()

        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if logger:
            logger.update_average(stats)
            # Skip the first few batches when averaging the batch time.
            if logger.avg_count['train.loss'] > 3:
                logger.update_average({'train.time': time() - start})
            logger.print(line_prefix=('ep %i ' % epoch), prefix='train')
        else:
            print(stats)
class MobileNetV2Config(PretrainedConfig):
    """Configuration for a MobileNetV2 model.

    Stores architecture hyperparameters (channel scaling, expansion ratio,
    output stride, activation, dropout, etc.); everything else is handled by
    the PretrainedConfig base class via **kwargs.
    """
    model_type = 'mobilenet_v2'

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act='relu6', tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        # A non-positive multiplier would collapse every layer to 0 channels.
        if (depth_multiplier <= 0):
            raise ValueError('depth_multiplier must be greater than zero.')
        # Input geometry.
        self.num_channels = num_channels
        self.image_size = image_size
        # Channel-width scaling and rounding rules.
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        # Inverted-residual block configuration.
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # TensorFlow-style "SAME" padding compatibility flag.
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        # Initialisation / normalisation numerics.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Label index ignored by the semantic-segmentation loss.
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class AttModel(att_model.AttModel):
    """Attention captioning model whose linear/embedding layers are prunable.

    Overrides make_model to build every layer from Masked* variants so that
    supermask-based pruning can be applied.
    """

    def make_model(self):
        # Mask configuration shared by every prunable layer.
        mask_params = {'mask_type': self.config.prune_type, 'mask_init_value': self.config.prune_supermask_init}
        # Word embedding -> ReLU -> dropout.
        self.embed = nn.Sequential(MaskedEmbedding(self.vocab_size, self.input_encoding_size, **mask_params), nn.ReLU(), nn.Dropout(self.drop_prob_lm))
        # Global (fc) image feature projection.
        self.fc_embed = nn.Sequential(MaskedLinear(self.fc_feat_size, self.rnn_size, **mask_params), nn.ReLU(), nn.Dropout(self.drop_prob_lm))
        # Attention feature projection, with optional BatchNorm before
        # (use_bn truthy) and/or after (use_bn == 2) the linear block.
        self.att_embed = nn.Sequential(*((((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ()) + (MaskedLinear(self.att_feat_size, self.rnn_size, **mask_params), nn.ReLU(), nn.Dropout(self.drop_prob_lm))) + ((nn.BatchNorm1d(self.rnn_size),) if (self.use_bn == 2) else ())))
        # Output head: a single projection, or a stack of hidden blocks
        # followed by the final vocabulary projection.
        self.logit_layers = self.config.get('logit_layers', 1)
        if (self.logit_layers == 1):
            self.logit = MaskedLinear(self.rnn_size, self.vocab_size, **mask_params)
        else:
            self.logit = [[MaskedLinear(self.rnn_size, self.rnn_size, **mask_params), nn.ReLU(), nn.Dropout(self.drop_prob_lm)] for _ in range((self.config.logit_layers - 1))]
            # Flatten the list of [linear, relu, dropout] blocks and append
            # the final projection to the vocabulary.
            self.logit = nn.Sequential(*(reduce((lambda x, y: (x + y)), self.logit) + [MaskedLinear(self.rnn_size, self.vocab_size, **mask_params)]))
        # Context-to-attention projection used by the attention mechanism.
        self.ctx2att = MaskedLinear(self.rnn_size, self.att_hid_size, **mask_params)
def splitEdgePunct(input):
    """Insert a space between edge punctuation and the adjacent token text.

    Applies the module-level EdgePunctLeft / EdgePunctRight regexes, which
    are assumed to each capture three groups -- TODO confirm their patterns.
    """
    left_split = EdgePunctLeft.sub('\\1\\2 \\3', input)
    return EdgePunctRight.sub('\\1 \\2\\3', left_split)
class SilverArrow(BaseBow):
    """A silver arrow: weight 1, d6 damage, no hit bonus.

    The silver material presumably matters against specific monster types --
    TODO confirm in BaseBow / the combat rules.
    """

    def __init__(self):
        super().__init__('silver arrow', weight=1, damage=D.Dice.from_str('d6'), material=M.Silver, hit=0)
class Auxiliary_Rate(object):
    """Computes the rate of auxiliary-tagged tokens over all words.

    Works over sentence objects exposing a `pos_tag_counter` (with
    get_pos_tag_count) and a `num_words()` method.
    """

    def __init__(self, sentence_objs):
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return (# auxiliary tags) / (# words) across all sentences.

        Returns 0.0 when there are no words (previously raised
        ZeroDivisionError on empty input).
        """
        # Renamed from tot_num_advs: this metric counts auxiliaries.
        (tot_num_auxs, tot_num_words) = (0, 0)
        for so in self.sentence_objs:
            tot_num_auxs += so.pos_tag_counter.get_pos_tag_count(AUXILIARY)
            tot_num_words += so.num_words()
        if tot_num_words == 0:
            # Guard: no sentences (or only empty ones) -> rate is 0 by definition.
            return 0.0
        return (tot_num_auxs / tot_num_words)
def run_and_parse_first_match(run_lambda, command, regex):
    """Run `command` via `run_lambda` and return group 1 of the first match.

    Returns None when the command exits non-zero or the pattern does not
    match its stdout.
    """
    (rc, out, _) = run_lambda(command)
    if rc != 0:
        return None
    m = re.search(regex, out)
    return m.group(1) if m else None
def min_max_scaler(X: torch.Tensor, ft_min: float, ft_max: float) -> torch.Tensor:
    """Linearly rescale X so its min/max map exactly onto [ft_min, ft_max].

    Raises ZeroDivisionError if X is constant (zero range).
    """
    assert (ft_min < ft_max), 'The minimum value of the feature should be less than the maximum value.'
    data_min = X.min().item()
    data_max = X.max().item()
    # Affine map: scale first, then shift so data_min lands on ft_min.
    scale = (ft_max - ft_min) / (data_max - data_min)
    offset = ft_min - (data_min * scale)
    return (X * scale) + offset
class Case():
    """A record fetched from `url` whose parsed form is computed lazily.

    Subclasses are expected to provide `to_standard(raw)` -- the converter
    from the raw payload to the standard representation (TODO confirm).
    """

    def __init__(self, url, raw_data):
        self.url = url
        self.raw_data = raw_data
        # Parsed form; populated on the first get_data() call.
        self.data = None

    def extract_data(self):
        """Convert the raw payload into the standard representation."""
        return self.to_standard(self.raw_data)

    def get_data(self):
        """Return the parsed data, computing and caching it on first use."""
        if self.data is None:
            self.data = self.extract_data()
        return self.data
def printHelp():
    """Print usage information for the JSON-polygons-to-instance-image tool."""
    prog = os.path.basename(sys.argv[0])
    lines = [
        '{} [OPTIONS] inputJson outputImg'.format(prog),
        '',
        ' Reads labels as polygons in JSON format and converts them to instance images,',
        ' where each pixel has an ID that represents the ground truth class and the',
        ' individual instance of that class.',
        '',
        ' The pixel values encode both, class and the individual instance.',
        ' The integer part of a division by 1000 of each ID provides the class ID,',
        ' as described in labels.py. The remainder is the instance ID. If a certain',
        ' annotation describes multiple instances, then the pixels have the regular',
        ' ID of that class.',
        '',
        ' Example:',
        ' Let\'s say your labels.py assigns the ID 26 to the class "car".',
        ' Then, the individual cars in an image get the IDs 26000, 26001, 26002, ... .',
        ' A group of cars, where our annotators could not identify the individual',
        ' instances anymore, is assigned to the ID 26.',
        '',
        ' Note that not all classes distinguish instances (see labels.py for a full list).',
        ' The classes without instance annotations are always directly encoded with',
        ' their regular ID, e.g. 11 for "building".',
        '',
        'Options:',
        ' -h Print this help',
        ' -t Use the "trainIDs" instead of the regular mapping. See "labels.py" for details.',
    ]
    # One print per original line; joining preserves the exact output.
    print('\n'.join(lines))
def test_nl_head():
    """Smoke-test NLHead: layer structure and output segmentation shape."""
    seg_head = NLHead(in_channels=8, channels=4, num_classes=19)
    # Structural expectations: two conv layers plus a non-local block.
    assert len(seg_head.convs) == 2
    assert hasattr(seg_head, 'nl_block')
    feature_maps = [torch.randn(1, 8, 23, 23)]
    if torch.cuda.is_available():
        (seg_head, feature_maps) = to_cuda(seg_head, feature_maps)
    seg_logits = seg_head(feature_maps)
    # Spatial size is preserved; channel dim equals the number of classes.
    assert seg_logits.shape == (1, seg_head.num_classes, 23, 23)
class Refiner(nn.Module):
    """3D voxel-volume refiner: conv3d encoder/decoder with skip connections.

    Takes a coarse occupancy volume, encodes it through four down-sampling
    conv blocks, passes it through a fully-connected bottleneck, and decodes
    it back with transposed convs, adding the encoder features at each scale.
    """

    def __init__(self, args):
        super().__init__()
        # Encoder: each block is Conv3d -> BatchNorm -> LeakyReLU -> MaxPool,
        # halving the spatial resolution and doubling the channel count.
        self.rlayer1 = torch.nn.Sequential(torch.nn.Conv3d(1, 32, kernel_size=4, padding=(2, 2, 2)), torch.nn.BatchNorm3d(32), torch.nn.LeakyReLU(0.2), torch.nn.MaxPool3d(kernel_size=2))
        self.rlayer2 = torch.nn.Sequential(torch.nn.Conv3d(32, 64, kernel_size=4, padding=(2, 2, 2)), torch.nn.BatchNorm3d(64), torch.nn.LeakyReLU(0.2), torch.nn.MaxPool3d(kernel_size=2))
        self.rlayer3 = torch.nn.Sequential(torch.nn.Conv3d(64, 128, kernel_size=4, padding=(2, 2, 2)), torch.nn.BatchNorm3d(128), torch.nn.LeakyReLU(0.2), torch.nn.MaxPool3d(kernel_size=2))
        self.rlayer4 = torch.nn.Sequential(torch.nn.Conv3d(128, 256, kernel_size=4, padding=(2, 2, 2)), torch.nn.BatchNorm3d(256), torch.nn.LeakyReLU(0.2), torch.nn.MaxPool3d(kernel_size=2))
        # Bottleneck: flatten -> compress to 2048 -> expand back.
        self.rlayer5 = torch.nn.Sequential(torch.nn.Linear((8192 * 2), 2048), torch.nn.ReLU(True))
        self.rlayer6 = torch.nn.Sequential(torch.nn.Linear(2048, (8192 * 2)), torch.nn.ReLU(True))
        # Decoder: transposed convs doubling the spatial size per step;
        # final layer maps to a single-channel volume with Sigmoid.
        self.rlayer7 = torch.nn.Sequential(torch.nn.ConvTranspose3d(256, 128, kernel_size=4, stride=2, padding=(1, 1, 1)), torch.nn.BatchNorm3d(128), torch.nn.ReLU())
        self.rlayer8 = torch.nn.Sequential(torch.nn.ConvTranspose3d(128, 64, kernel_size=4, stride=2, padding=(1, 1, 1)), torch.nn.BatchNorm3d(64), torch.nn.ReLU())
        self.rlayer9 = torch.nn.Sequential(torch.nn.ConvTranspose3d(64, 32, kernel_size=4, stride=2, padding=(1, 1, 1)), torch.nn.BatchNorm3d(32), torch.nn.ReLU())
        self.rlayer10 = torch.nn.Sequential(torch.nn.ConvTranspose3d(32, 1, kernel_size=4, stride=2, padding=(1, 1, 1)), torch.nn.Sigmoid())

    def forward(self, coarse_volumes):
        # Encoder path (suffix _l = "left"/encoder side).
        volumes_32_l = self.rlayer1(coarse_volumes)
        volumes_16_l = self.rlayer2(volumes_32_l)
        volumes_8_l = self.rlayer3(volumes_16_l)
        volumes_4_l = self.rlayer4(volumes_8_l)
        # Bottleneck over the flattened 256x4x4x4 encoding.
        flatten_features = self.rlayer5(volumes_4_l.view((- 1), (8192 * 2)))
        flatten_features = self.rlayer6(flatten_features)
        # Decoder path (suffix _r = "right"/decoder side) with additive skips.
        volumes_4_r = (volumes_4_l + flatten_features.view((- 1), 256, 4, 4, 4))
        volumes_8_r = (volumes_8_l + self.rlayer7(volumes_4_r))
        volumes_16_r = (volumes_16_l + self.rlayer8(volumes_8_r))
        volumes_32_r = (volumes_32_l + self.rlayer9(volumes_16_r))
        # Average the refined output with the input volume...
        volumes_64_r = ((coarse_volumes + self.rlayer10(volumes_32_r)) * 0.5)
        # ...then rescale; NOTE(review): the *4 - 2 affine output range looks
        # deliberate but is not explained here -- confirm against the loss.
        return ((volumes_64_r * 4) - 2)
class LTRTrainer(BaseTrainer):
    """Trainer that cycles an actor over one or more data loaders per epoch.

    Supports mixed precision (torch AMP), gradient clipping, distributed
    loaders, and logging to TensorBoard and (optionally) wandb.
    """

    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):
        """
        Args:
            actor: callable wrapping the network; returns (loss, stats).
            loaders: list of train/val loaders cycled each epoch.
            optimizer: optimizer for the actor's network.
            settings: training settings (env paths, print intervals, etc.).
            lr_scheduler: optional LR scheduler (queried for logging).
            use_amp: enable automatic mixed precision with a GradScaler.
        """
        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)
        self._set_default_settings()
        # Per-loader stats, (re)initialised lazily in _update_stats.
        self.stats = OrderedDict({loader.name: None for loader in self.loaders})
        self.wandb_writer = None
        # Only the main process (rank -1 = non-distributed, or rank 0) logs.
        if (settings.local_rank in [(- 1), 0]):
            tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)
            if (not os.path.exists(tensorboard_writer_dir)):
                os.makedirs(tensorboard_writer_dir)
            self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])
            if settings.use_wandb:
                world_size = get_world_size()
                # Number of samples already consumed when resuming training.
                cur_train_samples = (self.loaders[0].dataset.samples_per_epoch * max(0, (self.epoch - 1)))
                interval = (world_size * settings.batchsize)
                self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)
        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)
        self.settings = settings
        self.use_amp = use_amp
        if use_amp:
            self.scaler = GradScaler()

    def _set_default_settings(self):
        """Fill in defaults for settings attributes that were not provided."""
        default = {'print_interval': 10, 'print_stats': None, 'description': ''}
        for (param, default_value) in default.items():
            if (getattr(self.settings, param, None) is None):
                setattr(self.settings, param, default_value)

    def cycle_dataset(self, loader):
        """Run one pass over `loader` (training or validation)."""
        self.actor.train(loader.training)
        torch.set_grad_enabled(loader.training)
        self._init_timing()
        for (i, data) in enumerate(loader, 1):
            # Timestamps for data-loading / GPU-transfer / forward timing.
            self.data_read_done_time = time.time()
            if self.move_data_to_gpu:
                data = data.to(self.device)
            self.data_to_gpu_time = time.time()
            data['epoch'] = self.epoch
            data['settings'] = self.settings
            # Forward pass (under autocast when AMP is enabled).
            if (not self.use_amp):
                (loss, stats) = self.actor(data)
            else:
                with autocast():
                    (loss, stats) = self.actor(data)
            # Actor signals a bad batch with the literal string 'Nan'.
            if (loss == 'Nan'):
                print('Meet nan here:{}'.format(loss))
                continue
            if loader.training:
                self.optimizer.zero_grad()
                if (not self.use_amp):
                    loss.backward()
                    if (self.settings.grad_clip_norm > 0):
                        torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)
                    self.optimizer.step()
                else:
                    # AMP path: scale, (optionally) unscale for clipping,
                    # then step through the scaler.
                    self.scaler.scale(loss).backward()
                    if (self.settings.grad_clip_norm > 0):
                        self.scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
            batch_size = data['template_images'].shape[loader.stack_dim]
            self._update_stats(stats, batch_size, loader)
            self._print_stats(i, loader, batch_size)
            if ((self.wandb_writer is not None) and ((i % self.settings.print_interval) == 0)):
                if (self.settings.local_rank in [(- 1), 0]):
                    self.wandb_writer.write_log(self.stats, self.epoch)
        # End-of-pass timing summary (per-batch averages).
        epoch_time = (self.prev_time - self.start_time)
        print(('Epoch Time: ' + str(datetime.timedelta(seconds=epoch_time))))
        print(('Avg Data Time: %.5f' % ((self.avg_date_time / self.num_frames) * batch_size)))
        print(('Avg GPU Trans Time: %.5f' % ((self.avg_gpu_trans_time / self.num_frames) * batch_size)))
        print(('Avg Forward Time: %.5f' % ((self.avg_forward_time / self.num_frames) * batch_size)))

    def train_epoch(self):
        """Run each loader that is due this epoch, then log epoch stats."""
        for loader in self.loaders:
            if ((self.epoch % loader.epoch_interval) == 0):
                # Distributed samplers need the epoch for deterministic shuffling.
                if isinstance(loader.sampler, DistributedSampler):
                    loader.sampler.set_epoch(self.epoch)
                self.cycle_dataset(loader)
        self._stats_new_epoch()
        if (self.settings.local_rank in [(- 1), 0]):
            self._write_tensorboard()

    def _init_timing(self):
        """Reset frame counters and timing accumulators for a new pass."""
        self.num_frames = 0
        self.start_time = time.time()
        self.prev_time = self.start_time
        # NOTE(review): 'date' here is presumably a typo for 'data'.
        self.avg_date_time = 0
        self.avg_gpu_trans_time = 0
        self.avg_forward_time = 0

    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):
        """Fold this batch's stats (and current LRs) into running averages."""
        # Lazily initialise the per-loader stat dict on first use.
        if ((loader.name not in self.stats.keys()) or (self.stats[loader.name] is None)):
            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})
        if loader.training:
            lr_list = self.lr_scheduler.get_last_lr()
            for (i, lr) in enumerate(lr_list):
                var_name = 'LearningRate/group{}'.format(i)
                if (var_name not in self.stats[loader.name].keys()):
                    self.stats[loader.name][var_name] = StatValue()
                self.stats[loader.name][var_name].update(lr)
        for (name, val) in new_stats.items():
            if (name not in self.stats[loader.name].keys()):
                self.stats[loader.name][name] = AverageMeter()
            self.stats[loader.name][name].update(val, batch_size)

    def _print_stats(self, i, loader, batch_size):
        """Accumulate timing and periodically print/append a progress line."""
        self.num_frames += batch_size
        current_time = time.time()
        batch_fps = (batch_size / (current_time - self.prev_time))
        average_fps = (self.num_frames / (current_time - self.start_time))
        prev_frame_time_backup = self.prev_time
        self.prev_time = current_time
        # Split the wall time of this batch into load / transfer / forward.
        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)
        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)
        self.avg_forward_time += (current_time - self.data_to_gpu_time)
        if (((i % self.settings.print_interval) == 0) or (i == loader.__len__())):
            print_str = ('[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__()))
            print_str += ('FPS: %.1f (%.1f) , ' % (average_fps, batch_fps))
            print_str += ('DataTime: %.3f (%.3f) , ' % (((self.avg_date_time / self.num_frames) * batch_size), ((self.avg_gpu_trans_time / self.num_frames) * batch_size)))
            print_str += ('ForwardTime: %.3f , ' % ((self.avg_forward_time / self.num_frames) * batch_size))
            print_str += ('TotalTime: %.3f , ' % (((current_time - self.start_time) / self.num_frames) * batch_size))
            for (name, val) in self.stats[loader.name].items():
                if ((self.settings.print_stats is None) or (name in self.settings.print_stats)):
                    if hasattr(val, 'avg'):
                        print_str += ('%s: %.5f , ' % (name, val.avg))
            # Strip the trailing ' , ' separator before printing/logging.
            print(print_str[:(- 5)])
            log_str = (print_str[:(- 5)] + '\n')
            with open(self.settings.log_file, 'a') as f:
                f.write(log_str)

    def _stats_new_epoch(self):
        """Record end-of-epoch learning rates and roll stat meters over."""
        for loader in self.loaders:
            if loader.training:
                try:
                    lr_list = self.lr_scheduler.get_last_lr()
                except:
                    # Older scheduler API fallback.
                    lr_list = self.lr_scheduler._get_lr(self.epoch)
                for (i, lr) in enumerate(lr_list):
                    var_name = 'LearningRate/group{}'.format(i)
                    if (var_name not in self.stats[loader.name].keys()):
                        self.stats[loader.name][var_name] = StatValue()
                    self.stats[loader.name][var_name].update(lr)
        for loader_stats in self.stats.values():
            if (loader_stats is None):
                continue
            for stat_value in loader_stats.values():
                if hasattr(stat_value, 'new_epoch'):
                    stat_value.new_epoch()

    def _write_tensorboard(self):
        """Flush this epoch's stats to TensorBoard (run info on epoch 1)."""
        if (self.epoch == 1):
            self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description)
        self.tensorboard_writer.write_epoch(self.stats, self.epoch)
def beit_forward_features(self, x):
    """BEiT-style backbone forward pass returning per-token features.

    Patch-embeds the image, prepends the [CLS] token, applies (optional)
    absolute position embeddings and dropout, then runs all transformer
    blocks with an optional shared relative position bias.
    """
    resolution = x.shape[2:]  # spatial (H, W) before patchification
    x = self.patch_embed(x)
    # Prepend the learnable [CLS] token to every sequence in the batch.
    x = torch.cat((self.cls_token.expand(x.shape[0], (- 1), (- 1)), x), dim=1)
    if (self.pos_embed is not None):
        x = (x + self.pos_embed)
    x = self.pos_drop(x)
    # Shared relative position bias, if the model defines one.
    rel_pos_bias = (self.rel_pos_bias() if (self.rel_pos_bias is not None) else None)
    for blk in self.blocks:
        if (self.grad_checkpointing and (not torch.jit.is_scripting())):
            # NOTE(review): unlike the non-checkpointed call below, this does
            # not pass `resolution` to the block -- confirm the block accepts
            # that (looks like a possible bug under gradient checkpointing).
            x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
        else:
            x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias)
    x = self.norm(x)
    return x
class QueryNERProcessor(object):
    """Loads MRC-NER examples for the train/dev/test splits from a data dir."""

    def _load_split(self, data_dir, split_file):
        # All three splits share the same loading logic.
        return read_mrc_ner_examples(os.path.join(data_dir, split_file))

    def get_train_examples(self, data_dir):
        """Return examples from mrc-ner.train."""
        return self._load_split(data_dir, 'mrc-ner.train')

    def get_dev_examples(self, data_dir):
        """Return examples from mrc-ner.dev."""
        return self._load_split(data_dir, 'mrc-ner.dev')

    def get_test_examples(self, data_dir):
        """Return examples from mrc-ner.test."""
        return self._load_split(data_dir, 'mrc-ner.test')
def deterministic_cdf_oracle(observed_x, gamma):
    """Evaluate the deterministic oracle element-wise over observed_x.

    Returns a float tensor with one weight per row of observed_x.
    """
    num_obs = observed_x.shape[0]
    weights = torch.zeros(num_obs, dtype=torch.float)
    for idx in range(num_obs):
        weights[idx] = deterministic_one(observed_x[idx], gamma)
    return weights
def contLoss(y_true, y_pred):
    """Total contrastive loss: sum of the between- and within-class terms."""
    return betweenLoss(y_true, y_pred) + withinLoss(y_true, y_pred)
class _ExpensivePotentials():
    """Lazy loader for galpy potentials that are expensive to construct.

    Each accessor imports/builds its potential on first use and caches the
    result on the instance; later calls return the cached object. Unknown
    attribute lookups fall back to the globals table built in __init__.
    """

    def __init__(self):
        # Caches: None until the corresponding accessor is first called.
        self._mcmillan17 = None
        self._cautun20 = None
        self._irrgang13i = None
        self._irrgang13ii = None
        self._irrgang13iii = None
        self._dehnenbinney98i = None
        self._dehnenbinney98ii = None
        self._dehnenbinney98iii = None
        self._dehnenbinney98iv = None
        # Fallback namespace consulted by __getattr__.
        self.__globals__ = _setup_globals()

    def __dir__(self):
        # Advertise only the public accessors.
        return ['McMillan17', 'Irrgang13I', 'Irrgang13II', 'Irrgang13III', 'Cautun20', 'DehnenBinney98I', 'DehnenBinney98II', 'DehnenBinney98III', 'DehnenBinney98IV']

    def McMillan17(self):
        """Import and cache the McMillan (2017) potential."""
        if (not self._mcmillan17):
            from galpy.potential.McMillan17 import McMillan17 as _McMillan17
            self._mcmillan17 = _McMillan17
        return self._mcmillan17

    def Cautun20(self):
        """Import and cache the Cautun (2020) potential."""
        if (not self._cautun20):
            from galpy.potential.Cautun20 import Cautun20 as _Cautun20
            self._cautun20 = _Cautun20
        return self._cautun20

    def Irrgang13I(self):
        """Import and cache Irrgang (2013) model I."""
        if (not self._irrgang13i):
            from galpy.potential.Irrgang13 import Irrgang13I as _Irrgang13I
            self._irrgang13i = _Irrgang13I
        return self._irrgang13i

    def Irrgang13II(self):
        """Import and cache Irrgang (2013) model II."""
        if (not self._irrgang13ii):
            from galpy.potential.Irrgang13 import Irrgang13II as _Irrgang13II
            self._irrgang13ii = _Irrgang13II
        return self._irrgang13ii

    def Irrgang13III(self):
        """Import and cache Irrgang (2013) model III."""
        if (not self._irrgang13iii):
            from galpy.potential.Irrgang13 import Irrgang13III as _Irrgang13III
            self._irrgang13iii = _Irrgang13III
        return self._irrgang13iii

    def DehnenBinney98I(self):
        """Build and cache Dehnen & Binney (1998) model 1."""
        from galpy.potential.DehnenBinney98 import define_dehnenbinney98_models
        if (not self._dehnenbinney98i):
            self._dehnenbinney98i = define_dehnenbinney98_models(model=1)
        return self._dehnenbinney98i

    def DehnenBinney98II(self):
        """Build and cache Dehnen & Binney (1998) model 2."""
        from galpy.potential.DehnenBinney98 import define_dehnenbinney98_models
        if (not self._dehnenbinney98ii):
            self._dehnenbinney98ii = define_dehnenbinney98_models(model=2)
        return self._dehnenbinney98ii

    def DehnenBinney98III(self):
        """Build and cache Dehnen & Binney (1998) model 3."""
        from galpy.potential.DehnenBinney98 import define_dehnenbinney98_models
        if (not self._dehnenbinney98iii):
            self._dehnenbinney98iii = define_dehnenbinney98_models(model=3)
        return self._dehnenbinney98iii

    def DehnenBinney98IV(self):
        """Build and cache Dehnen & Binney (1998) model 4."""
        from galpy.potential.DehnenBinney98 import define_dehnenbinney98_models
        if (not self._dehnenbinney98iv):
            self._dehnenbinney98iv = define_dehnenbinney98_models(model=4)
        return self._dehnenbinney98iv

    def __getattr__(self, name):
        try:
            return self.__globals__[name]
        except KeyError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and masked real errors; only a
            # missing key means the attribute does not exist.
            raise AttributeError(f"'module' object has no attribute '{name}'") from None
class ConvEL():
    """Conversational entity linking.

    Runs BERT-based mention detection plus entity disambiguation on each
    USER turn, and additionally links personal entity mentions (PEMs,
    e.g. "my hometown") to explicit entity mentions seen earlier in the
    conversation. Per-conversation state lives in `conv_hist_for_pe` and
    `ment2ent` and is reset by `annotate`.
    """

    def __init__(self, base_url='.', wiki_version='wiki_2019', ed_model=None, user_config=None, threshold=0, ner_model='bert_conv-td'):
        # threshold: score cutoff forwarded to PE-linking post-processing.
        # NOTE(review): `user_config` is accepted but never used — confirm.
        self.threshold = threshold
        self.wiki_version = wiki_version
        self.base_url = base_url
        # Mention-detection model loaded from <base_url>/<ner_model>.
        self.file_pretrained = str((Path(base_url) / ner_model))
        self.bert_md = BERT_MD(self.file_pretrained)
        if (not ed_model):
            ed_model = self._default_ed_model()
        self.response_handler = ResponseHandler(self.base_url, self.wiki_version, model=ed_model)
        # Personal-entity pipeline: scorer (EEMD), PEM detector (PEMD),
        # and the pre/post-processing helpers around them.
        self.eemd = EEMD(s2e_pe_model=str((Path(base_url) / 's2e_ast_onto')))
        self.pemd = PEMD()
        self.preprocess = pe_data.PreProcess()
        self.postprocess = pe_data.PostProcess()
        # Per-conversation state (reset in annotate()):
        # conv_hist_for_pe: one dict per turn (speaker/utterance/mentions/pems)
        # ment2ent: mention surface form -> linked entity
        self.conv_hist_for_pe = []
        self.ment2ent = {}

    def _default_ed_model(self):
        """Build the default entity-disambiguation model in eval mode."""
        from REL.entity_disambiguation import EntityDisambiguation
        return EntityDisambiguation(self.base_url, self.wiki_version, user_config={'mode': 'eval', 'model_path': f'{self.base_url}/{self.wiki_version}/generated/model'})

    def _error_check(self, conv):
        """Validate that `conv` is a list of {'speaker', 'utterance'} dicts."""
        assert (type(conv) == list)
        for turn in conv:
            assert (type(turn) == dict)
            assert (set(turn.keys()) == {'speaker', 'utterance'})
            assert (turn['speaker'] in ['USER', 'SYSTEM']), f"""Speaker should be either "USER" or "SYSTEM", but got {turn['speaker']}"""

    def _el(self, utt):
        """Mention detection + entity disambiguation for one utterance.

        Side effects: stores the detected mentions on the current turn of
        `conv_hist_for_pe` and extends `ment2ent`.
        Returns [start, length, mention, entity]-style rows (first 4 fields).
        """
        md_results = self.bert_md.md(utt)
        spans = [[r[0], r[1]] for r in md_results]
        el_results = self.ed(utt, spans)
        self.conv_hist_for_pe[(- 1)]['mentions'] = [r[2] for r in el_results]
        self.ment2ent.update({r[2]: r[3] for r in el_results})
        return [r[:4] for r in el_results]

    def _pe(self, utt):
        """Personal-entity linking for one USER utterance.

        Scores each detected PEM against earlier explicit mentions (one PEM
        at a time, as required by the current implementation) and maps the
        winning mention to its entity via `ment2ent`.
        """
        ret = []
        pem_results = self.pemd.pem_detector(utt)
        pem2result = {r[2]: r for r in pem_results}
        outputs = []
        for (_, _, pem) in pem_results:
            # Expose exactly one target PEM to the preprocessing step.
            self.conv_hist_for_pe[(- 1)]['pems'] = [pem]
            token_with_info = self.preprocess.get_tokens_with_info(self.conv_hist_for_pe)
            input_data = self.preprocess.get_input_of_pe_linking(token_with_info)
            assert (len(input_data) == 1), f'Current implementation can handle only one target PEM at a time'
            input_data = input_data[0]
            scores = self.eemd.get_scores(input_data)
            outputs += self.postprocess.get_results(input_data, self.conv_hist_for_pe, self.threshold, scores)
            # Clear the PEM slot so later turns start clean.
            self.conv_hist_for_pe[(- 1)]['pems'] = []
        for r in outputs:
            pem = r['personal_entity_mention']
            pem_result = pem2result[pem]
            eem = r['mention']
            ent = self.ment2ent[eem]
            ret.append([pem_result[0], pem_result[1], pem_result[2], ent])
        return ret

    def annotate(self, conv):
        """Annotate a conversation; only USER turns receive annotations.

        Returns the conversation with an 'annotations' list (EL + PE
        results) added to each USER turn.
        """
        self._error_check(conv)
        ret = []
        # Reset per-conversation state.
        self.conv_hist_for_pe = []
        self.ment2ent = {}
        for turn in conv:
            utt = turn['utterance']
            assert (turn['speaker'] in ['USER', 'SYSTEM']), f"""Speaker should be either "USER" or "SYSTEM", but got {turn['speaker']}"""
            ret.append({'speaker': turn['speaker'], 'utterance': utt})
            self.conv_hist_for_pe.append({})
            self.conv_hist_for_pe[(- 1)]['speaker'] = turn['speaker']
            self.conv_hist_for_pe[(- 1)]['utterance'] = utt
            if (turn['speaker'] == 'USER'):
                el_results = self._el(utt)
                pe_results = self._pe(utt)
                ret[(- 1)]['annotations'] = (el_results + pe_results)
        return ret

    def ed(self, text, spans):
        """Entity disambiguation for pre-detected spans in `text`."""
        response = self.response_handler.generate_response(text=text, spans=spans)
        return [list(ent) for ent in response]
def resnet_main(flags_obj, model_function, input_function, dataset_name, shape=None):
    """Set up a tf.estimator.Estimator for ResNet and optionally export it.

    Builds the RunConfig (checkpointing once per day), optional warm-start
    from a pretrained checkpoint (excluding dense layers), the Estimator
    itself, and — if `flags_obj.export_dir` is set — a SavedModel export.

    NOTE(review): `input_fn_eval`, `run_params` and `(schedule, n_loops)`
    are defined but never used below — the training/evaluation loop appears
    to have been stripped from this version; confirm against history.
    `dataset_name` is likewise unused here.
    """
    print('RESNET MAIN')
    model_helpers.apply_clean(flags.FLAGS)
    if flags_obj.tf_gpu_thread_mode:
        override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)
    session_config = tf.ConfigProto(allow_soft_placement=True)
    # Checkpoint at most once every 24 hours.
    run_config = tf.estimator.RunConfig(session_config=session_config, save_checkpoints_secs=((60 * 60) * 24))
    if (flags_obj.pretrained_model_checkpoint_path is not None):
        # Warm-start everything except the final dense (classification) layer.
        warm_start_settings = tf.estimator.WarmStartSettings(flags_obj.pretrained_model_checkpoint_path, vars_to_warm_start='^(?!.*dense)')
    else:
        warm_start_settings = None
    classifier = tf.estimator.Estimator(model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config, warm_start_from=warm_start_settings, params={'resnet_size': int(flags_obj.resnet_size), 'data_format': flags_obj.data_format, 'batch_size': flags_obj.batch_size, 'resnet_version': int(flags_obj.resnet_version), 'loss_scale': flags_core.get_loss_scale(flags_obj), 'dtype': flags_core.get_tf_dtype(flags_obj), 'fine_tune': flags_obj.fine_tune})
    run_params = {'batch_size': flags_obj.batch_size, 'dtype': flags_core.get_tf_dtype(flags_obj), 'resnet_size': flags_obj.resnet_size, 'resnet_version': flags_obj.resnet_version, 'synthetic_data': flags_obj.use_synthetic_data, 'train_epochs': flags_obj.train_epochs}
    def input_fn_eval():
        # Single-epoch evaluation input pipeline (per-device batch size).
        return input_function(is_training=False, data_dir=flags_obj.data_dir, batch_size=distribution_utils.per_device_batch_size(flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)), num_epochs=1, dtype=flags_core.get_tf_dtype(flags_obj))
    (schedule, n_loops) = ([0], 1)
    if (flags_obj.export_dir is not None):
        export_dtype = flags_core.get_tf_dtype(flags_obj)
        if flags_obj.image_bytes_as_serving_input:
            input_receiver_fn = functools.partial(image_bytes_serving_input_fn, shape, dtype=export_dtype)
        else:
            input_receiver_fn = export.build_tensor_serving_input_receiver_fn(shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
        classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn, strip_default_attrs=True)
class DataSet(aicnn.DataSet):
    """aicnn.DataSet variant that replicates grayscale input into
    `n_channels` channels (for 3-channel pretrained backbones)."""

    def __init__(self, X, y, nb_classes, n_channels=3, scaling=True, test_size=0.2, random_state=0):
        self.n_channels = n_channels
        super().__init__(X, y, nb_classes, scaling=scaling, test_size=test_size, random_state=random_state)

    def add_channels(self):
        """Expand self.X to `n_channels` channels, apply preprocess_input,
        and record self.input_shape in the active dim ordering."""
        n_channels = self.n_channels
        if n_channels == 1:
            # Single-channel behavior is handled by the base class.
            super().add_channels()
            return
        X = self.X
        channels_first = (K.image_dim_ordering() == 'th')
        if X.ndim < 4:
            # No channel axis yet: add one, then replicate it three times.
            N, img_rows, img_cols = X.shape
            if channels_first:
                X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                X = np.concatenate([X, X, X], axis=1)
                input_shape = (n_channels, img_rows, img_cols)
            else:
                X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                X = np.concatenate([X, X, X], axis=3)
                input_shape = (img_rows, img_cols, n_channels)
        elif channels_first:
            N, Ch, img_rows, img_cols = X.shape
            if Ch == 1:
                X = np.concatenate([X, X, X], axis=1)
            input_shape = (n_channels, img_rows, img_cols)
        else:
            N, img_rows, img_cols, Ch = X.shape
            if Ch == 1:
                X = np.concatenate([X, X, X], axis=3)
            input_shape = (img_rows, img_cols, n_channels)
        self.X = preprocess_input(X)
        self.input_shape = input_shape
def prepare_model(input_model, output_model):
    """Export a pretrained torchvision ResNet-50 to ONNX at `output_model`.

    NOTE(review): `input_model` is accepted but never used — the exported
    network is always the pretrained ResNet-50; confirm whether it was
    meant to be loaded from `input_model`.
    """
    net = torchvision.models.resnet50(pretrained=True)
    # Dummy batch-of-one input; the batch axis is exported as dynamic.
    dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True)
    torch.onnx.export(net, dummy_input, output_model, export_params=True, opset_version=14, do_constant_folding=True, input_names=['input'], output_names=['output'], dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
    assert os.path.exists(output_model), f"Export failed! {output_model} doesn't exist!"
class Parameters(ParamaterNotValid):
    """Parameter container assembled from the system/experiment modules,
    with validity checks before and after derived-parameter computation.

    NOTE(review): the base class `ParamaterNotValid` looks like an
    exception type — inheriting a parameter container from it is
    suspicious; confirm the intended base class.
    """
    # Parameter-loading helpers mixed in from sibling modules.
    from .system import includeSystem
    from .experiment import includeExperiment
    from .derived import computeDerived

    def __init__(self, includeDerived=True, update=None):
        """Load defaults, apply `update` overrides, validate, and
        (optionally) compute and re-validate derived parameters."""
        self.includeSystem()
        self.includeExperiment()
        if (update is not None):
            for (parameter, value) in update.items():
                setattr(self, parameter, value)
        self.vadilityCheckBefore()
        if includeDerived:
            self.computeDerived()
            self.vadilityCheckAfter()

    def __str__(self):
        """Transform all available parameters to string table format for printing."""
        parNames = dir(self)
        parTable = []
        for pn in parNames:
            # Skip dunders and numpy helpers pulled in via the mixin modules.
            if (pn.startswith('__') or pn.startswith('np')):
                continue
            parTable.append([pn, getattr(self, pn)])
        return tabulate(parTable)

    def vadilityCheckBefore(self):
        """Check if values make sense BEFORE calculating derived parameters
        and raise an error if not.

        BUG FIX: some branches raised `ParameterNotValid` (a name not defined
        here) while others raised the inherited `ParamaterNotValid`; the
        misspelled-but-existing name is now used consistently, so validation
        failures no longer surface as NameError.
        """
        if ((self.inputNumTargetNeurons is None) and (self.inputShareTargetNeurons is None)):
            raise ParamaterNotValid('inputNumTargetNeurons and inputShareTargetNeurons are both set to None. Specify one of them.')
        if ((self.inputNumTargetNeurons is not None) and (self.inputShareTargetNeurons is not None)):
            raise ParamaterNotValid('Either inputNumTargetNeurons or inputShareTargetNeurons can be set, not both. Set one of them to None to avoid ambiguity.')
        if ((self.reservoirConnProb is None) and (self.reservoirConnPerNeuron is None)):
            raise ParamaterNotValid('reservoirConnProb and reservoirConnPerNeuron are both set to None. Specify one of them.')
        if ((self.reservoirConnProb is not None) and (self.reservoirConnPerNeuron is not None)):
            raise ParamaterNotValid('Either reservoirConnProb or reservoirConnPerNeuron can be set, not both. Set one of them to None to avoid ambiguity.')

    def vadilityCheckAfter(self):
        """Check if values make sense AFTER calculating derived parameters
        and raise an error if not."""
        if (self.reservoirConnPerNeuron > (self.reservoirExSize + self.reservoirInSize)):
            # BUG FIX: the message stated the inverted condition
            # ("must be larger") for this check.
            raise ParamaterNotValid('Number of connections per neuron must not be larger than number of neurons in the network.')
        if (self.inputNumTargetNeurons > self.reservoirSize):
            raise ParamaterNotValid('Input size is too large, cannot be larger than network size.')
        if (int((self.reservoirExSize / self.neuronsPerCore)) > (self.numChips * self.numCoresPerChip)):
            raise ParamaterNotValid('Number of cores exceeded, increase number of neurons per core.')
class Discriminator(nn.Module):
    """CNN discriminator: a stack of BasicBlocks followed by a two-layer
    classifier producing a single logit per image."""

    def __init__(self, args, gan_type='GAN'):
        super(Discriminator, self).__init__()
        in_channels = 3
        out_channels = 64
        depth = 7
        bn = True
        act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # First block maps the input colors to the base width.
        m_features = [common.BasicBlock(args.n_colors, out_channels, 3, bn=bn, act=act)]
        for i in range(depth):
            in_channels = out_channels
            if i % 2 == 1:
                # Odd blocks double the width without downsampling.
                stride, out_channels = 1, out_channels * 2
            else:
                # Even blocks halve the spatial resolution.
                stride = 2
            m_features.append(common.BasicBlock(in_channels, out_channels, 3, stride=stride, bn=bn, act=act))
        self.features = nn.Sequential(*m_features)
        # Spatial size remaining after the (depth + 1) // 2 stride-2 blocks.
        patch_size = args.patch_size // (2 ** ((depth + 1) // 2))
        m_classifier = [nn.Linear(out_channels * (patch_size ** 2), 1024), act, nn.Linear(1024, 1)]
        self.classifier = nn.Sequential(*m_classifier)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
def parse_tuning_log(line, url_dict):
    """Build the download URL of a tuning log from one ';'-separated record.

    Only the framework (field 3) and model (field 5) are used; `url_dict`
    maps '<framework>_<model>' to the base URL the log file name is
    appended to. The record must have exactly ten fields.
    """
    (_os, _platform, framework, _version, model, _strategy, _tune_time, _trials, _url, _tail) = line.split(';')
    log_name = f'{framework}-{model}-tune.log'
    base_url = url_dict.get(f'{framework}_{model}')
    return f'{base_url}{log_name}'
def pysptk_featurize(audiofile):
    """Extract summary statistics of pitch (SWIPE and RAPT) and a
    mel-generalized cepstrum from a WAV file.

    Returns:
        (features, labels): a flat list of statistics and the matching
        label names, built by the module-level `stats`/`stats_labels`.
    """
    labels = list()
    features = list()
    (fs, x) = wavfile.read(audiofile)
    # Pitch via SWIPE (float64 input required by pysptk).
    f0_swipe = pysptk.swipe(x.astype(np.float64), fs=fs, hopsize=80, min=60, max=200, otype='f0')
    features = (features + stats(f0_swipe))
    labels = stats_labels('f0_swipe', labels)
    # Pitch via RAPT (float32 input required by pysptk).
    f0_rapt = pysptk.rapt(x.astype(np.float32), fs=fs, hopsize=80, min=60, max=200, otype='f0')
    features = (features + stats(f0_rapt))
    labels = stats_labels('f0_rapt', labels)
    # BUG FIX: `xw` was undefined here (NameError at runtime); use the
    # float64 signal instead. NOTE(review): pysptk.mgcep is normally fed
    # windowed frames — confirm whether framing/windowing was intended.
    mgc = pysptk.mgcep(x.astype(np.float64), 20, 0.0, 0.0)
    features = (features + stats(mgc))
    labels = stats_labels('mel-spectrum envelope', labels)
    return (features, labels)
def test_intree_extensions_package_dir(monkeypatch, tmpdir):
    """intree_extensions must honor package_dir when deriving dotted names."""
    monkeypatch.syspath_prepend(MAIN_DIR)
    from pybind11.setup_helpers import intree_extensions
    monkeypatch.chdir(tmpdir)
    root = tmpdir / 'src'
    root.ensure_dir()
    subdir = root / 'dir'
    subdir.ensure_dir()
    src = subdir / 'ext.cpp'
    src.ensure()

    def extension_name(package_dir):
        (ext,) = intree_extensions([src.relto(tmpdir)], package_dir=package_dir)
        return ext.name

    # Without a package __init__.py ...
    assert extension_name({'': 'src'}) == 'dir.ext'
    assert extension_name({'foo': 'src'}) == 'foo.dir.ext'
    # ... and with one, the derived names are identical.
    subdir.ensure('__init__.py')
    assert extension_name({'': 'src'}) == 'dir.ext'
    assert extension_name({'foo': 'src'}) == 'foo.dir.ext'
def test_sequence():
    """Exercise the bound C++ Sequence type: construction, indexing,
    containment, reversal (both via reversed() and slicing), slice
    assignment, and object lifetime via ConstructorStats."""
    cstats = ConstructorStats.get(m.Sequence)
    s = m.Sequence(5)
    assert (cstats.values() == ['of size', '5'])
    assert ('Sequence' in repr(s))
    assert (len(s) == 5)
    assert ((s[0] == 0) and (s[3] == 0))
    assert (12.34 not in s)
    (s[0], s[3]) = (12.34, 56.78)
    assert (12.34 in s)
    assert (s[0] == approx(12.34, rel=1e-05))
    assert (s[3] == approx(56.78, rel=1e-05))
    # Neither reversal path may construct a new Sequence eagerly.
    rev = reversed(s)
    assert (cstats.values() == ['of size', '5'])
    rev2 = s[::(- 1)]
    assert (cstats.values() == ['of size', '5'])
    # An exhausted iterator must keep raising StopIteration on every call.
    it = iter(m.Sequence(0))
    for _ in range(3):
        with pytest.raises(StopIteration):
            next(it)
    assert (cstats.values() == ['of size', '0'])
    expected = [0, 56.78, 0, 0, 12.34]
    assert (rev == approx(expected, rel=1e-05))
    assert (rev2 == approx(expected, rel=1e-05))
    assert (rev == rev2)
    # Extended-slice assignment from a std::vector-backed Sequence.
    rev[0::2] = m.Sequence([2.0, 2.0, 2.0])
    assert (cstats.values() == ['of size', '3', 'from std::vector'])
    assert (rev == approx([2, 56.78, 2, 0, 2], rel=1e-05))
    # Lifetime accounting: each del must release exactly one instance.
    assert (cstats.alive() == 4)
    del it
    assert (cstats.alive() == 3)
    del s
    assert (cstats.alive() == 2)
    del rev
    assert (cstats.alive() == 1)
    del rev2
    assert (cstats.alive() == 0)
    assert (cstats.values() == [])
    assert (cstats.default_constructions == 0)
    assert (cstats.copy_constructions == 0)
    assert (cstats.move_constructions >= 1)
    assert (cstats.copy_assignments == 0)
    assert (cstats.move_assignments == 0)
def test_funcall_kwarg_3():
    """Passing x as a keyword argument (`f(x=x)`) must still register a
    dependency of y on x, so reassigning x makes y stale."""
    for cell in (
        '\n def f(x):\n return 2 * x + 8\n ',
        'x = 7',
        'y = f(x=x)',
        'x = 8',
        'logging.info(y)',
    ):
        run_cell(cell)
    assert_detected('`y` depends on stale `x`')
def is_protobuf_available():
    """Return True iff `google.protobuf` is importable.

    The `google` namespace package is probed first because `find_spec` on a
    submodule would fail when its parent package is absent.
    """
    return (importlib.util.find_spec('google') is not None
            and importlib.util.find_spec('google.protobuf') is not None)
def descnext(desc):
    """Queue `desc` as the description for the next progress display.

    A no-op when verbose output is disabled or tqdm is unavailable.
    """
    global next_description
    if default_verbosity and (tqdm is not None):
        next_description = desc
def tolerance_infsolendgame_set(tol):
    """Set continuation parameter #34 to `tol` via the PHCpack interface.

    Returns whatever the underlying C binding returns.
    """
    # FIX: the import was previously aliased to `set`, shadowing the
    # builtin inside this function; use a non-shadowing alias.
    from phcpy.phcpy2c3 import py2c_set_value_of_continuation_parameter as set_value
    return set_value(34, tol)
def test_Gskew(white_noise):
    """Gskew of white noise should be close to zero (within +/- 0.2)."""
    feature_space = FeatureSpace(featureList=['Gskew'])
    feature_space = feature_space.calculateFeature(white_noise)
    assert (feature_space.result(method='array') >= (- 0.2)) and (feature_space.result(method='array') <= 0.2)
def build_single_model_ui(models):
    """Build the Gradio side-by-side chat UI (two chatbots fed by the same
    textbox) and wire up its event handlers.

    Returns the state/selector/chatbot/textbox/button components the caller
    needs to finish assembling the demo.

    NOTE(review): in every `.then( [state...], [...])` call below, the first
    positional argument is the *inputs* list — the handler function that
    `.then()` expects appears to be missing (possibly stripped from this
    copy); confirm against the original source.
    """
    notice_markdown = '\n<div class="title">\n<div style="\n   color: #fff;\n">Large Language Model <p style="\n   font-size: 0.8rem;\n">Future Gen Intel Xeon (codenamed Granite Rapids) with Intel AMX</p></div>\n</div>\n'
    learn_more_markdown = '\n<div class="footer"><p>Powered by <a href=" style="text-decoration: underline;" target="_blank">Intel Extension for Transformers</a> and <a href=" style="text-decoration: underline;" target="_blank">Intel Extension for PyTorch</a></p>\n</div>\n<div class="acknowledgments">\n<p></p></div>\n'
    # One conversation state per chatbot pane.
    state1 = gr.State()
    state2 = gr.State()
    notice = gr.Markdown(notice_markdown, elem_id='notice_markdown')
    with gr.Row(elem_id='model_selector_row', visible=False):
        model_selector = gr.Dropdown(choices=models, value=(models[0] if (len(models) > 0) else ''), interactive=True, show_label=False).style(container=False)
    with gr.Row():
        chatbot1 = grChatbot(elem_id='chatbot1', visible=False).style(height=500)
        chatbot2 = grChatbot(elem_id='chatbot2', visible=False).style(height=500)
    with gr.Row(elem_id='text-box-style'):
        with gr.Column(scale=20):
            textbox = gr.Textbox(show_label=False, placeholder='Enter text and press ENTER', visible=False).style(container=False)
        with gr.Column(scale=1, min_width=50):
            send_btn = gr.Button(value='Send', visible=False, elem_id='btn-send-style')
    # Generation parameters shared by both panes.
    with gr.Accordion('Parameters', open=False, visible=False, elem_id='btn-style') as parameter_row:
        temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.001, step=0.1, interactive=True, label='Temperature', visible=False)
        max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=1, interactive=True, label='Max output tokens')
        topk = gr.Slider(minimum=1, maximum=10, value=1, step=1, interactive=True, label='TOP K')
    with gr.Row(visible=False, elem_id='btn-style') as button_row:
        regenerate_btn = gr.Button(value=' Regenerate', interactive=False, elem_id='btn-list-style')
        clear_btn = gr.Button(value=' Clear history', interactive=False, elem_id='btn-list-style')
    # Hidden textboxes carrying the backend URL each pane talks to.
    choice_chatbot1 = gr.Textbox(label='hidden', value=baseline_url, visible=False)
    choice_chatbot2 = gr.Textbox(label='hidden', value=optimized_url, visible=False)
    gr.Markdown(learn_more_markdown)
    btn_list = [regenerate_btn, clear_btn]
    # Every user action is mirrored to both panes so they stay in sync.
    regenerate_btn.click(regenerate, state1, ([state1, chatbot1, textbox] + btn_list)).then( [state1, model_selector, temperature, max_output_tokens, topk, choice_chatbot1], ([state1, chatbot1] + btn_list))
    regenerate_btn.click(regenerate, state2, ([state2, chatbot2, textbox] + btn_list)).then( [state2, model_selector, temperature, max_output_tokens, topk, choice_chatbot2], ([state2, chatbot2] + btn_list))
    clear_btn.click(clear_history, None, ([state1, chatbot1, textbox] + btn_list))
    clear_btn.click(clear_history, None, ([state2, chatbot2, textbox] + btn_list))
    # Switching models resets both conversations.
    model_selector.change(clear_history, None, ([state1, chatbot1, textbox] + btn_list))
    model_selector.change(clear_history, None, ([state2, chatbot2, textbox] + btn_list))
    textbox.submit(add_text, [state1, textbox], ([state1, chatbot1, textbox] + btn_list)).then( [state1, model_selector, temperature, max_output_tokens, topk, choice_chatbot1], ([state1, chatbot1] + btn_list))
    textbox.submit(add_text, [state2, textbox], ([state2, chatbot2, textbox] + btn_list)).then( [state2, model_selector, temperature, max_output_tokens, topk, choice_chatbot2], ([state2, chatbot2] + btn_list))
    send_btn.click(add_text, [state1, textbox], ([state1, chatbot1, textbox] + btn_list)).then( [state1, model_selector, temperature, max_output_tokens, topk, choice_chatbot1], ([state1, chatbot1] + btn_list))
    send_btn.click(add_text, [state2, textbox], ([state2, chatbot2, textbox] + btn_list)).then( [state2, model_selector, temperature, max_output_tokens, topk, choice_chatbot2], ([state2, chatbot2] + btn_list))
    return (state1, state2, model_selector, chatbot1, chatbot2, textbox, send_btn, button_row, parameter_row)
class TaxiEnv(discrete.DiscreteEnv):
    """Taxi gridworld as a tabular DiscreteEnv.

    State is (taxi_row, taxi_col, passenger_location, destination) packed
    into a single integer (5 * 5 * 5 * 4 = 500 states). Passenger location
    0-3 are the four depots in `locs`; 4 means "in the taxi". Actions (per
    the render legend below): 0 South, 1 North, 2 East, 3 West, 4 Pickup,
    5 Dropoff. Rewards: -1 per step, +20 for a successful dropoff, -10 for
    an illegal pickup/dropoff.
    """
    metadata = {'render.modes': ['human', 'ansi']}
    def __init__(self):
        # ASCII map; walls are read from it when moving east/west.
        self.desc = np.asarray(MAP, dtype='c')
        # The four depot coordinates (R, G, Y, B).
        self.locs = locs = [(0, 0), (0, 4), (4, 0), (4, 3)]
        num_states = 500
        num_rows = 5
        num_columns = 5
        max_row = (num_rows - 1)
        max_col = (num_columns - 1)
        initial_state_distrib = np.zeros(num_states)
        num_actions = 6
        # P[state][action] -> list of (probability, next_state, reward, done).
        P = {state: {action: [] for action in range(num_actions)} for state in range(num_states)}
        # Enumerate every (row, col, passenger, destination) combination and
        # build the deterministic transition table.
        for row in range(num_rows):
            for col in range(num_columns):
                for pass_idx in range((len(locs) + 1)):
                    for dest_idx in range(len(locs)):
                        state = self.encode(row, col, pass_idx, dest_idx)
                        # Valid start states: passenger waiting at a depot
                        # that is not already the destination.
                        if ((pass_idx < 4) and (pass_idx != dest_idx)):
                            initial_state_distrib[state] += 1
                        for action in range(num_actions):
                            (new_row, new_col, new_pass_idx) = (row, col, pass_idx)
                            reward = (- 1)
                            done = False
                            taxi_loc = (row, col)
                            if (action == 0):
                                new_row = min((row + 1), max_row)
                            elif (action == 1):
                                new_row = max((row - 1), 0)
                            # East/West are only possible through ':' gaps in
                            # the map; '|' cells are walls.
                            if ((action == 2) and (self.desc[((1 + row), ((2 * col) + 2))] == b':')):
                                new_col = min((col + 1), max_col)
                            elif ((action == 3) and (self.desc[((1 + row), (2 * col))] == b':')):
                                new_col = max((col - 1), 0)
                            elif (action == 4):
                                # Pickup: legal only at the passenger's depot.
                                if ((pass_idx < 4) and (taxi_loc == locs[pass_idx])):
                                    new_pass_idx = 4
                                else:
                                    reward = (- 10)
                            elif (action == 5):
                                # Dropoff: episode ends only at the destination.
                                if ((taxi_loc == locs[dest_idx]) and (pass_idx == 4)):
                                    new_pass_idx = dest_idx
                                    done = True
                                    reward = 20
                                elif ((taxi_loc in locs) and (pass_idx == 4)):
                                    # Dropping at a wrong depot: passenger gets out
                                    # there, normal -1 step reward.
                                    new_pass_idx = locs.index(taxi_loc)
                                else:
                                    reward = (- 10)
                            new_state = self.encode(new_row, new_col, new_pass_idx, dest_idx)
                            P[state][action].append((1.0, new_state, reward, done))
        initial_state_distrib /= initial_state_distrib.sum()
        discrete.DiscreteEnv.__init__(self, num_states, num_actions, P, initial_state_distrib)
    def encode(self, taxi_row, taxi_col, pass_loc, dest_idx):
        """Pack (row, col, passenger, destination) into one int (mixed radix 5,5,5,4)."""
        i = taxi_row
        i *= 5
        i += taxi_col
        i *= 5
        i += pass_loc
        i *= 4
        i += dest_idx
        return i
    def decode(self, i):
        """Inverse of encode(); yields (taxi_row, taxi_col, pass_loc, dest_idx)."""
        out = []
        out.append((i % 4))
        i = (i // 4)
        out.append((i % 5))
        i = (i // 5)
        out.append((i % 5))
        i = (i // 5)
        out.append(i)
        assert (0 <= i < 5)
        # Digits were extracted least-significant first; reverse the order.
        return reversed(out)
    def render(self, mode='human'):
        """Render the map, highlighting taxi (yellow/green), passenger (blue)
        and destination (magenta); returns a string in 'ansi' mode."""
        outfile = (StringIO() if (mode == 'ansi') else sys.stdout)
        out = self.desc.copy().tolist()
        out = [[c.decode('utf-8') for c in line] for line in out]
        (taxi_row, taxi_col, pass_idx, dest_idx) = self.decode(self.s)
        def ul(x):
            return ('_' if (x == ' ') else x)
        if (pass_idx < 4):
            # Passenger waiting: taxi yellow, passenger depot blue.
            out[(1 + taxi_row)][((2 * taxi_col) + 1)] = utils.colorize(out[(1 + taxi_row)][((2 * taxi_col) + 1)], 'yellow', highlight=True)
            (pi, pj) = self.locs[pass_idx]
            out[(1 + pi)][((2 * pj) + 1)] = utils.colorize(out[(1 + pi)][((2 * pj) + 1)], 'blue', bold=True)
        else:
            # Passenger on board: taxi shown green.
            out[(1 + taxi_row)][((2 * taxi_col) + 1)] = utils.colorize(ul(out[(1 + taxi_row)][((2 * taxi_col) + 1)]), 'green', highlight=True)
        (di, dj) = self.locs[dest_idx]
        out[(1 + di)][((2 * dj) + 1)] = utils.colorize(out[(1 + di)][((2 * dj) + 1)], 'magenta')
        outfile.write(('\n'.join([''.join(row) for row in out]) + '\n'))
        if (self.lastaction is not None):
            outfile.write('  ({})\n'.format(['South', 'North', 'East', 'West', 'Pickup', 'Dropoff'][self.lastaction]))
        else:
            outfile.write('\n')
        if (mode != 'human'):
            with closing(outfile):
                return outfile.getvalue()
()
# NOTE(review): the bare `()` above is a no-op expression — it looks like a
# decorator lost its name during extraction (e.g. `@pytest.fixture()`);
# confirm against the original source.
def sample_run():
    """Build a minimal fake run document (experiment, host, config, command,
    meta info) for tests; `start_time` comes from module-level T1."""
    exp = {'name': 'test_exp', 'sources': [], 'doc': '', 'base_dir': '/tmp'}
    host = {'hostname': 'test_host', 'cpu_count': 1, 'python_version': '3.4'}
    config = {'config': 'True', 'foo': 'bar', 'answer': 42}
    command = 'run'
    meta_info = {'comment': 'test run'}
    return {'_id': 'FEDCBA', 'ex_info': exp, 'command': command, 'host_info': host, 'start_time': T1, 'config': config, 'meta_info': meta_info}
class EpisodeLoggerWrapper(AbstractTrainerWrapper):
    """Trainer wrapper that collects per-episode metrics, summarizes them
    every `logging_period` episodes, and optionally runs a block of
    validation episodes every `validation_period` training episodes."""

    def __init__(self, logging_period=10, validation_episodes=100, validation_period=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._log_t = 0  # episodes since the last training summary
        self.logging_period = logging_period
        self.validation_period = validation_period
        self.validation_episodes = validation_episodes
        self.metric_collector = None
        self.validation_metric_context = None
        self.metric_writer = None
        self._global_t = 0  # total training timesteps seen
        self._episodes = 0  # total training episodes seen
        self._data = []
        self._losses = []
        self._last_validation = 0  # episode count at the last validation run
        self._extended_context = dict()

    def run(self, process, **kwargs):
        """Wrap `process` so collected metrics are summarized and written
        every `logging_period` training episodes."""
        if (self.metric_writer is None):
            # Lazily create the metric plumbing on first use.
            self.metric_collector = MetricContext()
            self.validation_metric_context = MetricContext()
            self.metric_writer = MetricWriter(session_name=self.unwrapped.name)
            if hasattr(self.metric_writer, 'visdom'):
                self._extended_context['visdom'] = self.metric_writer.visdom

        def _late_process(*args, context=None, **kwargs):
            context.update(self._extended_context)
            data = process(*args, context=context, **kwargs)
            if (self._log_t >= self.logging_period):
                self.metric_collector.summary(self._global_t)
                self.metric_collector.collect(self.metric_writer, self._global_t, 'train')
                self._log_t = 0
            return data
        return super().run(_late_process, **kwargs)

    def _process_episode_end(self, episode_end, mode):
        """Record finished-episode stats; returns how many episodes ended."""
        if (episode_end is None):
            return 0
        eps = 0
        collector = (self.metric_collector if (mode == 'train') else self.validation_metric_context)
        if (len(episode_end) == 3):
            # Batched form: (count, [lengths], [rewards]).
            (eps, episode_lengths, rewards) = episode_end
            for (l, rw) in zip(episode_lengths, rewards):
                collector.add_scalar('episode_length', l)
                collector.add_scalar('reward', rw)
        else:
            # Single-episode form: (length, reward).
            eps = 1
            (episode_length, reward) = episode_end
            collector.add_scalar('episode_length', episode_length)
            collector.add_scalar('reward', reward)
        if (mode == 'train'):
            self._episodes += eps
            self._log_t += eps
            self.metric_collector.add_last_value_scalar('episodes', self._episodes)
        return eps

    def _process_stats(self, stats, mode):
        """Fold trainer-provided stats (dict or flushable object) into the
        collector for `mode`."""
        if (stats is None):
            return
        collector = (self.metric_collector if (mode == 'train') else self.validation_metric_context)
        if isinstance(stats, dict):
            if ('loss' in stats):
                collector.add_scalar('loss', stats.get('loss'))
            if ('win' in stats):
                collector.add_scalar('win_rate', float(stats.get('win')))
                collector.add_cummulative('win_count', int(stats.get('win')))
        else:
            stats.flush(collector)

    def run_validation(self, **kwargs):
        """Run `validation_episodes` episodes in validation mode and flush
        their metrics."""
        tval = 0
        while (tval < self.validation_episodes):
            (_, epend, stats) = self.trainer.process(mode='validation', **kwargs)
            tval += self._process_episode_end(epend, 'validation')
            self._process_stats(stats, 'validation')
        print('Validation finished')
        self.validation_metric_context.summary(self._global_t)
        self.validation_metric_context.collect(self.metric_writer, self._global_t, 'validation')
        self._last_validation = self._episodes

    def process(self, mode='train', **kwargs):
        """Delegate one step to the wrapped trainer, collect its metrics,
        and trigger periodic validation."""
        (tdiff, episode_end, stats) = self.trainer.process(mode=mode, **kwargs)
        # BUG FIX: the non-train branch previously added None to an int
        # (TypeError); non-training steps must simply not advance the counter.
        self._global_t += (tdiff if (mode == 'train') else 0)
        epend = self._process_episode_end(episode_end, mode)
        self._process_stats(stats, mode)
        if ((self.validation_period is not None) and ((self._episodes - self._last_validation) > self.validation_period) and (epend > 0)):
            self.run_validation(**kwargs)
        return (tdiff, episode_end, stats)

    def __repr__(self):
        return ('<EpisodeLogger %s>' % repr(self.trainer))

    def save(self, path):
        super().save(path)
        self.metric_writer.save(path)

    def log(self, stats):
        """Print a progress report and write a metrics row, draining the
        accumulated episode data and losses."""
        (episode_length, reward) = tuple(map((lambda *x: np.mean(x)), *self._data))
        loss = (np.mean(self._losses) if (len(self._losses) > 0) else float('nan'))
        self._data = []
        self._losses = []
        report = 'steps: {}, episodes: {}, reward: {:0.5f}, episode length: {}, loss: {:0.5f}'.format(self._global_t, self._episodes, reward, episode_length, loss)
        metrics_row = self.metric_writer.record(self._global_t).scalar('reward', reward).scalar('episode_length', episode_length)
        if ((stats is not None) and ('epsilon' in stats)):
            report += ', epsilon:{:0.3f}'.format(stats.get('epsilon'))
            metrics_row = metrics_row.scalar('epsilon', stats.get('epsilon'))
        if (not math.isnan(loss)):
            metrics_row = metrics_row.scalar('loss', loss)
        # BUG FIX: flush() previously ran before the loss scalar was added,
        # so the loss value was never written.
        metrics_row.flush()
        print(report)
def _image_tensor_input_placeholder():
    """Return (placeholder, placeholder) for a batched uint8 RGB image feed.

    Both tuple slots reference the same tensor named 'image_tensor'.
    """
    placeholder = tf.placeholder(dtype=tf.uint8, shape=(None, None, None, 3), name='image_tensor')
    return (placeholder, placeholder)
def parse():
    """Parse command-line arguments and bundle them into a params.Params.

    Dataset-dependent sizes (num_nodes/num_features/num_classes) are looked
    up from `run.metadata` for the chosen dataset; everything else is taken
    directly from the parsed args.
    """
    args = parser.parse_args()
    parameters = params.Params(model=args.model, data=args.data, num_epochs=args.num_epochs, num_hops=args.num_hops, diffusion_threshold=args.diffusion_threshold, learning_rate=args.learning_rate, update_fn=args.optimizer, num_nodes=run.metadata[args.data]['num_nodes'], num_features=run.metadata[args.data]['num_features'], num_classes=run.metadata[args.data]['num_classes'], dcnn_nonlinearity=args.dcnn_nonlinearity, dense_nonlinearity=args.dense_nonlinearity, out_nonlinearity=args.out_nonlinearity, loss_fn=args.loss_fn, stop_window_size=args.stop_window_size, stop_early=args.stop_early, batch_size=args.batch_size, num_dcnn_layers=args.num_dcnn_layers, num_dense_layers=args.num_dense_layers, dense_layer_size=args.dense_layer_size, print_train_accuracy=args.print_train_accuracy, print_valid_accuracy=args.print_valid_accuracy, momentum=args.momentum, explore=args.explore, check_sparse=args.check_sparse)
    return parameters
def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name='model_best'):
    """Restore a trainer from `folder` and load checkpoint parameters for
    the requested folds.

    Args:
        folder: output folder containing fold_X / 'all' subfolders.
        folds: str (treated as 'all'), list/tuple of fold ids (or ['all']),
            a single int, or None to auto-discover 'fold*' subfolders.
        mixed_precision: forwarded to restore_model as fp16.
        checkpoint_name: checkpoint base name to load from each fold.

    Returns:
        (trainer, all_params): the restored (uninitialized-for-training)
        trainer and the list of per-fold checkpoint state dicts.
    """
    if isinstance(folds, str):
        # NOTE(review): any string selects the 'all' folder, regardless of
        # its value — confirm whether non-'all' strings should be rejected.
        folds = [join(folder, 'all')]
        assert isdir(folds[0]), ('no output folder for fold %s found' % folds)
    elif isinstance(folds, (list, tuple)):
        if ((len(folds) == 1) and (folds[0] == 'all')):
            folds = [join(folder, 'all')]
        else:
            folds = [join(folder, ('fold_%d' % i)) for i in folds]
        assert all([isdir(i) for i in folds]), 'list of folds specified but not all output folders are present'
    elif isinstance(folds, int):
        folds = [join(folder, ('fold_%d' % folds))]
        assert all([isdir(i) for i in folds]), ('output folder missing for fold %d' % folds)
    elif (folds is None):
        print("folds is None so we will automatically look for output folders (not using 'all'!)")
        folds = subfolders(folder, prefix='fold')
        print('found the following folds: ', folds)
    else:
        # BUG FIX: the type was previously passed as a second argument to
        # ValueError (logging-style), so the message was never formatted.
        raise ValueError('Unknown value for folds. Type: %s. Expected: list of int, int, str or None' % str(type(folds)))
    # The first fold's pickle defines the trainer; all folds share it.
    trainer = restore_model(join(folds[0], ('%s.model.pkl' % checkpoint_name)), fp16=mixed_precision)
    trainer.output_folder = folder
    trainer.output_folder_base = folder
    trainer.update_fold(0)
    trainer.initialize(False)
    all_best_model_files = [join(i, ('%s.model' % checkpoint_name)) for i in folds]
    print('using the following model files: ', all_best_model_files)
    # Load to CPU so callers decide device placement.
    all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files]
    return (trainer, all_params)
def dictionary_walk(dictionary):
    """Depth-first generator over the leaves of a nested dict.

    Yields (key, value) for every non-dict value, recursing into nested
    dicts in insertion order. Keys are not path-qualified, so duplicate
    leaf keys from different levels are yielded as-is.
    """
    for k, v in dictionary.items():
        if not isinstance(v, dict):
            yield (k, v)
        else:
            yield from dictionary_walk(v)
def dynp(model, data, params):
    """Run ruptures' Dynp single-breakpoint detection on every series in
    `data` and return the offsets as a one-row DataFrame.

    Each series is standardized first; the detected breakpoint index is
    shifted by -160. NOTE(review): the column label uses
    `str(*params.values())`, which only works when `params` holds exactly
    one value — confirm.
    """
    ttf = []
    for key in data.keys():
        scaled = StandardScaler().fit_transform(data[key].values)
        detector = rpt.Dynp(model=model, params=params, jump=1).fit(scaled)
        breakpoints = detector.predict(n_bkps=1)
        ttf.append(breakpoints[0] - 160)
    column = (model + ' ') + str(*params.values())
    return pd.DataFrame({column: ttf}).T
class BaseDataset(object):
    """Base class for re-identification datasets: computes identity /
    camera / sample counts for image and video (tracklet) data."""

    def get_imagedata_info(self, data):
        """Return (num_pids, num_imgs, num_cams) for image data given as
        (img_path, pid, camid) tuples."""
        (pids, cams) = ([], [])
        for (_, pid, camid) in data:
            pids += [pid]
            cams += [camid]
        pids = set(pids)
        cams = set(cams)
        num_pids = len(pids)
        num_cams = len(cams)
        num_imgs = len(data)
        return (num_pids, num_imgs, num_cams)

    def get_videodata_info(self, data, return_tracklet_stats=False):
        """Return (num_pids, num_tracklets, num_cams[, tracklet_stats]).

        The tuple layout of each tracklet depends on its length:
          3: (img_paths, pid, camid)
          4: (img_paths, pid, camid, _)
          5: (img_paths, pid, new_pid, new_ambi, camid)   -> counts new_pid
          6: (img_paths, pid, new_pid, new_ambi, camid, _) -> counts new_pid
        tracklet_stats holds the number of frames per tracklet.
        """
        (pids, cams, tracklet_stats) = ([], [], [])
        # (Removed dead `is_mask = ((len(data[0]) == 4) or len(data[0]))`:
        # always truthy and never used.)
        if (len(data[0]) == 3):
            for (img_paths, pid, camid) in data:
                pids += [pid]
                cams += [camid]
                tracklet_stats += [len(img_paths)]
        elif (len(data[0]) == 4):
            for (img_paths, pid, camid, _) in data:
                pids += [pid]
                cams += [camid]
                tracklet_stats += [len(img_paths)]
        elif (len(data[0]) == 5):
            for (img_paths, pid, new_pid, new_ambi, camid) in data:
                pids += [new_pid]
                cams += [camid]
                tracklet_stats += [len(img_paths)]
        elif (len(data[0]) == 6):
            for (img_paths, pid, new_pid, new_ambi, camid, _) in data:
                pids += [new_pid]
                cams += [camid]
                tracklet_stats += [len(img_paths)]
        pids = set(pids)
        cams = set(cams)
        num_pids = len(pids)
        num_cams = len(cams)
        num_tracklets = len(data)
        if return_tracklet_stats:
            return (num_pids, num_tracklets, num_cams, tracklet_stats)
        return (num_pids, num_tracklets, num_cams)

    def print_dataset_statistics(self):
        """Subclasses must implement split-specific statistics printing."""
        raise NotImplementedError
def pad(pad_type, padding):
    """Build a 2D padding layer.

    Args:
        pad_type: 'reflect' or 'replicate' (case-insensitive).
        padding: padding size; 0 means no layer is needed.

    Returns:
        The padding module, or None when padding == 0.

    Raises:
        NotImplementedError: for an unsupported pad_type.
    """
    pad_type = pad_type.lower()
    if padding == 0:
        return None
    layer_factories = {
        'reflect': nn.ReflectionPad2d,
        'replicate': nn.ReplicationPad2d,
    }
    try:
        factory = layer_factories[pad_type]
    except KeyError:
        raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))
    return factory(padding)
def get_best_trajectory_within_cluster(clustered_seqs, tracklets_info, time, norm_distance):
    """Pick the best tracklet sequence from a cluster and remove it.

    Each candidate sequence is scored as 0.9 * temporal coverage +
    0.1 * best pairwise visual similarity (1 - normalized reid-score
    distance). Ties are broken by the latest end time.

    Args:
        clustered_seqs: list of tracklet-id sequences; MUTATED — the winning
            sequence is popped from it.
        tracklets_info: mapping tracklet_id -> dict with 'start', 'end' and
            'avg_reid_score' (vector usable by numpy.linalg.norm).
        time: total time span used to normalise coverage.
        norm_distance: normalisation constant for reid-score distances.

    Returns:
        (best_sequence, clustered_seqs) — the second element is the same
        list object with the winner removed.
    """
    ends = []
    covers = []
    visual_similarity = []
    for tracklet_seq in clustered_seqs:
        total_gap = sum(tracklets_info[t]['end'] - tracklets_info[t]['start']
                        for t in tracklet_seq)
        # Pairwise similarity over ordered pairs (t > t_id), as before.
        distance = [1 - (norm(tracklets_info[t_id]['avg_reid_score'] -
                              tracklets_info[t]['avg_reid_score']) / norm_distance)
                    for t_id in tracklet_seq
                    for t in tracklet_seq if t > t_id]
        covers.append(total_gap)
        visual_similarity.append(max(distance) if distance else 0)
        ends.append(tracklets_info[tracklet_seq[-1]]['end'])
    covers = np.array(covers) / time
    visual_similarity = np.array(visual_similarity)
    score = (0.9 * covers) + (0.1 * visual_similarity)
    # Zero out NaN scores (idiomatic mask assignment instead of the original
    # `np.isnan(score).any() == True` + index loop).
    score[np.isnan(score)] = 0
    max_score = np.max(score)
    best_ind = np.where(score == max_score)[0]
    if len(best_ind) > 1:
        # Tie-break: prefer the sequence that ends latest.
        ind = best_ind[np.argmax([ends[i] for i in best_ind])]
    else:
        ind = best_ind[0]
    sequence = clustered_seqs[ind]
    clustered_seqs.pop(ind)
    return (sequence, clustered_seqs)
def main():
    """CLI entry point: parse arguments, prepare log/model directories, seed
    the RNGs, then train (when -data_train is given) and/or test (when
    -data_test is given)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-data_train', type=str, default='')
    parser.add_argument('-data_dev', required=True)
    parser.add_argument('-data_test', type=str, default='')
    parser.add_argument('-vocab', required=True)
    parser.add_argument('-epoch', type=int, default=10000)
    parser.add_argument('-batch_size', type=int, default=64)
    parser.add_argument('-d_model', type=int, default=512)
    parser.add_argument('-n_heads', type=int, default=8)
    parser.add_argument('-n_layers', type=int, default=6)
    parser.add_argument('-n_warmup_steps', type=int, default=4000)
    parser.add_argument('-dropout', type=float, default=0.1)
    parser.add_argument('-log', default=None)
    parser.add_argument('-save_model', default=None)
    parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-label_smoothing', action='store_true')
    parser.add_argument('-num_workers', type=int, default=1)
    parser.add_argument('-cnn_name', type=str, default='resnet101')
    parser.add_argument('-cnn_pretrained_model', type=str, default='')
    parser.add_argument('-joint_enc_func', type=str, default='element_multiplication')
    parser.add_argument('-lr', type=float, default=0.01)
    parser.add_argument('-crop_size', type=int, default=224)
    parser.add_argument('-max_seq_len', type=int, default=64)
    parser.add_argument('-attribute_len', type=int, default=5)
    parser.add_argument('-pretrained_model', type=str, default='')
    parser.add_argument('-rank_alpha', type=float, default=1.0)
    parser.add_argument('-patience', type=int, default=7)
    parser.add_argument('-bleu_valid_every_n', type=int, default=5)
    parser.add_argument('-data_dev_combined', required=True)
    parser.add_argument('-beam_size', type=int, default=5)
    parser.add_argument('-seed', type=int, default=0)
    parser.add_argument('-attribute_vocab_size', type=int, default=1000)
    parser.add_argument('-add_attribute', action='store_true')
    args = parser.parse_args()
    args.cuda = not args.no_cuda
    args.d_word_vec = args.d_model
    # Load pretrained weights only when a checkpoint path was supplied.
    args.load_weights = bool(args.pretrained_model)
    # BUG FIX: the seeds were hard-coded to 0, silently ignoring -seed.
    # The default (seed=0) keeps the previous behaviour.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    args.device = torch.device('cuda' if args.cuda else 'cpu')
    # Create the parent directories of the log and model files.
    # NOTE(review): -log and -save_model default to None, so omitting either
    # crashes on .split() below — they look like they should be required.
    log_path = '/'.join(args.log.split('/')[:-1])
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    model_path = '/'.join(args.save_model.split('/')[:-1])
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    print(args)
    if args.data_train:
        print('start training')
        # ImageNet normalisation constants; random crop/flip for training,
        # center crop for validation.
        transform = transforms.Compose([transforms.RandomCrop(args.crop_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        transform_dev = transforms.Compose([transforms.CenterCrop(args.crop_size), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
        vocab = Vocabulary()
        vocab.load(args.vocab)
        args.vocab_size = len(vocab)
        data_loader_training = get_loader(args.data_train, vocab, transform, args.batch_size, shuffle=True, num_workers=args.num_workers, max_seq_len=args.max_seq_len, attribute_len=args.attribute_len)
        data_loader_dev = get_loader(args.data_dev, vocab, transform_dev, args.batch_size, shuffle=False, num_workers=args.num_workers, max_seq_len=args.max_seq_len, attribute_len=args.attribute_len)
        data_loader_bleu = get_loader_test(args.data_dev_combined, vocab, transform_dev, 1, shuffle=False, attribute_len=args.attribute_len)
        list_of_refs_dev = load_ori_token_data_new(args.data_dev_combined)
        model = get_model(args, load_weights=False)
        print(count_parameters(model))
        optimizer = get_std_opt(model, args)
        train(model, data_loader_training, data_loader_dev, optimizer, args, vocab, list_of_refs_dev, data_loader_bleu)
    if args.data_test:
        print('start testing')
        # Evaluate the checkpoint that training just saved.
        args.pretrained_model = args.save_model
        test(args)
class AsymWeightLoss(nn.Module):
    """Asymmetric regression loss.

    Penalises under-estimation (y_hat < y) `underestimation_penalty` times
    more than over-estimation. Uses absolute errors when L1=True, squared
    errors otherwise; the result is the mean over all elements.
    """

    def __init__(self, underestimation_penalty=1, L1=False):
        super().__init__()
        invalidInputError((underestimation_penalty > 0), 'underestimation_penalty should be larger than 0')
        self.L1 = L1
        self.underestimation_penalty = underestimation_penalty

    def forward(self, y_hat, y):
        over = F.relu(y_hat - y)    # positive where the prediction is too high
        under = F.relu(y - y_hat)   # positive where the prediction is too low
        if not self.L1:
            over = torch.pow(over, 2)
            under = torch.pow(under, 2)
        return torch.mean(over + under * self.underestimation_penalty)
def get_game_envs():
    """Group every registered gym environment id by environment type.

    The type is the last module component of the env's entry point string,
    e.g. 'gym.envs.atari:AtariEnv' -> 'atari'.

    Returns:
        defaultdict(set): env_type -> set of env ids.
    """
    _game_envs = defaultdict(set)
    for env in gym.envs.registry.all():
        try:
            env_type = env.entry_point.split(':')[0].split('.')[-1]
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; parsing stays best-effort (entry_point
            # may be a callable or missing for some registered envs).
            continue
        _game_envs[env_type].add(env.id)
    return _game_envs
class Res16UNetTemporalIN34(Res16UNetTemporal34):
    # Variant of Res16UNetTemporal34 that swaps in sparse instance
    # normalization and the instance-norm basic block.
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BasicBlockIN
class GraphGRUCell(HybridBlock):
    """GRU cell whose input-to-hidden and hidden-to-hidden transforms are
    graph aggregators over node neighborhoods instead of dense layers."""

    def __init__(self, aggregator_args, aggregation_type='all', typ='rnn', prefix=None, params=None):
        # NOTE(review): `typ` is accepted but self._typ is hard-coded to
        # 'gru' below — confirm the parameter is intentionally ignored.
        super(GraphGRUCell, self).__init__(prefix=prefix, params=params)
        assert (len(aggregator_args) == 2)
        # Deep-copy so the gate-width patch below does not leak back to the
        # caller's aggregator_args.
        self._aggregator_args = copy.deepcopy(aggregator_args)
        self._act = get_activation(cfg.AGGREGATOR.ACTIVATION)
        self._state_dim = aggregator_args[1][0]
        self._typ = 'gru'
        # The three gates (update, reset, candidate) are produced in one
        # fused projection of width 3 * state_dim.
        self._map_dim = (3 * self._state_dim)
        self._aggregation_type = aggregation_type
        with self.name_scope():
            # Patch the aggregator output width to the fused gate width.
            self._aggregator_args[1][0] = self._map_dim
            self.aggregator_x2h = parse_aggregator_from_desc(aggregator_desc=[self._aggregator_args[0], self._aggregator_args[1], 'agg_x2h_', 'identity'])
            self.aggregator_h2h = parse_aggregator_from_desc(aggregator_desc=[self._aggregator_args[0], self._aggregator_args[1], 'agg_h2h_', 'identity'])
            if (self._aggregation_type != 'concat'):
                # Extra residual-style dense transform on the h2h branch.
                self.direct_h2h = nn.Dense(units=self._map_dim, flatten=False, prefix='direct_h2h_')

    def summary(self):
        """Log a one-line description of the cell configuration."""
        logging.info(('Graph %s: State Dim=%d, Aggregation Type=%s, Aggregator=%s' % (self._typ.upper(), self._state_dim, self._aggregation_type, str(self._aggregator_args))))

    def hybrid_forward(self, F, data, states, end_points, indptr, edge_data=None):
        """One GRU step over the graph; returns ([new_state], mid_info)."""
        if (states is None):
            # Zero initial state, broadcast to state_dim along axis 2.
            prev_h = F.broadcast_axis(F.slice_axis(F.zeros_like(data), begin=0, end=1, axis=2), axis=2, size=self._state_dim)
        else:
            prev_h = states[0]
        (x2h_data, x2h_mid_info) = shortcut_aggregator_forward(self.aggregator_x2h, data, data, end_points, indptr, edge_data)
        if (self._aggregation_type != 'concat'):
            (h2h_data, h2h_mid_info) = shortcut_aggregator_forward(self.aggregator_h2h, data, prev_h, end_points, indptr, edge_data)
            h2h_data = (h2h_data + self.direct_h2h(h2h_data))
        else:
            # 'concat' mode feeds [data, prev_h] to the h2h aggregator.
            (h2h_data, h2h_mid_info) = shortcut_aggregator_forward(self.aggregator_h2h, F.concat(data, prev_h, dim=(- 1)), prev_h, end_points, indptr, edge_data)
        mid_info = (x2h_mid_info + h2h_mid_info)
        # Split the fused projections into the three gate pre-activations.
        x2h_data_l = F.split(x2h_data, num_outputs=3, axis=2)
        h2h_data_l = F.split(h2h_data, num_outputs=3, axis=2)
        U_t = F.Activation((x2h_data_l[0] + h2h_data_l[0]), act_type='sigmoid')  # update gate
        R_t = F.Activation((x2h_data_l[1] + h2h_data_l[1]), act_type='sigmoid')  # reset gate
        H_prime_t = self._act((x2h_data_l[2] + (R_t * h2h_data_l[2])))  # candidate state
        H_t = (((1 - U_t) * H_prime_t) + (U_t * prev_h))
        return ([H_t], mid_info)
_module()
class NRTRModalityTransform(BaseModule):
    """Modality-transform backbone: two stride-2 conv/ReLU/BN stages followed
    by a linear projection, reshaped to (N, C', 1, W) feature maps."""

    def __init__(self, input_channels=3, init_cfg=[dict(type='Kaiming', layer='Conv2d'), dict(type='Uniform', layer='BatchNorm2d')]):
        super().__init__(init_cfg=init_cfg)
        self.conv_1 = nn.Conv2d(in_channels=input_channels, out_channels=32, kernel_size=3, stride=2, padding=1)
        self.relu_1 = nn.ReLU(True)
        self.bn_1 = nn.BatchNorm2d(32)
        self.conv_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1)
        self.relu_2 = nn.ReLU(True)
        self.bn_2 = nn.BatchNorm2d(64)
        self.linear = nn.Linear(512, 512)

    def forward(self, x):
        feat = self.bn_1(self.relu_1(self.conv_1(x)))
        feat = self.bn_2(self.relu_2(self.conv_2(feat)))
        n, c, h, w = feat.size()
        # Fold channel and height into one feature axis per horizontal step.
        feat = feat.permute(0, 3, 2, 1).contiguous().view(n, w, h * c)
        feat = self.linear(feat)
        # Back to an image-like layout with a singleton height.
        return feat.permute(0, 2, 1).contiguous().view(n, -1, 1, w)
_module()
class LoveDADataset(CustomDataset):
    """LoveDA remote-sensing segmentation dataset (7 classes, PNG images,
    label 0 treated as ignore via reduce_zero_label)."""

    CLASSES = ('background', 'building', 'road', 'water', 'barren', 'forest', 'agricultural')
    PALETTE = [[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255], [159, 129, 183], [0, 255, 0], [255, 195, 128]]

    def __init__(self, **kwargs):
        super(LoveDADataset, self).__init__(img_suffix='.png', seg_map_suffix='.png', reduce_zero_label=True, **kwargs)

    def results2img(self, results, imgfile_prefix, indices=None):
        """Save each result as an 8-bit PNG under `imgfile_prefix`.

        Returns the list of written file paths, in input order.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        saved_files = []
        for result, idx in zip(results, indices):
            stem = osp.splitext(osp.basename(self.img_infos[idx]['filename']))[0]
            out_path = osp.join(imgfile_prefix, f'{stem}.png')
            Image.fromarray(result.astype(np.uint8)).save(out_path)
            saved_files.append(out_path)
        return saved_files

    def format_results(self, results, imgfile_prefix, indices=None):
        """Validate inputs and dump `results` to submission images."""
        if indices is None:
            indices = list(range(len(self)))
        assert isinstance(results, list), 'results must be a list.'
        assert isinstance(indices, list), 'indices must be a list.'
        return self.results2img(results, imgfile_prefix, indices)
_pipeline_test
class FeatureExtractionPipelineTests(unittest.TestCase):
model_mapping = MODEL_MAPPING
tf_model_mapping = TF_MODEL_MAPPING
_torch
def test_small_model_pt(self):
    """Pin the exact (simplified) hidden states of the tiny random PyTorch
    checkpoint so pipeline regressions are caught."""
    feature_extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='pt')
    outputs = feature_extractor('This is a test')
    # Shape (1, seq_len, 32); values rounded by nested_simplify.
    self.assertEqual(nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, (- 0.526), (- 1.71), (- 1.276), 0.756, (- 0.775), (- 1.048), (- 0.25), (- 0.595), (- 0.137), (- 0.598), 2.022, (- 0.812), 0.284, (- 0.488), (- 0.391), (- 0.403), (- 0.525), (- 0.061), (- 0.228), 1.086, 0.378, (- 0.14), 0.599, (- 0.087), (- 2.259), (- 0.098)], [1.676, 0.232, (- 1.508), (- 0.145), 1.798, (- 1.388), 1.331, (- 0.37), (- 0.939), 0.043, 0.06, (- 0.414), (- 1.408), 0.24, 0.622, (- 0.55), (- 0.569), 1.873, (- 0.706), 1.924, (- 0.254), 1.927, (- 0.423), 0.152, (- 0.952), 0.509, (- 0.496), (- 0.968), 0.093, (- 1.049), (- 0.65), 0.312], [0.207, (- 0.775), (- 1.822), 0.321, (- 0.71), (- 0.201), 0.3, 1.146, (- 0.233), (- 0.753), (- 0.305), 1.309, (- 1.47), (- 0.21), 1.802, (- 1.555), (- 1.175), 1.323, (- 0.303), 0.722, (- 0.076), 0.103, (- 1.406), 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, (- 0.9), (- 1.068), 0.438], [0.615, 1.077, 0.171, (- 0.175), 1.3, 0.901, (- 0.653), (- 0.138), 0.341, (- 0.654), (- 0.184), (- 0.441), (- 0.424), 0.356, (- 0.075), 0.26, (- 1.023), 0.814, 0.524, (- 0.904), (- 0.204), (- 0.623), 1.234, (- 1.03), 2.594, 0.56, 1.831, (- 0.199), (- 1.508), (- 0.492), (- 1.687), (- 2.165)], [0.129, 0.008, (- 1.279), (- 0.412), (- 0.004), 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, (- 0.799), (- 1.626), (- 1.26), 0.595, (- 0.316), (- 1.399), 0.232, 0.264, 1.386, (- 1.171), (- 0.256), (- 0.256), (- 1.944), 1.168, (- 0.368), (- 0.714), (- 0.51), 0.454, 1.148], [(- 0.32), 0.29, (- 1.309), (- 0.177), 0.453, 0.636, (- 0.024), 0.509, 0.931, (- 1.754), (- 1.575), 0.786, 0.046, (- 1.165), (- 1.416), 1.373, 1.293, (- 0.285), (- 1.541), (- 1.186), (- 0.106), (- 0.994), 2.001, 0.972, (- 0.02), 1.654, (- 0.236), 0.643, 1.02, 0.572, (- 0.914), (- 0.154)], [0.7, (- 0.937), 0.441, 0.25, 0.78, (- 0.022), 0.282, (- 0.095), 1.558, (- 0.336), 1.706, 0.884, 1.28, 0.198, (- 0.796), 1.218, (- 1.769), 1.197, (- 0.342), (- 0.177), (- 0.645), 1.364, 0.008, (- 0.597), (- 0.484), (- 2.772), (- 0.696), (- 0.632), (- 0.34), (- 1.527), (- 0.562), 0.862], [2.504, 0.831, (- 1.271), (- 0.033), 0.298, (- 0.735), 1.339, 1.74, 0.233, (- 1.424), (- 0.819), (- 0.761), 0.291, 0.853, (- 0.092), (- 0.885), 0.164, 1.025, 0.907, 0.749, (- 1.515), (- 0.545), (- 1.365), 0.271, 0.034, (- 2.005), 0.031, 0.244, 0.621, 0.176, 0.336, (- 1.196)], [(- 0.711), 0.591, (- 1.001), (- 0.946), 0.784, (- 1.66), 1.545, 0.799, (- 0.857), 1.148, 0.213, (- 0.285), 0.464, (- 0.139), 0.79, (- 1.663), (- 1.121), 0.575, (- 0.178), (- 0.508), 1.565, (- 0.242), (- 0.346), 1.024, (- 1.135), (- 0.158), (- 2.101), 0.275, 2.009, (- 0.425), 0.716, 0.981], [0.912, (- 1.186), (- 0.846), (- 0.421), (- 1.315), (- 0.827), 0.309, 0.533, 1.029, (- 2.343), 1.513, (- 1.238), 1.487, (- 0.849), 0.896, (- 0.927), (- 0.459), 0.159, 0.177, 0.873, 0.935, 1.433, (- 0.485), 0.737, 1.327, (- 0.338), 1.608, (- 0.47), (- 0.445), (- 1.118), (- 0.213), (- 0.446)], [(- 0.434), (- 1.362), (- 1.098), (- 1.068), 1.507, 0.003, 0.413, (- 0.395), 0.897, (- 0.237), 1.405, (- 0.344), 1.693, 0.677, 0.097, (- 0.257), (- 0.602), 1.026, (- 1.229), 0.855, (- 0.713), 1.014, 0.443, 0.238, 0.425, (- 2.184), 1.933, (- 1.157), (- 1.132), (- 0.597), (- 0.785), 0.967], [0.58, (- 0.971), 0.789, (- 0.468), (- 0.576), 1.779, 1.747, 1.715, (- 1.939), 0.125, 0.656, (- 0.042), (- 1.024), (- 1.767), 0.107, (- 0.408), (- 0.866), (- 1.774), 1.248, 0.939, (- 0.033), 1.523, 1.168, (- 0.744), 0.209, (- 0.168), (- 0.316), 0.207, (- 0.432), 0.047, (- 0.646), (- 0.664)], [(- 0.185), (- 0.613), (- 1.695), 1.602, (- 0.32), (- 0.277), 0.967, 0.728, (- 0.965), (- 0.234), 1.069, (- 0.63), (- 1.631), 0.711, 0.426, 1.298, (- 0.191), (- 0.467), (- 0.771), 0.971, (- 0.118), (- 1.577), (- 2.064), (- 0.055), (- 0.59), 0.642, (- 0.997), 1.251, 0.538, 1.367, 0.106, 1.704]]])
_tf
def test_small_model_tf(self):
    """TensorFlow counterpart of test_small_model_pt: same checkpoint, same
    pinned (simplified) hidden states."""
    feature_extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='tf')
    outputs = feature_extractor('This is a test')
    # Shape (1, seq_len, 32); values rounded by nested_simplify.
    self.assertEqual(nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, (- 0.526), (- 1.71), (- 1.276), 0.756, (- 0.775), (- 1.048), (- 0.25), (- 0.595), (- 0.137), (- 0.598), 2.022, (- 0.812), 0.284, (- 0.488), (- 0.391), (- 0.403), (- 0.525), (- 0.061), (- 0.228), 1.086, 0.378, (- 0.14), 0.599, (- 0.087), (- 2.259), (- 0.098)], [1.676, 0.232, (- 1.508), (- 0.145), 1.798, (- 1.388), 1.331, (- 0.37), (- 0.939), 0.043, 0.06, (- 0.414), (- 1.408), 0.24, 0.622, (- 0.55), (- 0.569), 1.873, (- 0.706), 1.924, (- 0.254), 1.927, (- 0.423), 0.152, (- 0.952), 0.509, (- 0.496), (- 0.968), 0.093, (- 1.049), (- 0.65), 0.312], [0.207, (- 0.775), (- 1.822), 0.321, (- 0.71), (- 0.201), 0.3, 1.146, (- 0.233), (- 0.753), (- 0.305), 1.309, (- 1.47), (- 0.21), 1.802, (- 1.555), (- 1.175), 1.323, (- 0.303), 0.722, (- 0.076), 0.103, (- 1.406), 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, (- 0.9), (- 1.068), 0.438], [0.615, 1.077, 0.171, (- 0.175), 1.3, 0.901, (- 0.653), (- 0.138), 0.341, (- 0.654), (- 0.184), (- 0.441), (- 0.424), 0.356, (- 0.075), 0.26, (- 1.023), 0.814, 0.524, (- 0.904), (- 0.204), (- 0.623), 1.234, (- 1.03), 2.594, 0.56, 1.831, (- 0.199), (- 1.508), (- 0.492), (- 1.687), (- 2.165)], [0.129, 0.008, (- 1.279), (- 0.412), (- 0.004), 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, (- 0.799), (- 1.626), (- 1.26), 0.595, (- 0.316), (- 1.399), 0.232, 0.264, 1.386, (- 1.171), (- 0.256), (- 0.256), (- 1.944), 1.168, (- 0.368), (- 0.714), (- 0.51), 0.454, 1.148], [(- 0.32), 0.29, (- 1.309), (- 0.177), 0.453, 0.636, (- 0.024), 0.509, 0.931, (- 1.754), (- 1.575), 0.786, 0.046, (- 1.165), (- 1.416), 1.373, 1.293, (- 0.285), (- 1.541), (- 1.186), (- 0.106), (- 0.994), 2.001, 0.972, (- 0.02), 1.654, (- 0.236), 0.643, 1.02, 0.572, (- 0.914), (- 0.154)], [0.7, (- 0.937), 0.441, 0.25, 0.78, (- 0.022), 0.282, (- 0.095), 1.558, (- 0.336), 1.706, 0.884, 1.28, 0.198, (- 0.796), 1.218, (- 1.769), 1.197, (- 0.342), (- 0.177), (- 0.645), 1.364, 0.008, (- 0.597), (- 0.484), (- 2.772), (- 0.696), (- 0.632), (- 0.34), (- 1.527), (- 0.562), 0.862], [2.504, 0.831, (- 1.271), (- 0.033), 0.298, (- 0.735), 1.339, 1.74, 0.233, (- 1.424), (- 0.819), (- 0.761), 0.291, 0.853, (- 0.092), (- 0.885), 0.164, 1.025, 0.907, 0.749, (- 1.515), (- 0.545), (- 1.365), 0.271, 0.034, (- 2.005), 0.031, 0.244, 0.621, 0.176, 0.336, (- 1.196)], [(- 0.711), 0.591, (- 1.001), (- 0.946), 0.784, (- 1.66), 1.545, 0.799, (- 0.857), 1.148, 0.213, (- 0.285), 0.464, (- 0.139), 0.79, (- 1.663), (- 1.121), 0.575, (- 0.178), (- 0.508), 1.565, (- 0.242), (- 0.346), 1.024, (- 1.135), (- 0.158), (- 2.101), 0.275, 2.009, (- 0.425), 0.716, 0.981], [0.912, (- 1.186), (- 0.846), (- 0.421), (- 1.315), (- 0.827), 0.309, 0.533, 1.029, (- 2.343), 1.513, (- 1.238), 1.487, (- 0.849), 0.896, (- 0.927), (- 0.459), 0.159, 0.177, 0.873, 0.935, 1.433, (- 0.485), 0.737, 1.327, (- 0.338), 1.608, (- 0.47), (- 0.445), (- 1.118), (- 0.213), (- 0.446)], [(- 0.434), (- 1.362), (- 1.098), (- 1.068), 1.507, 0.003, 0.413, (- 0.395), 0.897, (- 0.237), 1.405, (- 0.344), 1.693, 0.677, 0.097, (- 0.257), (- 0.602), 1.026, (- 1.229), 0.855, (- 0.713), 1.014, 0.443, 0.238, 0.425, (- 2.184), 1.933, (- 1.157), (- 1.132), (- 0.597), (- 0.785), 0.967], [0.58, (- 0.971), 0.789, (- 0.468), (- 0.576), 1.779, 1.747, 1.715, (- 1.939), 0.125, 0.656, (- 0.042), (- 1.024), (- 1.767), 0.107, (- 0.408), (- 0.866), (- 1.774), 1.248, 0.939, (- 0.033), 1.523, 1.168, (- 0.744), 0.209, (- 0.168), (- 0.316), 0.207, (- 0.432), 0.047, (- 0.646), (- 0.664)], [(- 0.185), (- 0.613), (- 1.695), 1.602, (- 0.32), (- 0.277), 0.967, 0.728, (- 0.965), (- 0.234), 1.069, (- 0.63), (- 1.631), 0.711, 0.426, 1.298, (- 0.191), (- 0.467), (- 0.771), 0.971, (- 0.118), (- 1.577), (- 2.064), (- 0.055), (- 0.59), 0.642, (- 0.997), 1.251, 0.538, 1.367, 0.106, 1.704]]])
_torch
def test_tokenization_small_model_pt(self):
    """Check that tokenize_kwargs (truncation/padding/max_length) shape the
    output as expected, and that duplicated truncation args raise (PyTorch)."""
    feature_extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='pt')
    outputs = feature_extractor('This is a test')
    # Baseline: pinned (simplified) hidden states for the default call.
    self.assertEqual(nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, (- 0.526), (- 1.71), (- 1.276), 0.756, (- 0.775), (- 1.048), (- 0.25), (- 0.595), (- 0.137), (- 0.598), 2.022, (- 0.812), 0.284, (- 0.488), (- 0.391), (- 0.403), (- 0.525), (- 0.061), (- 0.228), 1.086, 0.378, (- 0.14), 0.599, (- 0.087), (- 2.259), (- 0.098)], [1.676, 0.232, (- 1.508), (- 0.145), 1.798, (- 1.388), 1.331, (- 0.37), (- 0.939), 0.043, 0.06, (- 0.414), (- 1.408), 0.24, 0.622, (- 0.55), (- 0.569), 1.873, (- 0.706), 1.924, (- 0.254), 1.927, (- 0.423), 0.152, (- 0.952), 0.509, (- 0.496), (- 0.968), 0.093, (- 1.049), (- 0.65), 0.312], [0.207, (- 0.775), (- 1.822), 0.321, (- 0.71), (- 0.201), 0.3, 1.146, (- 0.233), (- 0.753), (- 0.305), 1.309, (- 1.47), (- 0.21), 1.802, (- 1.555), (- 1.175), 1.323, (- 0.303), 0.722, (- 0.076), 0.103, (- 1.406), 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, (- 0.9), (- 1.068), 0.438], [0.615, 1.077, 0.171, (- 0.175), 1.3, 0.901, (- 0.653), (- 0.138), 0.341, (- 0.654), (- 0.184), (- 0.441), (- 0.424), 0.356, (- 0.075), 0.26, (- 1.023), 0.814, 0.524, (- 0.904), (- 0.204), (- 0.623), 1.234, (- 1.03), 2.594, 0.56, 1.831, (- 0.199), (- 1.508), (- 0.492), (- 1.687), (- 2.165)], [0.129, 0.008, (- 1.279), (- 0.412), (- 0.004), 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, (- 0.799), (- 1.626), (- 1.26), 0.595, (- 0.316), (- 1.399), 0.232, 0.264, 1.386, (- 1.171), (- 0.256), (- 0.256), (- 1.944), 1.168, (- 0.368), (- 0.714), (- 0.51), 0.454, 1.148], [(- 0.32), 0.29, (- 1.309), (- 0.177), 0.453, 0.636, (- 0.024), 0.509, 0.931, (- 1.754), (- 1.575), 0.786, 0.046, (- 1.165), (- 1.416), 1.373, 1.293, (- 0.285), (- 1.541), (- 1.186), (- 0.106), (- 0.994), 2.001, 0.972, (- 0.02), 1.654, (- 0.236), 0.643, 1.02, 0.572, (- 0.914), (- 0.154)], [0.7, (- 0.937), 0.441, 0.25, 0.78, (- 0.022), 0.282, (- 0.095), 1.558, (- 0.336), 1.706, 0.884, 1.28, 0.198, (- 0.796), 1.218, (- 1.769), 1.197, (- 0.342), (- 0.177), (- 0.645), 1.364, 0.008, (- 0.597), (- 0.484), (- 2.772), (- 0.696), (- 0.632), (- 0.34), (- 1.527), (- 0.562), 0.862], [2.504, 0.831, (- 1.271), (- 0.033), 0.298, (- 0.735), 1.339, 1.74, 0.233, (- 1.424), (- 0.819), (- 0.761), 0.291, 0.853, (- 0.092), (- 0.885), 0.164, 1.025, 0.907, 0.749, (- 1.515), (- 0.545), (- 1.365), 0.271, 0.034, (- 2.005), 0.031, 0.244, 0.621, 0.176, 0.336, (- 1.196)], [(- 0.711), 0.591, (- 1.001), (- 0.946), 0.784, (- 1.66), 1.545, 0.799, (- 0.857), 1.148, 0.213, (- 0.285), 0.464, (- 0.139), 0.79, (- 1.663), (- 1.121), 0.575, (- 0.178), (- 0.508), 1.565, (- 0.242), (- 0.346), 1.024, (- 1.135), (- 0.158), (- 2.101), 0.275, 2.009, (- 0.425), 0.716, 0.981], [0.912, (- 1.186), (- 0.846), (- 0.421), (- 1.315), (- 0.827), 0.309, 0.533, 1.029, (- 2.343), 1.513, (- 1.238), 1.487, (- 0.849), 0.896, (- 0.927), (- 0.459), 0.159, 0.177, 0.873, 0.935, 1.433, (- 0.485), 0.737, 1.327, (- 0.338), 1.608, (- 0.47), (- 0.445), (- 1.118), (- 0.213), (- 0.446)], [(- 0.434), (- 1.362), (- 1.098), (- 1.068), 1.507, 0.003, 0.413, (- 0.395), 0.897, (- 0.237), 1.405, (- 0.344), 1.693, 0.677, 0.097, (- 0.257), (- 0.602), 1.026, (- 1.229), 0.855, (- 0.713), 1.014, 0.443, 0.238, 0.425, (- 2.184), 1.933, (- 1.157), (- 1.132), (- 0.597), (- 0.785), 0.967], [0.58, (- 0.971), 0.789, (- 0.468), (- 0.576), 1.779, 1.747, 1.715, (- 1.939), 0.125, 0.656, (- 0.042), (- 1.024), (- 1.767), 0.107, (- 0.408), (- 0.866), (- 1.774), 1.248, 0.939, (- 0.033), 1.523, 1.168, (- 0.744), 0.209, (- 0.168), (- 0.316), 0.207, (- 0.432), 0.047, (- 0.646), (- 0.664)], [(- 0.185), (- 0.613), (- 1.695), 1.602, (- 0.32), (- 0.277), 0.967, 0.728, (- 0.965), (- 0.234), 1.069, (- 0.63), (- 1.631), 0.711, 0.426, 1.298, (- 0.191), (- 0.467), (- 0.771), 0.971, (- 0.118), (- 1.577), (- 2.064), (- 0.055), (- 0.59), 0.642, (- 0.997), 1.251, 0.538, 1.367, 0.106, 1.704]]])
    # max_length truncation to 3 tokens -> (3, hidden) after squeeze.
    tokenize_kwargs = {'max_length': 3}
    outputs = feature_extractor('This is a test', tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (3, 32))
    # Batch of 5, padded/truncated to 4 tokens -> (5, 4, hidden).
    tokenize_kwargs = {'truncation': True, 'padding': True, 'max_length': 4}
    outputs = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
    # truncation may also be passed at call level alongside tokenize_kwargs.
    tokenize_kwargs = {'padding': True, 'max_length': 4}
    outputs = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], truncation=True, tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
    # Specifying truncation both ways is ambiguous and must raise.
    tokenize_kwargs = {'truncation': True}
    with self.assertRaises(ValueError):
        _ = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], truncation=True, tokenize_kwargs=tokenize_kwargs)
_tf
def test_tokenization_small_model_tf(self):
    """TensorFlow counterpart of test_tokenization_small_model_pt: verify
    tokenize_kwargs shaping and the duplicated-truncation error."""
    feature_extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='tf')
    outputs = feature_extractor('This is a test')
    # Baseline: pinned (simplified) hidden states for the default call.
    self.assertEqual(nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, (- 0.526), (- 1.71), (- 1.276), 0.756, (- 0.775), (- 1.048), (- 0.25), (- 0.595), (- 0.137), (- 0.598), 2.022, (- 0.812), 0.284, (- 0.488), (- 0.391), (- 0.403), (- 0.525), (- 0.061), (- 0.228), 1.086, 0.378, (- 0.14), 0.599, (- 0.087), (- 2.259), (- 0.098)], [1.676, 0.232, (- 1.508), (- 0.145), 1.798, (- 1.388), 1.331, (- 0.37), (- 0.939), 0.043, 0.06, (- 0.414), (- 1.408), 0.24, 0.622, (- 0.55), (- 0.569), 1.873, (- 0.706), 1.924, (- 0.254), 1.927, (- 0.423), 0.152, (- 0.952), 0.509, (- 0.496), (- 0.968), 0.093, (- 1.049), (- 0.65), 0.312], [0.207, (- 0.775), (- 1.822), 0.321, (- 0.71), (- 0.201), 0.3, 1.146, (- 0.233), (- 0.753), (- 0.305), 1.309, (- 1.47), (- 0.21), 1.802, (- 1.555), (- 1.175), 1.323, (- 0.303), 0.722, (- 0.076), 0.103, (- 1.406), 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, (- 0.9), (- 1.068), 0.438], [0.615, 1.077, 0.171, (- 0.175), 1.3, 0.901, (- 0.653), (- 0.138), 0.341, (- 0.654), (- 0.184), (- 0.441), (- 0.424), 0.356, (- 0.075), 0.26, (- 1.023), 0.814, 0.524, (- 0.904), (- 0.204), (- 0.623), 1.234, (- 1.03), 2.594, 0.56, 1.831, (- 0.199), (- 1.508), (- 0.492), (- 1.687), (- 2.165)], [0.129, 0.008, (- 1.279), (- 0.412), (- 0.004), 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, (- 0.799), (- 1.626), (- 1.26), 0.595, (- 0.316), (- 1.399), 0.232, 0.264, 1.386, (- 1.171), (- 0.256), (- 0.256), (- 1.944), 1.168, (- 0.368), (- 0.714), (- 0.51), 0.454, 1.148], [(- 0.32), 0.29, (- 1.309), (- 0.177), 0.453, 0.636, (- 0.024), 0.509, 0.931, (- 1.754), (- 1.575), 0.786, 0.046, (- 1.165), (- 1.416), 1.373, 1.293, (- 0.285), (- 1.541), (- 1.186), (- 0.106), (- 0.994), 2.001, 0.972, (- 0.02), 1.654, (- 0.236), 0.643, 1.02, 0.572, (- 0.914), (- 0.154)], [0.7, (- 0.937), 0.441, 0.25, 0.78, (- 0.022), 0.282, (- 0.095), 1.558, (- 0.336), 1.706, 0.884, 1.28, 0.198, (- 0.796), 1.218, (- 1.769), 1.197, (- 0.342), (- 0.177), (- 0.645), 1.364, 0.008, (- 0.597), (- 0.484), (- 2.772), (- 0.696), (- 0.632), (- 0.34), (- 1.527), (- 0.562), 0.862], [2.504, 0.831, (- 1.271), (- 0.033), 0.298, (- 0.735), 1.339, 1.74, 0.233, (- 1.424), (- 0.819), (- 0.761), 0.291, 0.853, (- 0.092), (- 0.885), 0.164, 1.025, 0.907, 0.749, (- 1.515), (- 0.545), (- 1.365), 0.271, 0.034, (- 2.005), 0.031, 0.244, 0.621, 0.176, 0.336, (- 1.196)], [(- 0.711), 0.591, (- 1.001), (- 0.946), 0.784, (- 1.66), 1.545, 0.799, (- 0.857), 1.148, 0.213, (- 0.285), 0.464, (- 0.139), 0.79, (- 1.663), (- 1.121), 0.575, (- 0.178), (- 0.508), 1.565, (- 0.242), (- 0.346), 1.024, (- 1.135), (- 0.158), (- 2.101), 0.275, 2.009, (- 0.425), 0.716, 0.981], [0.912, (- 1.186), (- 0.846), (- 0.421), (- 1.315), (- 0.827), 0.309, 0.533, 1.029, (- 2.343), 1.513, (- 1.238), 1.487, (- 0.849), 0.896, (- 0.927), (- 0.459), 0.159, 0.177, 0.873, 0.935, 1.433, (- 0.485), 0.737, 1.327, (- 0.338), 1.608, (- 0.47), (- 0.445), (- 1.118), (- 0.213), (- 0.446)], [(- 0.434), (- 1.362), (- 1.098), (- 1.068), 1.507, 0.003, 0.413, (- 0.395), 0.897, (- 0.237), 1.405, (- 0.344), 1.693, 0.677, 0.097, (- 0.257), (- 0.602), 1.026, (- 1.229), 0.855, (- 0.713), 1.014, 0.443, 0.238, 0.425, (- 2.184), 1.933, (- 1.157), (- 1.132), (- 0.597), (- 0.785), 0.967], [0.58, (- 0.971), 0.789, (- 0.468), (- 0.576), 1.779, 1.747, 1.715, (- 1.939), 0.125, 0.656, (- 0.042), (- 1.024), (- 1.767), 0.107, (- 0.408), (- 0.866), (- 1.774), 1.248, 0.939, (- 0.033), 1.523, 1.168, (- 0.744), 0.209, (- 0.168), (- 0.316), 0.207, (- 0.432), 0.047, (- 0.646), (- 0.664)], [(- 0.185), (- 0.613), (- 1.695), 1.602, (- 0.32), (- 0.277), 0.967, 0.728, (- 0.965), (- 0.234), 1.069, (- 0.63), (- 1.631), 0.711, 0.426, 1.298, (- 0.191), (- 0.467), (- 0.771), 0.971, (- 0.118), (- 1.577), (- 2.064), (- 0.055), (- 0.59), 0.642, (- 0.997), 1.251, 0.538, 1.367, 0.106, 1.704]]])
    # max_length truncation to 3 tokens -> (3, hidden) after squeeze.
    tokenize_kwargs = {'max_length': 3}
    outputs = feature_extractor('This is a test', tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (3, 32))
    # Batch of 5, padded/truncated to 4 tokens -> (5, 4, hidden).
    tokenize_kwargs = {'truncation': True, 'padding': True, 'max_length': 4}
    outputs = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
    # truncation may also be passed at call level alongside tokenize_kwargs.
    tokenize_kwargs = {'padding': True, 'max_length': 4}
    outputs = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], truncation=True, tokenize_kwargs=tokenize_kwargs)
    self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32))
    # Specifying truncation both ways is ambiguous and must raise.
    tokenize_kwargs = {'truncation': True}
    with self.assertRaises(ValueError):
        _ = feature_extractor(['This is a test', 'This', 'This is', 'This is a', 'This is a test test test test'], truncation=True, tokenize_kwargs=tokenize_kwargs)
_torch
def test_return_tensors_pt(self):
    """return_tensors=True should yield a torch tensor, not nested lists."""
    extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='pt')
    result = extractor('This is a test', return_tensors=True)
    self.assertTrue(torch.is_tensor(result))
_tf
def test_return_tensors_tf(self):
    """return_tensors=True should yield a TF tensor, not nested lists."""
    extractor = pipeline(task='feature-extraction', model='hf-internal-testing/tiny-random-distilbert', framework='tf')
    result = extractor('This is a test', return_tensors=True)
    self.assertTrue(tf.is_tensor(result))
def get_shape(self, input_, shape=None):
    """Recursively compute the shape of a nested list of floats.

    Returns 0 for a bare float (a dimensionless leaf), otherwise a list of
    dimensions, e.g. [[1.0, 2.0], [3.0, 4.0]] -> [2, 2].

    Raises:
        ValueError: if a value is neither a float nor a list.
    """
    if shape is None:
        shape = []
    if isinstance(input_, list):
        # BUG FIX: the original passed the shared `shape` accumulator to
        # every child call, so sibling sublists appended their lengths into
        # one list and a 2x2 input produced [2, 2, 2]. Each child now gets
        # a fresh accumulator and only the first child's shape is combined
        # with this level's length.
        subshapes = [self.get_shape(in_, []) for in_ in input_]
        if all((s == 0) for s in subshapes):
            shape.append(len(input_))
        else:
            shape = [len(input_), *subshapes[0]]
    elif isinstance(input_, float):
        return 0
    else:
        raise ValueError('We expect lists of floats, nothing else')
    return shape
def get_test_pipeline(self, model, tokenizer, processor):
    """Build a FeatureExtractionPipeline for the generic pipeline test
    harness, skipping model families this test cannot handle.

    Returns (pipeline, example_inputs) or skips the test.
    """
    # NOTE: skipTest raises SkipTest, so each `return` below is defensive
    # and normally unreachable.
    if (tokenizer is None):
        self.skipTest('No tokenizer')
        return
    elif ((type(model.config) in FEATURE_EXTRACTOR_MAPPING) or isinstance(model.config, LxmertConfig) or (type(model.config) in IMAGE_PROCESSOR_MAPPING)):
        # Bimodal (text+vision/audio) models need a different setup.
        self.skipTest('This is a bimodal model, we need to find a more consistent way to switch on those models.')
        return
    elif model.config.is_encoder_decoder:
        self.skipTest('encoder_decoder models are trickier for this pipeline.\n Do we want encoder + decoder inputs to get some featues?\n Do we want encoder only features ?\n For now ignore those.\n ')
        return
    feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor)
    return (feature_extractor, ['This is a test', 'This is another test'])
def run_pipeline_test(self, feature_extractor, examples):
    """Smoke-test the pipeline on a single input, a batch of two, and a long
    truncated input, checking only the leading (batch) dimension."""
    single = feature_extractor('This is a test')
    self.assertEqual(self.get_shape(single)[0], 1)
    batched = feature_extractor(['This is a test', 'Another longer test'])
    self.assertEqual(self.get_shape(batched)[0], 2)
    truncated = feature_extractor(('This is a test' * 100), truncation=True)
    self.assertEqual(self.get_shape(truncated)[0], 1)
class MarianForCausalLM(metaclass=DummyObject):
    # Placeholder emitted when torch is not installed; any attempt to
    # instantiate it raises an informative "requires torch" error.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class TestPearlmutterHvp(TfGraphTestCase):
def test_pearl_mutter_hvp_1x1(self):
    """Hessian-vector product of f = a * x^2 in a single variable."""
    policy = HelperPolicy(n_vars=1)
    x = policy.get_params()[0]
    a_val = np.array([5.0])
    # NOTE(review): `a` is built as a constant 0.0 while the expected
    # Hessian uses a_val = 5.0; build_eval below is fed a_val, so `a` is
    # presumably re-fed at evaluation time — confirm against
    # PearlmutterHvp's contract.
    a = tf.constant([0.0])
    f = (a * (x ** 2))
    # d^2f/dx^2 = 2a.
    expected_hessian = (2 * a_val)
    vector = np.array([10.0])
    expected_hvp = (expected_hessian * vector)
    reg_coeff = 1e-05
    hvp = PearlmutterHvp()
    self.sess.run(tf.compat.v1.global_variables_initializer())
    hvp.update_hvp(f, policy, (a,), reg_coeff)
    hx = hvp.build_eval(np.array([a_val]))
    computed_hvp = hx(vector)
    assert np.allclose(computed_hvp, expected_hvp)
.parametrize('a_val, b_val, x_val, y_val, vector', [(1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, (- 2.0), 5.0, [0.0, (- 1.0)]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), ((- 2.2), (- 1.5), (- 12.3), 34.8, [2.2, 5.3]), ((- 1.5), 0.0, (- 0.002), 4.93, [0.1, (- 0.02)])])
def test_pearl_mutter_hvp_2x2(self, a_val, b_val, x_val, y_val, vector):
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
(x, y) = (params[0], params[1])
a = tf.constant(a_val)
b = tf.constant(b_val)
f = ((a * (x ** 2)) + (b * (y ** 2)))
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-05
hvp = PearlmutterHvp()
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp, atol=1e-06)
.parametrize('a_val, b_val, x_val, y_val, vector', [(1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, (- 2.0), 5.0, [0.0, (- 1.0)]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), ((- 2.2), (- 1.5), (- 12.3), 34.8, [2.2, 5.3]), ((- 1.5), 0.0, (- 0.002), 4.93, [0.1, (- 0.02)])])
def test_pearl_mutter_hvp_2x2_non_diagonal(self, a_val, b_val, x_val, y_val, vector):
a_val = [a_val]
b_val = [b_val]
vector = np.array([vector], dtype=np.float32)
policy = HelperPolicy(n_vars=2)
params = policy.get_params()
(x, y) = (params[0], params[1])
a = tf.constant(a_val)
b = tf.constant(b_val)
f = ((((a * (x ** 3)) + (b * (y ** 3))) + ((x ** 2) * y)) + ((y ** 2) * x))
expected_hessian = compute_hessian(f, [x, y])
expected_hvp = tf.matmul(vector, expected_hessian)
reg_coeff = 1e-05
hvp = PearlmutterHvp()
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(x.assign([x_val]))
self.sess.run(y.assign([y_val]))
hvp.update_hvp(f, policy, (a, b), reg_coeff)
hx = hvp.build_eval((np.array(a_val), np.array(b_val)))
hvp = hx(vector[0])
expected_hvp = expected_hvp.eval()
assert np.allclose(hvp, expected_hvp)
def test_pickleable(self):
policy = HelperPolicy(n_vars=1)
x = policy.get_params()[0]
a_val = np.array([5.0])
a = tf.constant([0.0])
f = (a * (x ** 2))
vector = np.array([10.0])
reg_coeff = 1e-05
hvp = PearlmutterHvp()
self.sess.run(tf.compat.v1.global_variables_initializer())
hvp.update_hvp(f, policy, (a,), reg_coeff)
hx = hvp.build_eval(np.array([a_val]))
before_pickle = hx(vector)
hvp = pickle.loads(pickle.dumps(hvp))
hvp.update_hvp(f, policy, (a,), reg_coeff)
after_pickle = hx(vector)
assert np.equal(before_pickle, after_pickle) |
def get_repeat_tasks(tasks, counts=5):
    """Expand each task name into repeated copies.

    Args:
        tasks: list of task names.
        counts: int applied to every task, or a per-task list of ints.

    Returns:
        (alltasks, isreptask): alltasks keeps the original name for the first
        copy and uses ``'{task}_{c}'`` for copy c >= 2; isreptask holds 0 for
        originals and 1 for repeats.
    """
    if isinstance(counts, int):
        counts = [counts] * len(tasks)
    alltasks = []
    isreptask = []
    # Dead-code fix: the original loop had an unreachable `else: continue`
    # branch (c is always >= 1 inside range(1, n + 1)).
    for task, count in zip(tasks, counts):
        for c in range(1, count + 1):
            if c == 1:
                alltasks.append(task)
                isreptask.append(0)
            else:
                # Copies beyond the first get a numeric suffix.
                alltasks.append(f'{task}_{c}')
                isreptask.append(1)
    return (alltasks, isreptask)
def _get_sailvos_instances_meta():
    """Build SAILVOS dataset metadata: the dataset-id -> contiguous-id map,
    class names and colors (ignored categories filtered out), and the set of
    ignored category ids."""
    kept = [c for c in SAILVOS_CATEGORIES if c['id'] not in sailvos_ignore]
    thing_ids = [c['id'] for c in kept]
    # Exactly 163 categories must remain after filtering.
    assert len(thing_ids) == 163, len(thing_ids)
    return {
        'thing_dataset_id_to_contiguous_id': {cid: idx for idx, cid in enumerate(thing_ids)},
        'thing_classes': [c['name'] for c in kept],
        'thing_colors': [c['color'] for c in kept],
        'ignore_classes': sailvos_ignore,
    }
class SquadExample(object):
    """A single SQuAD question-answering example.

    For examples without an answer span, orig_answer_text, start_position
    and end_position are left as None.
    """

    def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = []
        s.append('qas_id: %s' % tokenization.printable_text(self.qas_id))
        s.append('question_text: %s' % tokenization.printable_text(self.question_text))
        s.append('doc_tokens: [%s]' % ' '.join(self.doc_tokens))
        # Compare against None explicitly: 0 is a valid token index and the
        # original truthiness test silently skipped it.
        if self.start_position is not None:
            s.append('start_position: %d' % self.start_position)
        # Bug fix: this previously re-tested start_position (copy-paste),
        # so end_position could be formatted while still being None.
        if self.end_position is not None:
            s.append('end_position: %d' % self.end_position)
        return ', '.join(s)
class ConvBNReLU(nn.Sequential):
    """Conv2d -> BatchNorm2d -> ReLU6 block with size-preserving padding."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, bn_aff=True):
        # Padding that keeps spatial size for odd kernels at stride 1.
        pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False)
        bn = nn.BatchNorm2d(out_planes, affine=bn_aff)
        act = nn.ReLU6(inplace=True)
        super(ConvBNReLU, self).__init__(conv, bn, act)
class PropPredictor(nn.Module):
    """Molecular property predictor: a graph encoder backbone followed by a
    two-layer readout head producing n_classes outputs per molecule."""

    def __init__(self, args, n_classes=1):
        super(PropPredictor, self).__init__()
        self.args = args
        hidden_size = args.hidden_size
        # Select the graph encoder backbone from the configured model type.
        if args.model_type == 'conv_net':
            encoder = MolConvNet(args, use_attn=False)
        elif args.model_type == 'conv_net_attn':
            encoder = MolConvNet(args, use_attn=True)
        elif args.model_type == 'transformer':
            encoder = MolTransformer(args)
        else:
            assert False
        self.model = encoder
        self.W_p_h = nn.Linear(encoder.output_size, hidden_size)
        self.W_p_o = nn.Linear(hidden_size, n_classes)

    def aggregate_atom_h(self, atom_h, scope):
        """Pool per-atom features into one vector per molecule.

        `scope` is a list of (start, length) spans indexing rows of atom_h;
        pooling uses sum or mean per the configured agg_func.
        """
        pooled = []
        for start, length in scope:
            span = atom_h.narrow(0, start, length)
            if self.args.agg_func == 'sum':
                pooled.append(span.sum(dim=0))
            elif self.args.agg_func == 'mean':
                pooled.append(span.mean(dim=0))
            else:
                assert False
        return torch.stack(pooled, dim=0)

    def forward(self, mol_graph, stats_tracker, output_attn=False):
        """Encode the molecular graph and predict per-molecule properties."""
        attn_list = None
        # Only the transformer backbone also returns attention maps.
        if self.args.model_type == 'transformer':
            atom_h, attn_list = self.model(mol_graph, stats_tracker)
        else:
            atom_h = self.model(mol_graph, stats_tracker)
        mol_h = self.aggregate_atom_h(atom_h, mol_graph.scope)
        mol_h = nn.ReLU()(self.W_p_h(mol_h))
        mol_o = self.W_p_o(mol_h)
        if output_attn:
            return (mol_o, attn_list)
        return mol_o
class TVMByteArray(ctypes.Structure):
    """ctypes mirror of TVM's byte-array struct: a raw byte pointer plus its length."""

    # Field order and types must match the C-side struct layout exactly.
    _fields_ = [('data', ctypes.POINTER(ctypes.c_byte)), ('size', ctypes.c_size_t)]
def hypergraph_Gnm(num_v: int, num_e: int, method: str='low_order_first', prob_k_list: Optional[List[float]]=None):
    """Generate a random hypergraph with num_v vertices and num_e distinct edges.

    The edge cardinality k (2..num_v) is sampled according to `method`:
    - 'uniform': proportional to the number of k-subsets of the vertex set,
    - 'low_order_first': geometrically decaying weights 3**-i (small edges likely),
    - 'high_order_first': the same weights reversed (large edges likely),
    - 'custom': caller-supplied prob_k_list of length num_v - 1.
    """
    assert num_v > 1, 'num_v must be greater than 1'
    assert num_e > 0, 'num_e must be greater than 0'
    assert method in ('uniform', 'low_order_first', 'high_order_first', 'custom'), "method must be one of 'uniform', 'low_order_first', 'high_order_first', 'custom'"
    deg_e_list = list(range(2, num_v + 1))
    if method == 'uniform':
        prob_k_list = [C(num_v, k) / ((2 ** num_v) - 1) for k in deg_e_list]
    elif method == 'low_order_first':
        prob_k_list = [3 ** (-k) for k in range(len(deg_e_list))]
        sum_of_prob_k_list = sum(prob_k_list)
        prob_k_list = [prob_k / sum_of_prob_k_list for prob_k in prob_k_list]
    elif method == 'high_order_first':
        # Bug fix: list.reverse() mutates in place and returns None, so the
        # original assigned None here and crashed on the following sum().
        prob_k_list = [3 ** (-k) for k in range(len(deg_e_list))][::-1]
        sum_of_prob_k_list = sum(prob_k_list)
        prob_k_list = [prob_k / sum_of_prob_k_list for prob_k in prob_k_list]
    elif method == 'custom':
        assert prob_k_list is not None, 'prob_k_list must be provided when method is custom'
        assert len(prob_k_list) == (num_v - 1), "prob_k_list must have length `num_v - 1'"
        sum_of_prob_k_list = sum(prob_k_list)
        prob_k_list = [prob_k / sum_of_prob_k_list for prob_k in prob_k_list]
    else:
        raise ValueError(f'Unknown method: {method}')
    # Rejection-sample distinct sorted vertex tuples until num_e are collected.
    edges = set()
    while len(edges) < num_e:
        k = random.choices(deg_e_list, weights=prob_k_list)[0]
        e = tuple(sorted(random.sample(range(num_v), k)))
        edges.add(e)
    return Hypergraph(num_v, list(edges))
def torch_mean(input, dim=None, keepdim=False, out=None):
    """Wrapper around the saved original torch.mean that first routes `input`
    through a module-level tensor-op hook when called from a module's forward.

    NOTE(review): keepdim and out are accepted but never forwarded to the
    underlying mean — presumably intentional for this tracing shim; confirm.
    """
    global raw_torch_op, module_tensor_op
    # Inspect the caller: the hook fires only when the caller is a function
    # named 'forward' and its line number matches the registered op name.
    _stack = inspect.stack()
    if (('forward' == _stack[1].function) and ('torch_mean_{}_1'.format(_stack[1].lineno) == module_tensor_op.get_module_name())):
        input = module_tensor_op(input)
        module_tensor_op.add_idx_name_tensor_op()
    if (dim is None):
        x = raw_torch_op['mean'](input)
    else:
        x = raw_torch_op['mean'](input, dim)
    # Drop the captured stack frames promptly to avoid reference cycles.
    del _stack
    return x
def split(t, split_size=None):
    """Split `t` into chunks of `split_size` along its first dimension.

    Returns `t` unchanged when no split size is given; tensors use
    Tensor.split, other iterables use split_iterable.

    Raises:
        TypeError: if `t` is neither a tensor nor an iterable.
    """
    if not exists(split_size):
        return t
    if isinstance(t, torch.Tensor):
        return t.split(split_size, dim=0)
    if isinstance(t, Iterable):
        return split_iterable(t, split_size)
    # Bug fix: the original `return TypeError` handed back the exception
    # class instead of raising it.
    raise TypeError(f'unsupported type for split: {type(t)}')
class AttentionPooling(nn.Module):
    """Pool a sequence to one vector per batch element via single-head
    attention with a single learned query token."""

    def __init__(self, in_features: int, keep_seq_dim: bool=False):
        super().__init__()
        self.norm = nn.LayerNorm(in_features)
        # One learnable query token that attends over the whole sequence.
        self.query = nn.Parameter(torch.zeros(1, 1, in_features))
        self.attn = nn.MultiheadAttention(in_features, 1, bias=False)
        # If True, the output keeps a (singleton) sequence axis, laid out (b, c, t).
        self.keep_seq_dim = keep_seq_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # assumes x is (batch, channels, time) — TODO confirm against callers.
        x = rearrange(x, 'b c t -> t b c')
        x = self.norm(x)
        # Broadcast the single query across the batch dimension.
        q = repeat(self.query, '() () c -> () b c', b=x.shape[1])
        (attn, _) = self.attn(q, x, x, need_weights=False)
        if self.keep_seq_dim:
            attn = rearrange(attn, 't b c -> b c t')
        else:
            # Drop the singleton query axis: output is (batch, channels).
            attn = attn.squeeze(dim=0)
        return attn
class ConstantDice(Dice):
    """A die that always lands on the same fixed value."""

    def __init__(self, constant):
        super().__init__()
        self.constant = constant
        # The maximum possible roll equals the constant itself.
        self.max = constant

    def roll(self):
        """Deterministic roll: always returns the configured constant."""
        return self.constant

    def describe(self):
        """Describe the die by the repr of its constant value."""
        return repr(self.constant)
class BasicBlock(nn.Module):
    """ResNet-style basic residual block: two 3x3 convolutions (no batch
    norm here) with an identity/downsample skip connection."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
        # Optional projection applied to the skip path when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        out += identity
        return self.relu(out)
class actionAngleHarmonic(actionAngle):
    """Action-angle machinery for the 1D harmonic oscillator with a fixed
    frequency omega; inputs are phase-space points (x, vx)."""

    def __init__(self, *args, **kwargs):
        actionAngle.__init__(self, ro=kwargs.get('ro', None), vo=kwargs.get('vo', None))
        if 'omega' not in kwargs:
            raise OSError('Must specify omega= for actionAngleHarmonic')
        # Parse omega into internal units using the configured ro/vo scales.
        self._omega = conversion.parse_frequency(kwargs.get('omega'), ro=self._ro, vo=self._vo)

    def _evaluate(self, *args, **kwargs):
        """Return the action J = (vx^2/omega + omega*x^2) / 2."""
        if len(args) != 2:
            raise ValueError('actionAngleHarmonic __call__ input not understood')
        x, vx = args
        return ((vx ** 2.0) / self._omega + self._omega * (x ** 2.0)) / 2.0

    def _actionsFreqs(self, *args, **kwargs):
        """Return (action, frequency); the frequency is the constant omega."""
        if len(args) != 2:
            raise ValueError('actionAngleHarmonic __call__ input not understood')
        x, vx = args
        action = ((vx ** 2.0) / self._omega + self._omega * (x ** 2.0)) / 2.0
        return (action, self._omega * numpy.ones_like(x))

    def _actionsFreqsAngles(self, *args, **kwargs):
        """Return (action, frequency, angle) with angle = atan2(omega*x, vx)."""
        if len(args) != 2:
            raise ValueError('actionAngleHarmonic __call__ input not understood')
        x, vx = args
        action = ((vx ** 2.0) / self._omega + self._omega * (x ** 2.0)) / 2.0
        return (action, self._omega * numpy.ones_like(x), numpy.arctan2(self._omega * x, vx))
def _update_registered_buffer(module, buffer_name, state_dict_key, state_dict, policy='resize_if_empty', dtype=torch.int):
    """Synchronize one registered buffer on `module` with the size found in
    the state dict, according to `policy`:

    - 'resize_if_empty': resize the existing buffer only if it is empty,
    - 'resize': always resize the existing buffer,
    - 'register': create the buffer (it must not already exist).
    """
    new_size = state_dict[state_dict_key].size()
    registered_buf = find_named_buffer(module, buffer_name)
    if policy == 'register':
        if registered_buf is not None:
            raise RuntimeError(f'buffer "{buffer_name}" was already registered')
        module.register_buffer(buffer_name, torch.empty(new_size, dtype=dtype).fill_(0))
    elif policy in ('resize_if_empty', 'resize'):
        if registered_buf is None:
            raise RuntimeError(f'buffer "{buffer_name}" was not registered')
        if policy == 'resize' or registered_buf.numel() == 0:
            registered_buf.resize_(new_size)
    else:
        raise ValueError(f'Invalid policy "{policy}"')
def test_tuple_return_obj():
    """Regression test: a value extracted from a returned tuple should track
    staleness only against the closure variables it actually came from."""
    run_cell('\n x = 0\n y = 1\n a = x + 42\n b = y + 77\n def foo():\n return [a], [b]\n ')
    # t comes from the second tuple element, i.e. depends on y (via b), not x.
    run_cell('t = foo()[1][0]')
    run_cell('x = 9')
    run_cell('logging.info(t)')
    assert_not_detected('`t` independent of updated `x`')
    run_cell('y = 10')
    run_cell('logging.info(t)')
    assert_detected('`t` depends on old version of `y`')
def _upsample_flops_compute(input, size=None, scale_factor=None, mode='nearest', align_corners=None):
if (size is not None):
if isinstance(size, (tuple, list)):
return (int(_prod(size)), 0)
else:
return (int(size), 0)
assert (scale_factor is not None), 'either size or scale_factor should be defined'
flops = torch.numel(input)
if (isinstance(scale_factor, (tuple, list)) and (len(scale_factor) == len(input))):
(flops * int(_prod(scale_factor)))
else:
(flops * (scale_factor ** len(input)))
return (flops, 0) |
def _get_lr_scheduler(lr, decay):
    """Build a Paddle learning-rate scheduler from a (name, *params) spec;
    only 'inverse time' decay is currently supported."""
    if decay[0] != 'inverse time':
        raise NotImplementedError(f'{decay[0]} decay is not implemented in PaddlePaddle')
    return paddle.optimizer.lr.InverseTimeDecay(lr, decay[1], verbose=False)
class ObservationGroupEncoder(Module):
    """Encodes multiple named observation groups — each an OrderedDict mapping
    modality name to shape — with one encoder per group, concatenating the
    per-group features into a single flat vector."""

    def __init__(self, observation_group_shapes, feature_activation=nn.ReLU, encoder_kwargs=None):
        super(ObservationGroupEncoder, self).__init__()
        # Expect a two-level OrderedDict: group name -> (modality -> shape).
        assert isinstance(observation_group_shapes, OrderedDict)
        assert np.all([isinstance(observation_group_shapes[k], OrderedDict) for k in observation_group_shapes])
        self.observation_group_shapes = observation_group_shapes
        # One encoder network per observation group, built by the shared factory.
        self.nets = nn.ModuleDict()
        for obs_group in self.observation_group_shapes:
            self.nets[obs_group] = obs_encoder_factory(obs_shapes=self.observation_group_shapes[obs_group], feature_activation=feature_activation, encoder_kwargs=encoder_kwargs)

    def forward(self, **inputs):
        """Encode each group's observations and concatenate along the last dim.

        Every configured group name must be present in `inputs`.
        """
        assert set(self.observation_group_shapes.keys()).issubset(inputs), '{} does not contain all observation groups {}'.format(list(inputs.keys()), list(self.observation_group_shapes.keys()))
        outputs = []
        for obs_group in self.observation_group_shapes:
            outputs.append(self.nets[obs_group].forward(inputs[obs_group]))
        return torch.cat(outputs, dim=(- 1))

    def output_shape(self):
        """Total flattened feature dim: sum of each group's encoder output dim."""
        feat_dim = 0
        for obs_group in self.observation_group_shapes:
            feat_dim += self.nets[obs_group].output_shape()[0]
        return [feat_dim]

    def __repr__(self):
        """Pretty string listing each observation group and its encoder."""
        header = '{}'.format(str(self.__class__.__name__))
        msg = ''
        for k in self.observation_group_shapes:
            msg += '\n'
            indent = (' ' * 4)
            msg += textwrap.indent('group={}\n{}'.format(k, self.nets[k]), indent)
        msg = (((header + '(') + msg) + '\n)')
        return msg
def multi_topk_meter(ctx: Context, train_ctx: Context, k: int=1, init_num: int=1, end_num: int=0) -> dict:
    """Track per-branch top-k accuracy across batches.

    Lazily creates one AverageMeter per tracked output branch on `ctx`,
    resets them at batch 0, updates them with this batch's accuracy, and
    returns {branch_name: running average}.
    """

    def accuracy(output, target, k=1):
        # Standard top-k accuracy (in percent) for a single batch.
        batch_size = target.size(0)
        _, pred = output.topk(k, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        return correct_k.mul_(100.0 / batch_size)

    branch_ids = range(init_num, len(train_ctx.output) - end_num)
    # Create missing meters on first sight of each branch.
    for i in branch_ids:
        name = 'branch_' + str(i)
        if name not in ctx:
            setattr(ctx, name, AverageMeter())
    # A new epoch (batch index 0) resets every meter.
    if train_ctx.batch_idx == 0:
        for i in branch_ids:
            getattr(ctx, 'branch_' + str(i)).reset()
    acc_list = {}
    for i in branch_ids:
        name = 'branch_' + str(i)
        meter = getattr(ctx, name)
        meter.update(accuracy(train_ctx.output[i], train_ctx.target, k).item())
        acc_list[name] = meter.avg
    return acc_list
class StandardAugInput(AugInput):
    """Standard augmentation input: an image plus optional boxes and semantic
    segmentation that must all be transformed consistently together."""

    def __init__(self, image: np.ndarray, *, boxes: Optional[np.ndarray]=None, sem_seg: Optional[np.ndarray]=None):
        # Validate the image dtype up front so every transform sees supported data.
        _check_img_dtype(image)
        self.image = image
        self.boxes = boxes
        self.sem_seg = sem_seg

    def transform(self, tfm: Transform) -> None:
        """Apply `tfm` in-place to the image and to whichever annotations are present."""
        self.image = tfm.apply_image(self.image)
        if (self.boxes is not None):
            self.boxes = tfm.apply_box(self.boxes)
        if (self.sem_seg is not None):
            self.sem_seg = tfm.apply_segmentation(self.sem_seg)
class RobertaPreLayerNormForCausalLM(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error unless
    the required backend (torch) is installed."""

    # Backends that must be present before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message when torch is missing.
        requires_backends(self, ['torch'])
def main(args):
    """Train a variational autoencoder on face images (TF1 graph mode).

    Builds the queue-based input pipeline, the encoder/decoder graph with the
    reparameterization trick, a KL + reconstruction loss, and runs the
    optimization loop while periodically saving checkpoints, reconstructed
    image grids, and an HDF5 training log.
    """
    # Per-channel statistics used to normalize input images.
    img_mean = np.array([134., 102., 87.])
    img_stddev = np.sqrt(np.array([3941., 2856., 2519.]))
    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()
    # Each run gets its own timestamped model directory.
    subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    model_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)
    if (not os.path.isdir(model_dir)):
        os.makedirs(model_dir)
    log_file_name = os.path.join(model_dir, 'logs.h5')
    facenet.write_arguments_to_file(args, os.path.join(model_dir, 'arguments.txt'))
    (src_path, _) = os.path.split(os.path.realpath(__file__))
    facenet.store_revision_info(src_path, model_dir, ' '.join(sys.argv))
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        global_step = tf.Variable(0, trainable=False)
        train_set = facenet.get_dataset(args.data_dir)
        (image_list, _) = facenet.get_image_paths_and_labels(train_set)
        # Multi-threaded queue-based input pipeline (TF1 style).
        input_queue = tf.train.string_input_producer(image_list, shuffle=True)
        nrof_preprocess_threads = 4
        image_per_thread = []
        for _ in range(nrof_preprocess_threads):
            file_contents = tf.read_file(input_queue.dequeue())
            image = tf.image.decode_image(file_contents, channels=3)
            image = tf.image.resize_image_with_crop_or_pad(image, args.input_image_size, args.input_image_size)
            image.set_shape((args.input_image_size, args.input_image_size, 3))
            image = tf.cast(image, tf.float32)
            image_per_thread.append([image])
        images = tf.train.batch_join(image_per_thread, batch_size=args.batch_size, capacity=((4 * nrof_preprocess_threads) * args.batch_size), allow_smaller_final_batch=False)
        images_norm = ((images - img_mean) / img_stddev)
        images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size, gen_image_size))
        # Reparameterization trick: z = mean + eps * std.
        (mean, log_variance) = vae.encoder(images_norm_resize, True)
        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp((log_variance / 2))
        latent_var = (mean + (epsilon * std))
        reconstructed_norm = vae.decoder(latent_var, True)
        reconstructed = ((reconstructed_norm * img_stddev) + img_mean)
        if (args.reconstruction_loss_type == 'PLAIN'):
            # Pixel-space squared-error reconstruction loss.
            images_resize = tf.image.resize_images(images, (gen_image_size, gen_image_size))
            reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow((images_resize - reconstructed), 2)))
        elif (args.reconstruction_loss_type == 'PERCEPTUAL'):
            network = importlib.import_module(args.model_def)
            reconstructed_norm_resize = tf.image.resize_images(reconstructed_norm, (args.input_image_size, args.input_image_size))
            # Stack originals and reconstructions so one forward pass through
            # the feature network yields both feature sets.
            shp = ([(- 1)] + images_norm.get_shape().as_list()[1:])
            input_images = tf.reshape(tf.stack([images_norm, reconstructed_norm_resize], axis=0), shp)
            (_, end_points) = network.inference(input_images, 1.0, phase_train=False, bottleneck_layer_size=128, weight_decay=0.0)
            feature_names = args.loss_features.replace(' ', '').split(',')
            reconstruction_loss_list = []
            for feature_name in feature_names:
                feature_flat = slim.flatten(end_points[feature_name])
                (image_feature, reconstructed_feature) = tf.unstack(tf.reshape(feature_flat, [2, args.batch_size, (- 1)]), num=2, axis=0)
                reconstruction_loss = tf.reduce_mean(tf.reduce_sum(tf.pow((image_feature - reconstructed_feature), 2)), name=(feature_name + '_loss'))
                reconstruction_loss_list.append(reconstruction_loss)
            reconstruction_loss = tf.add_n(reconstruction_loss_list, 'reconstruction_loss')
        else:
            # Bug fix: this previously fell through silently (pass), leaving
            # reconstruction_loss undefined and crashing later with a
            # NameError; fail fast with a clear message instead.
            raise ValueError('Invalid reconstruction loss type: %s' % args.reconstruction_loss_type)
        kl_loss = kl_divergence_loss(mean, log_variance)
        kl_loss_mean = tf.reduce_mean(kl_loss)
        total_loss = ((args.alfa * kl_loss_mean) + (args.beta * reconstruction_loss))
        learning_rate = tf.train.exponential_decay(args.initial_learning_rate, global_step, args.learning_rate_decay_steps, args.learning_rate_decay_factor, staircase=True)
        opt = tf.train.AdamOptimizer(learning_rate)
        grads = opt.compute_gradients(total_loss, var_list=get_variables_to_train())
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        with tf.control_dependencies([apply_gradient_op]):
            train_op = tf.no_op(name='train')
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        facenet_saver = tf.train.Saver(get_facenet_variables_to_restore())
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)
        with sess.as_default():
            if (args.reconstruction_loss_type == 'PERCEPTUAL'):
                # Perceptual loss needs the pretrained feature network weights.
                if (not args.pretrained_model):
                    raise ValueError('A pretrained model must be specified when using perceptual loss')
                pretrained_model_exp = os.path.expanduser(args.pretrained_model)
                print(('Restoring pretrained model: %s' % pretrained_model_exp))
                facenet_saver.restore(sess, pretrained_model_exp)
            # Bug fix: np.float was removed in NumPy 1.24; the builtin float
            # is the documented equivalent.
            log = {'total_loss': np.zeros((0,), float), 'reconstruction_loss': np.zeros((0,), float), 'kl_loss': np.zeros((0,), float), 'learning_rate': np.zeros((0,), float)}
            step = 0
            print('Running training')
            while (step < args.max_nrof_steps):
                start_time = time.time()
                step += 1
                # Save periodically and on the final step; on save steps also
                # fetch reconstructions to write an image grid.
                save_state = ((step > 0) and (((step % args.save_every_n_steps) == 0) or (step == args.max_nrof_steps)))
                if save_state:
                    (_, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_, rec_) = sess.run([train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate, reconstructed])
                    img = facenet.put_images_on_grid(rec_, shape=(16, 8))
                    misc.imsave(os.path.join(model_dir, ('reconstructed_%06d.png' % step)), img)
                else:
                    (_, reconstruction_loss_, kl_loss_mean_, total_loss_, learning_rate_) = sess.run([train_op, reconstruction_loss, kl_loss_mean, total_loss, learning_rate])
                log['total_loss'] = np.append(log['total_loss'], total_loss_)
                log['reconstruction_loss'] = np.append(log['reconstruction_loss'], reconstruction_loss_)
                log['kl_loss'] = np.append(log['kl_loss'], kl_loss_mean_)
                log['learning_rate'] = np.append(log['learning_rate'], learning_rate_)
                duration = (time.time() - start_time)
                print(('Step: %d \tTime: %.3f \trec_loss: %.3f \tkl_loss: %.3f \ttotal_loss: %.3f' % (step, duration, reconstruction_loss_, kl_loss_mean_, total_loss_)))
                if save_state:
                    print('Saving checkpoint file')
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)
                    print('Saving log')
                    with h5py.File(log_file_name, 'w') as f:
                        for (key, value) in iteritems(log):
                            f.create_dataset(key, data=value)
def load_ResNet101Model():
    """Build a ResNet-101 backbone and initialize it from torchvision's
    pretrained resnet101 weights (copied parameter-by-parameter)."""
    model = ResNet(Bottleneck, [3, 4, 23, 3])
    # NOTE(review): mixes `torchvision.models` and a bare `models` alias —
    # presumably the same module imported under two names; confirm both
    # exist at the top of the file.
    copy_parameter_from_resnet(model, torchvision.models.resnet101(weights=models.ResNet101_Weights.DEFAULT).state_dict())
    return model
def UnpackVariable(var, num):
    """Expand `var` to a list of length `num`.

    A list already of length num is returned unchanged; a single value
    (or a one-element list) is repeated num times.
    """
    # Bug fix: the original asserted `len > 0`, comparing the builtin
    # function itself (a TypeError on Python 3); the intent was to
    # validate the requested count.
    assert num > 0
    if isinstance(var, list) and len(var) == num:
        return var
    if isinstance(var, list):
        # Only a single-element list can be broadcast.
        assert len(var) == 1
        var = var[0]
    # `range` works on both Python 2 and 3 (the original used py2-only xrange).
    return [var for _ in range(num)]
def check_predictions(clf, X, y):
    """Fit `clf` on (X, y) and verify it memorizes the training set: exact
    label predictions plus well-formed, label-consistent probabilities."""
    n_samples = len(y)
    classes = np.unique(y)
    n_classes = classes.shape[0]
    fitted = clf.fit(X, y)
    predicted = fitted.predict(X)
    probabilities = fitted.predict_proba(X)
    # The fitted estimator must expose the observed classes and reproduce y.
    assert_array_equal(clf.classes_, classes)
    assert predicted.shape == (n_samples,)
    assert_array_equal(predicted, y)
    # Probabilities: one row per sample, rows sum to 1, argmax matches labels.
    assert probabilities.shape == (n_samples, n_classes)
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    assert_array_equal(probabilities.argmax(axis=1), y)
class QDQBertForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error unless
    the required backend (torch) is installed."""

    # Backends that must be present before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message when torch is missing.
        requires_backends(self, ['torch'])
class WriteError(Exception):
    """Raised when a record cannot be written to a file."""

    def __init__(self, filename, message):
        # Bug fix: call the base Exception initializer so e.args is populated
        # and the exception pickles and reprs correctly.
        super().__init__(message)
        self.filename = filename
        self.message = message

    def __str__(self):
        return 'Error writing record to file {}: {}'.format(self.filename, self.message)
def map_layers(weight):
    """Return a copy of `weight` whose keys have every 'z' replaced by 'w'
    (keys without a 'z' are kept unchanged); preserves insertion order."""
    renamed = []
    for key, value in weight.items():
        new_key = key.replace('z', 'w') if 'z' in key else key
        renamed.append((new_key, value))
    return OrderedDict(renamed)
class SummMetric():
    """Abstract base class describing a summarization evaluation metric."""

    # Human-readable metric identifier.
    metric_name: str = None
    # (min, max) of the metric's possible values.
    range: Tuple[float, float] = None
    # Whether larger values indicate better summaries.
    higher_is_better: bool = None
    # True if computing the metric needs heavy resources (e.g. a model).
    requires_heavy_compute: bool = None

    def evaluate(self, inputs: List[str], targets: List[str], keys: List[str]) -> Dict[str, float]:
        """Score `targets` against `inputs` and return {key: score} for `keys`."""
        raise NotImplementedError("the base class for metrics shouldn't be instantiated!")
class BindSelfPaced(_BindOptions):
    """CLI binding for self-paced-learning loss hyperparameters, mapped onto
    ProjectorParams.LossParams.* configuration overrides."""

    # NOTE(review): defined without `self`, so this behaves as a static
    # method — presumably always invoked as a plain function; confirm callers.
    def bind(subparser):
        subparser.add_argument('--begin_value', default=[1000], type=float, nargs='+', help='ProjectorParams.LossParams.begin_value')
        subparser.add_argument('--end_value', default=[1000], type=float, nargs='+', help='ProjectorParams.LossParams.end_value')
        # NOTE(review): default is the bare string 'hard' while nargs='+'
        # yields a list when given on the CLI — parse() joins args.method, so
        # the default would be joined character-wise; verify intent.
        subparser.add_argument('--method', default='hard', type=str, nargs='+', help='ProjectorParams.LossParams.weight_update')
        subparser.add_argument('--scheduler_type', default=['inversesquare'], type=str, choices=['linear', 'square', 'inversesquare'], nargs='+', help='ProjectorParams.LossParams.type')

    def parse(self, args):
        # Serialize each option list into a bracketed, comma-joined override.
        self.add(f"ProjectorParams.LossParams.begin_value=[{','.join([str(x) for x in args.begin_value])}]")
        self.add(f"ProjectorParams.LossParams.end_value=[{','.join([str(x) for x in args.end_value])}]")
        self.add(f"ProjectorParams.LossParams.weight_update=[{','.join(args.method)}]")
        self.add(f"ProjectorParams.LossParams.type=[{','.join(args.scheduler_type)}]")
def load_dataset(data_dir):
    """Load parallel (source, target) sequences from a .json, .txt
    (tab-separated), or .csv file; unknown extensions yield (None, None)."""
    print(f'Loading dataset from {data_dir}')
    src, tgt = None, None
    if data_dir.endswith('.json'):
        src, tgt = load_json(data_dir)
    elif data_dir.endswith('.txt'):
        # One "source<TAB>target" pair per line; trailing newlines are kept.
        with open(data_dir, 'r') as f:
            pairs = [line.split('\t') for line in f.readlines()]
        src, tgt = list(zip(*pairs))
    elif data_dir.endswith('.csv'):
        src, tgt = load_csv(data_dir)
    return (src, tgt)
class TinyImagenetDataModule(ImagenetDataModule):
    """ImagenetDataModule variant configured for Tiny-ImageNet (200 classes)."""

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)

    # NOTE(review): plain method, not a @property — confirm the base class
    # exposes num_classes the same way so call sites remain consistent.
    def num_classes(self) -> int:
        """Tiny-ImageNet has exactly 200 classes."""
        return 200
class SimulationRobotActor(AbstractActor):
    """Actor backed by a simulated robot; state queries are delegated
    directly to the wrapped robot instance."""

    def __init__(self, robot, *args, **kwargs):
        super(SimulationRobotActor, self).__init__(*args, **kwargs)
        self.robot = robot
        # Expose the robot's state getter as this actor's own method.
        self.getState = self.robot.getState
class RiskEstimator():
    """Accumulates per-step loss values ("risks") for a configured loss."""

    def __init__(self, loss):
        # Instantiate the configured loss from the registry by name.
        self.loss = maps.loss[loss]()
        self.risks = np.array([[]])

    def return_and_save(self, loss):
        """Record `loss` in the running risk history and echo it back."""
        self.risks = np.append(self.risks, loss)
        return loss
class Trainer(object):
    """Thin orchestrator: records the run configuration, then runs training
    (unless configured as test-only) followed by evaluation."""

    def __init__(self, para, config_path=None):
        self.para = para
        self.config_path = config_path

    def run(self):
        """Execute one full run: log config, optionally train, then test."""
        # Stamp the run start time onto the shared parameter object.
        self.para.time = datetime.now()
        logger = Logger(self.para, self.config_path)
        logger.record_para()
        if (not self.para.test_only):
            process(self.para)
        test(self.para, logger)
def sentence_bleu(hypothesis, reference):
    """Compute a smoothed sentence-level BLEU score.

    Adds one to the matched and total n-gram counts for orders 2-4
    (indices 1-3) before recomputing BLEU with exponential smoothing,
    avoiding zero scores on short sentences.
    """
    bleu = _corpus_bleu(hypothesis, reference)
    for i in range(1, 4):
        bleu.counts[i] += 1
        bleu.totals[i] += 1
    bleu = compute_bleu(bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len, smooth_method='exp')
    return bleu.score
def image_transform(x, H, out_shape=None, interpolation='NEAREST'):
    """Apply projective transform `H` to image tensor `x`, defaulting the
    output shape to x's spatial dimensions (rank-4 NHWC or rank<=3 HW[C])."""
    if out_shape is None:
        static_shape = x.get_shape().as_list()
        # Rank-4 tensors are NHWC; otherwise the first two dims are spatial.
        out_shape = static_shape[1:3] if len(static_shape) == 4 else static_shape[:2]
    return tf.contrib.image.transform(x, H, interpolation=interpolation, output_shape=out_shape)
class TaskType(Enum):
    """Closed set of task categories supported by this pipeline."""

    Classification = 'Classification'
    Summarization = 'Summarization'
    PairwiseClassification = 'PairwiseClassification'
@_cache()
def statcast_outfield_catch_prob(year: int, min_opp: Union[int, str] = 'q') -> pd.DataFrame:
    """Fetch the Baseball Savant outfield catch-probability leaderboard.

    Args:
        year: season to query.
        min_opp: minimum-opportunities filter ('q' selects qualified players).
    """
    # NOTE(review): in this copy of the file the decorator had lost its '@'
    # and the URL f-string literal was truncated (a syntax error). Both are
    # reconstructed here — confirm the exact query string against the
    # upstream Baseball Savant CSV endpoint before relying on it.
    url = f'https://baseballsavant.mlb.com/outfield_catch_probability?type=player&min={min_opp}&year={year}&csv=true'
    res = requests.get(url, timeout=None).content
    data = pd.read_csv(io.StringIO(res.decode('utf-8')))
    # Normalize raw Savant column names to this package's conventions.
    data = sanitize_statcast_columns(data)
    return data
def rename_state_dict_key(k, patterns):
    """Return `k` with every (old, new) substring substitution in `patterns`
    applied sequentially, in order."""
    for old_name, new_name in patterns:
        k = k.replace(old_name, new_name)
    return k
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.