code stringlengths 101 5.91M |
|---|
def oe_to_igraph(inputs, output, size_dict, weight_nodes='const', weight_edges='log'):
    """Build an igraph representation of an einsum-style contraction.

    Each input term becomes a weighted vertex (named by its position in
    `inputs`), and every contracted (non-output) index that is shared by
    exactly two terms becomes a weighted edge between those vertices.
    """
    import igraph as ig
    graph = ig.Graph()
    # Map each contracted index to the names of the vertices that use it.
    index_to_vertices = defaultdict(list)
    for term_pos, term in enumerate(inputs):
        vertex_name = str(term_pos)
        node_weight = calc_node_weight_float(term, size_dict, weight_nodes)
        graph.add_vertex(vertex_name, weight=node_weight)
        for index in term:
            # Output indices survive the contraction, so they form no edges.
            if index not in output:
                index_to_vertices[index].append(vertex_name)
    for index, vertices in index_to_vertices.items():
        # Hyper-edges (indices shared by != 2 terms) cannot be represented.
        if len(vertices) == 2:
            edge_weight = calc_edge_weight_float(index, size_dict, weight_edges)
            graph.add_edge(*vertices, ind=index, weight=edge_weight)
    return graph
class Total_Phonation_Time(object):
    """Aggregates total phonation (speech) time over a set of sentences.

    Each sentence object is expected to expose a numeric ``speech_time``
    attribute.
    """

    def __init__(self, sentence_objs, **kwArgs):
        # Extra keyword arguments are accepted for interface compatibility
        # with sibling metrics but intentionally unused.
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the sum of ``speech_time`` across all sentence objects."""
        return sum(so.speech_time for so in self.sentence_objs)
def get_target_updates(vars, target_vars, tau):
    """Build TF ops that copy `vars` onto `target_vars`.

    Returns a pair of grouped ops: the first hard-assigns every target
    variable from its source (initialization); the second applies a Polyak
    soft update ``target <- (1 - tau) * target + tau * var``.
    """
    logger.info('setting up target updates ...')
    assert len(vars) == len(target_vars)
    init_updates = []
    soft_updates = []
    for var, target_var in zip(vars, target_vars):
        logger.info(' {} <- {}'.format(target_var.name, var.name))
        init_updates.append(tf.assign(target_var, var))
        blended = (1.0 - tau) * target_var + tau * var
        soft_updates.append(tf.assign(target_var, blended))
    assert len(init_updates) == len(vars)
    assert len(soft_updates) == len(vars)
    return tf.group(*init_updates), tf.group(*soft_updates)
def CreateDataset(opt):
    """Instantiate, announce, and initialize the test-time aligned dataset.

    Parameters
    ----------
    opt : options namespace forwarded to ``dataset.initialize``.

    Returns
    -------
    AlignedDataset
        The initialized dataset instance.
    """
    # Import locally so the data package is only loaded when actually used.
    from data.aligned_dataset_test import AlignedDataset
    # The original pre-assigned `dataset = None`, which was dead code.
    dataset = AlignedDataset()
    print('dataset [%s] was created' % dataset.name())
    dataset.initialize(opt)
    return dataset
class CamembertModel(metaclass=DummyObject):
    """Import placeholder: raises a helpful error when torch is missing."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Fails with an informative message unless the torch backend exists.
        requires_backends(self, ['torch'])
class TestGetRNNCell(tf.test.TestCase):
    """Unit tests for ``training_utils.get_rnn_cell`` construction options."""
    def test_single_layer(self):
        # A single layer yields the raw cell class, not a multi-cell wrapper.
        cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=1)
        self.assertIsInstance(cell, tf.contrib.rnn.BasicLSTMCell)
        self.assertEqual(cell.output_size, 16)
    def test_multi_layer(self):
        # Multiple layers are wrapped in the project's ExtendedMultiRNNCell.
        cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=2)
        self.assertIsInstance(cell, rnn_cell.ExtendedMultiRNNCell)
        self.assertEqual(cell.output_size, 16)
    def test_full_class_path(self):
        # The cell class may also be given as a fully qualified module path.
        cell = training_utils.get_rnn_cell(cell_class='tensorflow.contrib.rnn.BasicRNNCell', cell_params={'num_units': 16}, num_layers=1)
        self.assertIsInstance(cell, tf.contrib.rnn.BasicRNNCell)
        self.assertEqual(cell.output_size, 16)
    def test_dropout(self):
        # Supplying a keep probability wraps the cell in a DropoutWrapper.
        cell = training_utils.get_rnn_cell(cell_class='BasicLSTMCell', cell_params={'num_units': 16}, num_layers=1, dropout_input_keep_prob=0.5)
        self.assertIsInstance(cell, tf.contrib.rnn.DropoutWrapper)
        self.assertEqual(cell.output_size, 16)
    def test_extra_args(self):
        # Unknown parameter names must raise; valid extras pass through to
        # the underlying cell constructor.
        with self.assertRaises(ValueError):
            training_utils.get_rnn_cell(cell_class='LSTMCell', cell_params={'num_units': 16, 'use_peepholesERROR': True}, num_layers=1)
        cell = training_utils.get_rnn_cell(cell_class='LSTMCell', cell_params={'num_units': 8, 'use_peepholes': True, 'forget_bias': 0.5}, num_layers=1)
        self.assertIsInstance(cell, tf.contrib.rnn.LSTMCell)
        self.assertEqual(cell._use_peepholes, True)
        self.assertEqual(cell._forget_bias, 0.5)
        self.assertEqual(cell.output_size, 8)
def train_model(args):
    """Train a (multi-domain) tagger end-to-end.

    Loads the train/dev/test CoNLL corpora, builds word/char/tag
    vocabularies, constructs a shared encoder plus one classifier head per
    training file, then runs 50 epochs while tracking the best dev score
    and its paired test score.

    NOTE(review): assumes args.train (or args.multi_train), args.dev and
    args.test are all supplied; otherwise `train`/`dev`/`test` below are
    unbound. Also relies on module-level `parser` and `start`.
    """
    CEMBED_SIZE = args.CEMBED_SIZE
    WEMBED_SIZE = args.WEMBED_SIZE
    HIDDEN_SIZE = args.HIDDEN_SIZE
    MLP_SIZE = args.MLP_SIZE
    SPARSE = args.SPARSE
    TIMEOUT = args.TIMEOUT
    num_train_files = 0
    best_dev = 0.0
    best_test = 0.0
    batch_trains = []
    if args.train:
        train = file_conll(args.train).tupled_data
    if args.multi_train:
        train_combined = []
        train = []
        for file_name in args.multi_train:
            train += file_conll(file_name).tupled_data[:args.train_samples]
            num_train_files += 1
            # Ontonotes corpora are capped at 2500 samples; others at
            # args.train_samples.
            if 'ontonotes' in file_name:
                train_combined.append(file_conll(file_name).tupled_data[:2500])
            else:
                train_combined.append(file_conll(file_name).tupled_data[:args.train_samples])
        # Round-robin one sentence per corpus so every batch mixes domains;
        # shorter corpora wrap around via the modulo index.
        max_samples = max(2500, args.train_samples)
        for j in range(max_samples):
            current_batch = []
            for i in range(num_train_files):
                current_batch.append(train_combined[i][j % len(train_combined[i])])
            batch_trains.append(current_batch)
    if args.dev:
        dev = file_conll(args.dev).tupled_data
    if args.test:
        test = file_conll(args.test).tupled_data
    args.num_train_files = num_train_files
    # Collect vocabulary statistics over all three splits.
    words = []
    tags = []
    chars = set()
    wc = Counter()
    for sent in train + dev + test:
        for (w, p) in sent:
            words.append(w)
            tags.append(p)
            wc[w] += 1
            chars.update(w)
    # Reserve special symbols for unknown words/chars and char padding.
    words.append('_UNK_')
    chars.add('_UNK_')
    chars.add('<*>')
    vw = Vocab.from_corpus([words])
    vt = Vocab.from_corpus([tags])
    vc = Vocab.from_corpus([chars])
    UNK = vw.w2i['_UNK_']
    CUNK = vc.w2i['_UNK_']
    pad_char = vc.w2i['<*>']
    nwords = vw.size()
    ntags = vt.size()
    nchars = vc.size()
    print('nwords=%r, ntags=%r, nchars=%r' % (nwords, ntags, nchars))
    args.ntags = ntags
    args.nwords = nwords
    args.nchars = nchars
    # Shared encoder plus one classifier head per training corpus.
    encoder_class = get_model_class('tagger')
    encoder_class.add_config(parser)
    encoder = encoder_class(args, vw, vc, vt, wc, UNK, CUNK, pad_char)
    classifier = Classifier(2 * HIDDEN_SIZE, MLP_SIZE, ntags)
    classifiers = []
    for ind in range(num_train_files):
        classifiers.append(Classifier(2 * HIDDEN_SIZE, MLP_SIZE, ntags))
    requires_grad = lambda x: x.requires_grad
    optimizer_encoder = optim.Adam(encoder.parameters(), weight_decay=0.0001)
    task_params = list(classifier.parameters())
    for x in classifiers:
        task_params += list(x.parameters())
    optimizer_classifier = optim.Adam(filter(requires_grad, task_params), weight_decay=0.0001)
    if args.CUDA:
        # BUG FIX: `map` is lazy in Python 3, so the original
        # `map(lambda m: m.cuda(), ...)` never actually moved any module to
        # the GPU. Iterate explicitly so .cuda() runs.
        for module in [encoder] + [classifier] + classifiers:
            module.cuda()
    print('startup time: %r' % (time.time() - start))
    start_time = time.time()
    i = 0
    for ITER in range(50):
        train_epoch(encoder, classifier, classifiers, batch_trains, dev, test, optimizer_encoder, optimizer_classifier, start_time, i)
        print('epoch %r finished' % ITER)
        domain_encs = None
        curr_dev = evaluate(encoder, args, batch_trains, classifier, classifiers, dev, domain_encs)
        curr_test = evaluate(encoder, args, batch_trains, classifier, classifiers, test, domain_encs)
        # Model selection on dev; report the test score paired with the
        # best dev score, not the best test score seen.
        if curr_dev > best_dev:
            best_dev = curr_dev
            best_test = curr_test
        print(best_dev, best_test)
class TensorCollector(CollectorBase):
    """Collects per-batch tensors for a selected set of graph nodes,
    resolving quantized tensor names back to their float counterparts."""
    def __init__(self, include_nodes, qtensor_to_tensor, tensor_to_node):
        # One dict per batch: node -> {tensor_name: (is_quantized, array)}.
        self.tensors_dicts = []
        self.include_nodes = include_nodes
        self.qtensor_to_tensor = qtensor_to_tensor
        self.tensor_to_node = tensor_to_node
        # Every requested node must be producible by some known tensor.
        rest = (set(self.include_nodes) - set(self.tensor_to_node.values()))
        assert (len(rest) == 0), 'Unexpected tensors set to be collected: {}'.format(rest)
    def collect_gluon(self, name, _, arr):
        """Record `arr` under its owning node if that node is being collected."""
        is_quantized = False
        if (name not in self.tensor_to_node):
            if (name in self.qtensor_to_tensor):
                # Known quantized alias: map back to the float tensor name.
                name = self.qtensor_to_tensor[name]
            else:
                # First sighting: resolve and cache the quantized->float mapping.
                (qname, name) = (name, _qtensor_to_tensor(name, self.tensor_to_node))
                self.qtensor_to_tensor[qname] = name
            # Empty string means the name could not be resolved; skip it.
            if (name == ''):
                return
            is_quantized = (arr.dtype in QUANTIZATION_DTYPES)
        node = self.tensor_to_node[name]
        if (node in self.include_nodes):
            # Store into the dict opened by the most recent pre_batch call.
            self.tensors_dicts[(- 1)].setdefault(node, {})[name] = (is_quantized, arr.copy())
    def pre_batch(self, m, b):
        # Open a fresh collection dict for the upcoming batch.
        self.tensors_dicts.append({})
class AtariEnv(gym.Env, utils.EzPickle):
    """Gym environment wrapping the Arcade Learning Environment (ALE).

    Observations are either the 128-byte console RAM or the RGB screen
    image, selected by `obs_type`. `frameskip` is a fixed int or a
    (low, high) range sampled per step.
    """
    metadata = {'render.modes': ['human', 'rgb_array']}
    def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.0):
        utils.EzPickle.__init__(self, game, obs_type)
        assert (obs_type in ('ram', 'image'))
        self.game_path = atari_py.get_game_path(game)
        if (not os.path.exists(self.game_path)):
            raise IOError(('You asked for game %s but path %s does not exist' % (game, self.game_path)))
        self._obs_type = obs_type
        self.frameskip = frameskip
        self.ale = atari_py.ALEInterface()
        self.viewer = None
        assert isinstance(repeat_action_probability, (float, int)), 'Invalid repeat_action_probability: {!r}'.format(repeat_action_probability)
        # Sticky-action probability is configured on the ALE itself.
        self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)
        # seed() also loads the ROM, so it must run before querying the ALE.
        self.seed()
        self._action_set = self.ale.getMinimalActionSet()
        self.action_space = spaces.Discrete(len(self._action_set))
        (screen_width, screen_height) = self.ale.getScreenDims()
        if (self._obs_type == 'ram'):
            self.observation_space = spaces.Box(low=0, high=255, dtype=np.uint8, shape=(128,))
        elif (self._obs_type == 'image'):
            self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3), dtype=np.uint8)
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))
    def seed(self, seed=None):
        """Seed the numpy RNG and the ALE, then (re)load the ROM."""
        (self.np_random, seed1) = seeding.np_random(seed)
        # Derive a bounded second seed for the ALE from the first one.
        seed2 = (seeding.hash_seed((seed1 + 1)) % (2 ** 31))
        self.ale.setInt(b'random_seed', seed2)
        self.ale.loadROM(self.game_path)
        return [seed1, seed2]
    def step(self, a):
        """Apply action index `a` for the frameskip duration.

        Returns (observation, accumulated reward, done, info).
        """
        reward = 0.0
        action = self._action_set[a]
        if isinstance(self.frameskip, int):
            num_steps = self.frameskip
        else:
            # Random frameskip sampled in [low, high) each step.
            num_steps = self.np_random.randint(self.frameskip[0], self.frameskip[1])
        for _ in range(num_steps):
            reward += self.ale.act(action)
        ob = self._get_obs()
        return (ob, reward, self.ale.game_over(), {'ale.lives': self.ale.lives()})
    def _get_image(self):
        # Current RGB screen frame from the ALE.
        return self.ale.getScreenRGB2()
    def _get_ram(self):
        # 128-byte console RAM snapshot.
        return to_ram(self.ale)
    def _n_actions(self):
        # Size of the minimal action set for this game.
        return len(self._action_set)
    def _get_obs(self):
        # Dispatch on the configured observation type.
        if (self._obs_type == 'ram'):
            return self._get_ram()
        elif (self._obs_type == 'image'):
            img = self._get_image()
            return img
    def reset(self):
        """Start a new episode and return the initial observation."""
        self.ale.reset_game()
        return self._get_obs()
    def render(self, mode='human'):
        """Return an RGB array, or show the frame in a simple viewer."""
        img = self._get_image()
        if (mode == 'rgb_array'):
            return img
        elif (mode == 'human'):
            # Imported lazily to avoid a hard dependency when not rendering.
            from gym.envs.classic_control import rendering
            if (self.viewer is None):
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)
            return self.viewer.isopen
    def close(self):
        # Tear down the viewer window, if one was opened by render().
        if (self.viewer is not None):
            self.viewer.close()
            self.viewer = None
    def get_action_meanings(self):
        """Human-readable names for the minimal action set."""
        return [ACTION_MEANING[i] for i in self._action_set]
    def get_keys_to_action(self):
        """Map keyboard key tuples (sorted ordinals) to action ids."""
        KEYWORD_TO_KEY = {'UP': ord('w'), 'DOWN': ord('s'), 'LEFT': ord('a'), 'RIGHT': ord('d'), 'FIRE': ord(' ')}
        keys_to_action = {}
        for (action_id, action_meaning) in enumerate(self.get_action_meanings()):
            keys = []
            for (keyword, key) in KEYWORD_TO_KEY.items():
                if (keyword in action_meaning):
                    keys.append(key)
            keys = tuple(sorted(keys))
            # Each key combination must map to exactly one action.
            assert (keys not in keys_to_action)
            keys_to_action[keys] = action_id
        return keys_to_action
    def clone_state(self):
        """Snapshot the emulator state via cloneState/encodeState."""
        state_ref = self.ale.cloneState()
        state = self.ale.encodeState(state_ref)
        self.ale.deleteState(state_ref)
        return state
    def restore_state(self, state):
        """Restore a snapshot produced by clone_state."""
        state_ref = self.ale.decodeState(state)
        self.ale.restoreState(state_ref)
        self.ale.deleteState(state_ref)
    def clone_full_state(self):
        """Snapshot the full system state via cloneSystemState/encodeState."""
        state_ref = self.ale.cloneSystemState()
        state = self.ale.encodeState(state_ref)
        self.ale.deleteState(state_ref)
        return state
    def restore_full_state(self, state):
        """Restore a snapshot produced by clone_full_state."""
        state_ref = self.ale.decodeState(state)
        self.ale.restoreSystemState(state_ref)
        self.ale.deleteState(state_ref)
_model
def regnetx_002(pretrained=False, **kwargs):
    """Build the 'regnetx_002' model variant via the shared factory."""
    return _regnet('regnetx_002', pretrained, **kwargs)
def merge_valid_test_messup(mess_up_train_valid, mess_up_train_test):
    """Merge two train-contamination maps by unioning their value sets.

    Keys present in either input are kept (falsy keys are dropped); the
    value for each key is the union of the sets found under that key in
    both maps (missing entries count as the empty set).
    """
    all_keys = set(mess_up_train_valid) | set(mess_up_train_test)
    return {
        key: mess_up_train_valid.get(key, set()) | mess_up_train_test.get(key, set())
        for key in all_keys
        if key
    }
class InputDataFields(object):
    """Canonical string keys naming the fields of an input data dict."""
    # Image payload and identification.
    image = 'image'
    original_image = 'original_image'
    key = 'key'
    source_id = 'source_id'
    filename = 'filename'
    # Image-level and box-level groundtruth annotations.
    groundtruth_image_classes = 'groundtruth_image_classes'
    groundtruth_boxes = 'groundtruth_boxes'
    groundtruth_classes = 'groundtruth_classes'
    groundtruth_label_types = 'groundtruth_label_types'
    groundtruth_is_crowd = 'groundtruth_is_crowd'
    groundtruth_area = 'groundtruth_area'
    groundtruth_difficult = 'groundtruth_difficult'
    groundtruth_group_of = 'groundtruth_group_of'
    # Region proposals produced by a first-stage detector.
    proposal_boxes = 'proposal_boxes'
    proposal_objectness = 'proposal_objectness'
    # Instance-level masks, boundaries, classes, and keypoints.
    groundtruth_instance_masks = 'groundtruth_instance_masks'
    groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
    groundtruth_instance_classes = 'groundtruth_instance_classes'
    groundtruth_keypoints = 'groundtruth_keypoints'
    groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
    groundtruth_label_scores = 'groundtruth_label_scores'
def is_dogmatic(a):
    """Return True if `a` is, or recursively contains, a dogmatic value.

    Dicts are searched by value and lists/tuples element-wise; any other
    type is never dogmatic.
    """
    if isinstance(a, (DogmaticDict, DogmaticList)):
        return True
    if isinstance(a, dict):
        return any(is_dogmatic(v) for v in a.values())
    if isinstance(a, (list, tuple)):
        return any(is_dogmatic(v) for v in a)
    # Previously fell through and returned None implicitly; return an
    # explicit bool so the function's contract is consistent.
    return False
class ScenarioTask(ABSTask):
    """Task that replays a fixed scenario (pedestrians, static obstacles,
    robot start/goal) loaded from a scenario file."""
    def __init__(self, obstacles_manager: ObstaclesManager, robot_manager: RobotManager, scenario_path: str):
        super().__init__(obstacles_manager, robot_manager)
        self.scenario = ArenaScenario()
        self.scenario.loadFromFile(scenario_path)
        # Pedsim is only started when the scenario actually contains agents.
        self.pedsim_manager = None
        if (len(self.scenario.pedsimAgents) > 0):
            self.pedsim_manager = PedsimManager()
            peds = [agent.getPedMsg() for agent in self.scenario.pedsimAgents]
            self.pedsim_manager.spawnPeds(peds)
        # Static obstacles are spawned once at construction time.
        for obstacle in self.scenario.staticObstacles:
            self.obstacles_manager._srv_spawn_model.call(obstacle.flatlandModel.path, obstacle.name, 'static_obstacles', Pose2D(obstacle.pos[0], obstacle.pos[1], obstacle.angle))
        self.reset_count = 0
    def reset(self):
        """Reset pedestrians and robot pose for another run of the scenario.

        Returns an info dict. NOTE: the key 'new_scenerio_loaded' keeps its
        historical misspelling for compatibility with existing consumers.
        """
        self.reset_count += 1
        info = {}
        with self._map_lock:
            if (self.pedsim_manager != None):
                self.pedsim_manager.resetAllPeds()
            self.robot_manager.set_start_pos_goal_pos(Pose2D(self.scenario.robotPosition[0], self.scenario.robotPosition[1], 0), Pose2D(self.scenario.robotGoal[0], self.scenario.robotGoal[1], 0))
        # The scenario counts as freshly loaded only on the first reset.
        if (self.reset_count == 1):
            info['new_scenerio_loaded'] = True
        else:
            info['new_scenerio_loaded'] = False
        info['robot_goal_pos'] = self.scenario.robotGoal
        info['num_repeats_curr_scene'] = self.reset_count
        info['max_repeats_curr_scene'] = 1000
        return info
def build_decoder(opt, encoder_word_emb_weight, device, rl_model=None):
    """Construct the decoder specified by `opt`.

    Chooses an RNN decoder when `opt.dec_rnn` is set, otherwise a
    Transformer decoder. Decoder options are assembled from `opt` and the
    encoder's dimensions.
    """
    if opt.dec_feature:
        # Use only the last `dec_feature` feature vocabularies.
        n_all_feat = len(opt.feat_vocab)
        feat_vocab = opt.feat_vocab[(n_all_feat - opt.dec_feature):]
    else:
        feat_vocab = None
    # Pretrained encoders fix the hidden size at 768 and depth at 12 layers.
    d_enc_model = (opt.d_enc_model if (not opt.pretrained) else 768)
    n_enc_layer = (opt.n_enc_layer if (not opt.pretrained) else 12)
    if opt.dec_rnn:
        options = {'n_vocab': opt.tgt_vocab_size, 'ans_n_vocab': opt.src_vocab_size, 'd_word_vec': opt.d_word_vec, 'd_model': opt.d_dec_model, 'n_layer': opt.n_dec_layer, 'rnn': opt.dec_rnn, 'd_k': opt.d_k, 'feat_vocab': feat_vocab, 'd_feat_vec': opt.d_feat_vec, 'd_enc_model': d_enc_model, 'n_enc_layer': n_enc_layer, 'input_feed': opt.input_feed, 'copy': opt.copy, 'answer': (opt.answer == 'enc'), 'coverage': opt.coverage, 'separate': (opt.answer == 'sep'), 'layer_attn': opt.layer_attn, 'encoder_word_emb': encoder_word_emb_weight, 'maxout_pool_size': opt.maxout_pool_size, 'dropout': opt.dropout, 'device': device}
        # NOTE(review): `rl_model` is not forwarded — mode/rl_model are
        # reset to ('', None) here; confirm this is intentional.
        (options['mode'], options['rl_model']) = ('', None)
        model = RNNDecoder.from_opt(options)
    else:
        options = {'n_vocab': opt.tgt_vocab_size, 'len_max_seq': opt.max_token_tgt_len, 'd_word_vec': opt.d_word_vec, 'd_model': opt.d_dec_model, 'n_layer': opt.n_dec_layer, 'd_inner': opt.d_inner, 'n_head': opt.n_head, 'd_k': opt.d_k, 'd_v': opt.d_v, 'layer_attn': opt.layer_attn, 'n_enc_layer': n_enc_layer, 'feat_vocab': feat_vocab, 'd_feat_vec': opt.d_feat_vec, 'maxout_pool_size': opt.maxout_pool_size, 'dropout': opt.dropout, 'encoder_word_emb': encoder_word_emb_weight}
        model = TransfDecoder.from_opt(options)
    return model
def filter_tuples(tuples, max_len, min_len):
    """Keep tuples whose fields 0 and 5 both have length in [min_len, max_len]."""
    def _within_bounds(seq):
        # Inclusive length bounds on a single field.
        return min_len <= len(seq) <= max_len
    return [item for item in tuples if _within_bounds(item[0]) and _within_bounds(item[5])]
def _set_plot_properties(properties):
    """Apply any of xlim/ylim/xlabel/ylabel present in `properties` to pyplot."""
    setters = (
        ('xlim', plt.xlim),
        ('ylim', plt.ylim),
        ('xlabel', plt.xlabel),
        ('ylabel', plt.ylabel),
    )
    for key, setter in setters:
        if key in properties:
            setter(properties[key])
def train(train_data, test_data=None):
    """Train a supervised GraphSAGE model on the given graph data.

    `train_data` is a tuple (G, features, id_map, context_pairs, class_map).
    Builds the model variant selected by FLAGS.model, runs minibatch SGD
    with periodic validation, then writes final val/test stats to files.
    """
    G = train_data[0]
    features = train_data[1]
    id_map = train_data[2]
    class_map = train_data[4]
    # Multi-label (list-valued) vs single-label class maps.
    if isinstance(list(class_map.values())[0], list):
        num_classes = len(list(class_map.values())[0])
    else:
        num_classes = len(set(class_map.values()))
    if (not (features is None)):
        # Append a zero row so the padding index maps to a null feature.
        features = np.vstack([features, np.zeros((features.shape[1],))])
    context_pairs = (train_data[3] if FLAGS.random_context else None)
    placeholders = construct_placeholders(num_classes)
    minibatch = NodeMinibatchIterator(G, id_map, placeholders, class_map, num_classes, batch_size=FLAGS.batch_size, max_degree=FLAGS.max_degree, context_pairs=context_pairs)
    adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
    adj_info = tf.Variable(adj_info_ph, trainable=False, name='adj_info')
    # Build the sampler/aggregator stack for the requested model variant.
    if (FLAGS.model == 'graphsage_mean'):
        sampler = UniformNeighborSampler(adj_info)
        if (FLAGS.samples_3 != 0):
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2), SAGEInfo('node', sampler, FLAGS.samples_3, FLAGS.dim_2)]
        elif (FLAGS.samples_2 != 0):
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        else:
            layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos, model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'gcn'):
        # GCN-style aggregation doubles the hidden dims and disables concat.
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, (2 * FLAGS.dim_1)), SAGEInfo('node', sampler, FLAGS.samples_2, (2 * FLAGS.dim_2))]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='gcn', model_size=FLAGS.model_size, concat=False, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_seq'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='seq', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_maxpool'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='maxpool', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    elif (FLAGS.model == 'graphsage_meanpool'):
        sampler = UniformNeighborSampler(adj_info)
        layer_infos = [SAGEInfo('node', sampler, FLAGS.samples_1, FLAGS.dim_1), SAGEInfo('node', sampler, FLAGS.samples_2, FLAGS.dim_2)]
        model = SupervisedGraphsage(num_classes, placeholders, features, adj_info, minibatch.deg, layer_infos=layer_infos, aggregator_type='meanpool', model_size=FLAGS.model_size, sigmoid_loss=FLAGS.sigmoid, identity_dim=FLAGS.identity_dim, logging=True)
    else:
        raise Exception('Error: model name unrecognized.')
    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)
    sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})
    total_steps = 0
    avg_time = 0.0
    epoch_val_costs = []
    # Ops to switch adjacency info between the train and validation graphs.
    train_adj_info = tf.assign(adj_info, minibatch.adj)
    val_adj_info = tf.assign(adj_info, minibatch.test_adj)
    for epoch in range(FLAGS.epochs):
        minibatch.shuffle()
        iter = 0
        print(('Epoch: %04d' % (epoch + 1)))
        epoch_val_costs.append(0)
        while (not minibatch.end()):
            (feed_dict, labels) = minibatch.next_minibatch_feed_dict()
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            t = time.time()
            outs = sess.run([merged, model.opt_op, model.loss, model.preds], feed_dict=feed_dict)
            train_cost = outs[2]
            # Periodically evaluate on the validation graph (runs on the
            # first iteration too, so val_cost is always bound below).
            if ((iter % FLAGS.validate_iter) == 0):
                sess.run(val_adj_info.op)
                if (FLAGS.validate_batch_size == (- 1)):
                    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size)
                else:
                    (val_cost, val_f1_mic, val_f1_mac, duration) = evaluate(sess, model, minibatch, FLAGS.validate_batch_size)
                sess.run(train_adj_info.op)
                epoch_val_costs[(- 1)] += val_cost
            if ((total_steps % FLAGS.print_every) == 0):
                summary_writer.add_summary(outs[0], total_steps)
            # Running average of per-step wall time.
            avg_time = ((((avg_time * total_steps) + time.time()) - t) / (total_steps + 1))
            if ((total_steps % FLAGS.print_every) == 0):
                (train_f1_mic, train_f1_mac) = calc_f1(labels, outs[(- 1)])
                print('Iter:', ('%04d' % iter), 'train_loss=', '{:.5f}'.format(train_cost), 'train_f1_mic=', '{:.5f}'.format(train_f1_mic), 'train_f1_mac=', '{:.5f}'.format(train_f1_mac), 'val_loss=', '{:.5f}'.format(val_cost), 'val_f1_mic=', '{:.5f}'.format(val_f1_mic), 'val_f1_mac=', '{:.5f}'.format(val_f1_mac), 'time=', '{:.5f}'.format(avg_time))
            iter += 1
            total_steps += 1
            if (total_steps > FLAGS.max_total_steps):
                break
        if (total_steps > FLAGS.max_total_steps):
            break
    print('Optimization Finished!')
    # Final full validation pass, then a held-out test pass.
    sess.run(val_adj_info.op)
    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size)
    print('Full validation stats:', 'loss=', '{:.5f}'.format(val_cost), 'f1_micro=', '{:.5f}'.format(val_f1_mic), 'f1_macro=', '{:.5f}'.format(val_f1_mac), 'time=', '{:.5f}'.format(duration))
    with open((log_dir() + 'val_stats.txt'), 'w') as fp:
        fp.write('loss={:.5f} f1_micro={:.5f} f1_macro={:.5f} time={:.5f}'.format(val_cost, val_f1_mic, val_f1_mac, duration))
    print("Writing test set stats to file (don't peak!)")
    (val_cost, val_f1_mic, val_f1_mac, duration) = incremental_evaluate(sess, model, minibatch, FLAGS.batch_size, test=True)
    with open((log_dir() + 'test_stats.txt'), 'w') as fp:
        fp.write('loss={:.5f} f1_micro={:.5f} f1_macro={:.5f}'.format(val_cost, val_f1_mic, val_f1_mac))
class History():
    """Lightweight record of one environment transition (s, a, r, s')."""

    def __init__(self, state, next_state, action, reward):
        # Note the constructor argument order: (state, next_state, action,
        # reward) — next_state comes second, unlike the usual (s, a, r, s').
        self.state = state
        self.next_state = next_state
        self.action = action
        self.reward = reward
def make_env(env_name: str, seed: int, save_folder: Optional[str]=None, add_episode_monitor: bool=True, action_repeat: int=1, frame_stack: int=1, from_pixels: bool=False, pixels_only: bool=True, image_size: int=84, sticky: bool=False, gray_scale: bool=False, flatten: bool=True) -> gym.Env:
    """Create and fully wrap a gym (or DMC) environment.

    Falls back to a dm_control environment when `env_name` is not a
    registered gym id (then the name is parsed as '<domain>-<task>').
    Wrappers are applied in a fixed order: flatten, episode monitor,
    action repeat, action rescaling, video recording, pixel observation
    (with optional gray scale), precision cast, frame stacking, sticky
    actions; finally env/action/observation spaces are seeded.
    """
    all_envs = gym.envs.registry.all()
    env_ids = [env_spec.id for env_spec in all_envs]
    if (env_name in env_ids):
        env = gym.make(env_name)
    else:
        # Not a gym id: interpret as a dm_control '<domain>-<task>' pair.
        (domain_name, task_name) = env_name.split('-')
        env = wrappers.DMCEnv(domain_name=domain_name, task_name=task_name, task_kwargs={'random': seed})
    if (flatten and isinstance(env.observation_space, gym.spaces.Dict)):
        env = gym.wrappers.FlattenObservation(env)
    if add_episode_monitor:
        env = wrappers.EpisodeMonitor(env)
    if (action_repeat > 1):
        env = wrappers.RepeatAction(env, action_repeat)
    env = RescaleAction(env, (- 1.0), 1.0)
    if (save_folder is not None):
        env = gym.wrappers.RecordVideo(env, save_folder)
    if from_pixels:
        if (env_name in env_ids):
            camera_id = 0
        else:
            # DMC quadruped tasks use camera 2 for a usable view.
            camera_id = (2 if (domain_name == 'quadruped') else 0)
        env = PixelObservationWrapper(env, pixels_only=pixels_only, render_kwargs={'pixels': {'height': image_size, 'width': image_size, 'camera_id': camera_id}})
        env = wrappers.TakeKey(env, take_key='pixels')
        if gray_scale:
            env = wrappers.RGB2Gray(env)
    else:
        # State observations are cast down to single precision.
        env = wrappers.SinglePrecision(env)
    if (frame_stack > 1):
        env = wrappers.FrameStack(env, num_stack=frame_stack)
    if sticky:
        env = wrappers.StickyActionEnv(env)
    env.seed(seed)
    env.action_space.seed(seed)
    env.observation_space.seed(seed)
    return env
('torch.distributed._broadcast_coalesced', mock)
('torch.distributed.broadcast', mock)
('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper must be True for all (MM)DataParallel/DDP wrappers
    and for custom classes registered in the wrapper registry, and False
    for plain modules."""
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)
        def forward(self, x):
            return self.conv(x)
    # Stub out cross-rank verification so DDP construction needs no real
    # process group communication (API name differs across torch versions).
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock
    if hasattr(torch.distributed, '_verify_params_across_processes'):
        torch.distributed._verify_params_across_processes = mock
    model = Model()
    assert (not is_module_wrapper(model))
    dp = DataParallel(model)
    assert is_module_wrapper(dp)
    mmdp = MMDataParallel(model)
    assert is_module_wrapper(mmdp)
    ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(ddp)
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(mmddp)
    deprecated_mmddp = DeprecatedMMDDP(model)
    assert is_module_wrapper(deprecated_mmddp)
    # NOTE(review): this bare call looks like a stripped
    # '@..._WRAPPERS.register_module()' decorator for the class below —
    # as written it registers nothing; confirm against the original file.
    _WRAPPERS.register_module()
    class ModuleWrapper(object):
        def __init__(self, module):
            self.module = module
        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)
    module_wraper = ModuleWrapper(model)
    assert is_module_wrapper(module_wraper)
class FlaxMT5EncoderModel(metaclass=DummyObject):
    """Import placeholder: raises a helpful error when flax is missing."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Fails with an informative message unless the flax backend exists.
        requires_backends(self, ['flax'])
class MT5Config(PretrainedConfig):
    """Configuration for the mT5 model family (T5-style encoder-decoder).

    Stores architecture hyperparameters (vocab/model/FFN sizes, layer and
    head counts, relative-attention settings) and derives the dense
    activation function from `feed_forward_proj`.
    """
    model_type = 'mt5'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__(self, vocab_size=250112, d_model=512, d_kv=64, d_ff=1024, num_layers=8, num_decoder_layers=None, num_heads=6, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='gated-gelu', is_encoder_decoder=True, use_cache=True, tokenizer_class='T5Tokenizer', tie_word_embeddings=False, pad_token_id=0, eos_token_id=1, decoder_start_token_id=0, **kwargs):
        super().__init__(is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Symmetric encoder/decoder depth unless explicitly overridden.
        self.num_decoder_layers = (num_decoder_layers if (num_decoder_layers is not None) else self.num_layers)
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        # Parse '<gated->ACT_FN': the last token names the activation,
        # an optional leading 'gated' marks a gated feed-forward.
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[(- 1)]
        self.is_gated_act = (act_info[0] == 'gated')
        if (((len(act_info) > 1) and (act_info[0] != 'gated')) or (len(act_info) > 2)):
            raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'")
        # Backwards compatibility: the historical 'gated-gelu' spelling maps
        # to the 'gelu_new' implementation.
        if (feed_forward_proj == 'gated-gelu'):
            self.dense_act_fn = 'gelu_new'
    # NOTE(review): the three accessors below look like they lost their
    # '@property' decorators in extraction — upstream configs expose these
    # as properties. Confirm against the original file before relying on
    # attribute-style access.
    def hidden_size(self):
        # Common-name alias for d_model.
        return self.d_model
    def num_attention_heads(self):
        # Common-name alias for num_heads.
        return self.num_heads
    def num_hidden_layers(self):
        # Common-name alias for num_layers.
        return self.num_layers
class Discriminator(BaseNetwork):
    """PatchGAN-style convolutional discriminator with optional spectral
    norm; forward returns the final map plus all intermediate features."""
    def __init__(self, in_channels, use_sigmoid=True, use_spectral_norm=True, init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        # NOTE(review): conv1 and features alias the same Sequential —
        # presumably kept for checkpoint/name compatibility; confirm.
        # Conv biases are disabled when spectral norm is applied.
        self.conv1 = self.features = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv2 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv3 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv4 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True))
        self.conv5 = nn.Sequential(spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1, bias=(not use_spectral_norm)), use_spectral_norm))
        if init_weights:
            self.init_weights()
    def forward(self, x):
        """Return (output, [conv1..conv5 feature maps]).

        Output is the raw conv5 map, sigmoid-squashed if use_sigmoid.
        """
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        outputs = conv5
        if self.use_sigmoid:
            outputs = torch.sigmoid(conv5)
        return (outputs, [conv1, conv2, conv3, conv4, conv5])
class SmallNN(nn.Module, metaclass=Named):
    """Three-layer MLP classifier head (ReLU activations, 0.5 dropout)."""

    def __init__(self, dim_in=768, num_classes=4, k=512):
        super().__init__()
        self.num_classes = num_classes
        # dim_in -> k -> k -> num_classes, dropout after each hidden ReLU.
        layers = [
            nn.Linear(dim_in, k), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(k, k), nn.ReLU(), nn.Dropout(0.5),
            nn.Linear(k, num_classes),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Plain feed-forward pass through the stacked layers.
        return self.net(x)
def so3_rotation(x, alpha, beta, gamma):
    """Rotate an SO(3) signal `x` by Euler angles (alpha, beta, gamma).

    Works in the spectral domain: forward SO(3) FFT, block-wise complex
    multiplication by precomputed rotation matrices (one block per degree
    l), then inverse FFT back to the original shape.
    """
    # Bandwidth is inferred from the last dimension (2*b entries).
    b = (x.size()[(- 1)] // 2)
    x_size = x.size()
    # Precomputed per-degree rotation blocks on x's device.
    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
    x = SO3_fft_real.apply(x)
    Fz_list = []
    begin = 0
    for l in range(b):
        # The degree-l block spans L**2 rows, where L = 2l + 1.
        L = ((2 * l) + 1)
        size = (L ** 2)
        Fx = x[begin:(begin + size)]
        # Last axis of length 2 holds the (real, imag) components.
        Fx = Fx.view(L, (- 1), 2)
        U = Us[l].view(L, L, 2)
        # Complex block multiply with U conjugated applies the rotation.
        Fz = complex_mm(U, Fx, conj_x=True)
        Fz = Fz.view(size, (- 1), 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)
    z = SO3_ifft_real.apply(Fz)
    z = z.contiguous()
    z = z.view(*x_size)
    return z
class FastRCNNPredictor(nn.Module):
    """Fast R-CNN head: per-class scores and per-class box regression deltas."""

    def __init__(self, in_channels, num_classes):
        super(FastRCNNPredictor, self).__init__()
        # One linear layer for classification logits, one for 4 box deltas
        # per class.
        self.cls_score = nn.Linear(in_channels, num_classes)
        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)

    def forward(self, x):
        """Return (class scores, bbox deltas) for pooled features `x`."""
        if x.dim() == 4:
            # 4-D input must already be spatially collapsed to 1x1.
            assert list(x.shape[2:]) == [1, 1]
        flat = x.flatten(start_dim=1)
        scores = self.cls_score(flat)
        bbox_deltas = self.bbox_pred(flat)
        return (scores, bbox_deltas)
def process_data_single(args, f, eos_token_ids):
    """Load one pickled decoding record and compute attention/entropy pairs.

    Reads attentions, prediction distributions, logits and the input
    document from `args.cur_dir/f`, truncates the sequence at the last
    useful EOS position, and delegates the analysis to
    `analyze_attention_y_entropy`.
    """
    print('running')
    with open(os.path.join(args.cur_dir, f), 'rb') as fd:
        data = pickle.load(fd)
    (attentions, pred_distb, logits, input_doc) = (data['attentions'], data['pred_distributions'], data['logits'], data['input_doc'])
    # Time-major attention tensors, with and without layer/head merging.
    attentions_tlle = convert_enc_attn(attentions, merge_layer_head=False)
    attention_tle = convert_enc_attn(attentions, merge_layer_head=True)
    # FIX: np.int was removed in NumPy >= 1.24; the builtin int is the
    # documented replacement (same behavior).
    input_doc = input_doc.astype(int).tolist()
    logits = logits.tolist()
    # Positions of EOS tokens; keep only the last useful one as the cutoff.
    indices = [i for (i, x) in enumerate(logits) if (x in eos_token_ids)]
    good_indices = detect_useless_ids(indices)
    if good_indices:
        max_t = good_indices[(- 1)]
    else:
        max_t = attentions_tlle.shape[0]
    # Distributions are stored as log-probabilities; exponentiate them.
    pred_distb = np.exp(pred_distb)
    idf_flag = compute_idf(attention_tle[:max_t])
    ban_positions = get_ban_positions(idf_flag)
    data_pairs = analyze_attention_y_entropy(max_t, attentions_tlle, pred_distb, input_doc, ban_positions, logits, args.nucleus, args.nuc_prob)
    return data_pairs
def Perlin(nrow, specs=None):
    """Generate a flattened nrow x nrow Perlin-noise landscape.

    Heights are integers obtained by scaling the normalized noise to
    roughly [0, 7].

    Parameters
    ----------
    nrow : number of samples per axis.
    specs : optional dict with 'size' (grid extent, default 5) and 'base'
        (noise seed offset, default 0).
    """
    # FIX: `specs={}` was a shared mutable default argument; use the
    # None-sentinel idiom instead (backward compatible for all callers).
    specs = {} if specs is None else specs
    size = specs.get('size', 5)
    base = specs.get('base', 0)
    assert size > 0
    x = y = np.linspace(0, size, nrow)
    # Sample tileable 2-D Perlin noise on the grid.
    n = [[noise.pnoise2(i, j, repeatx=size, repeaty=size, base=base) for j in y] for i in x]
    # Shift so the minimum is 0, then quantize into integer height levels.
    m = n - np.min(n)
    landscape = np.array(np.round(m * 7), dtype=int)
    return landscape.ravel()
def check_tree(json_file_path, gt_info, pred_info):
    """Validate a predicted parent-pointer tree against the ground truth.

    Returns an error code: 2 when node counts differ, 3 when the parent
    pointers contain a loop / do not converge to one root, and -1 on success.
    """
    if (len(gt_info) != len(pred_info)):
        logging.error('ERROR while processing {}, ERR_CODE={}, message:{}'.format(json_file_path, 2, 'number of nodes not equal'))
        return 2
    # parent_ids[i] starts as node i's parent and is repeatedly replaced by
    # its grandparent, walking each node toward the root.
    parent_ids = {}
    for i in range(len(pred_info)):
        parent_ids[i] = pred_info[i]['parent_id']
    # At most len(pred_info) hops are needed to reach the root marker (-1)
    # from any node; if some node still has a real parent afterwards, the
    # pointers contain a cycle.
    for loop_time in range(len(pred_info)):
        Valid = True
        for item_id in range(len(pred_info)):
            if (parent_ids[item_id] == (- 1)):
                continue
            Valid = False
            parent_ids[item_id] = pred_info[parent_ids[item_id]]['parent_id']
        if Valid:
            break
    # After walking up, every node should have converged to the same value;
    # otherwise emit a visualization of the malformed tree.
    if (len(set(parent_ids.values())) != 1):
        vis_digraph_py(complete_json(pred_info, gt_info), os.path.splitext(json_file_path)[0])
        logging.error('ERROR while processing {}, ERR_CODE={}, message:{}'.format(json_file_path, 3, 'parent loop exists, visualization has been saved in {}'.format(os.path.splitext(json_file_path)[0])))
        return 3
    return (- 1)
def parse_args():
    """Parse the command-line arguments for an experiment run."""
    p = argparse.ArgumentParser()
    # Required positional arguments.
    p.add_argument('experiment', help='name of the experiment that is being run')
    p.add_argument('dataset', help='.h5 File containing the train/valid/test datasets')
    # Optional flags.
    p.add_argument('--results_dir', default='/shared/results', help='Directory to save resulting models and visualizations')
    p.add_argument('--plot_error', action='store_true', help='If arg present, plot absolute error plots')
    p.add_argument('--treatment_idx', type=int, default=None, help='(Optional) column index of treatment variable in dataset. If present, run treatment visualizations.')
    return p.parse_args()
def require_torch_bf16_gpu(test_case):
    """Decorator skipping *test_case* unless bf16-on-GPU support is available."""
    reason = 'test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0'
    decorator = unittest.skipUnless(is_torch_bf16_gpu_available(), reason)
    return decorator(test_case)
def conv2d(inputs, num_output_channels, kernel_size, scope, stride=[1, 1], padding='SAME', data_format='NHWC', use_xavier=True, stddev=0.001, weight_decay=None, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """2D convolution layer with optional batch norm and activation (TF1 style).

    Args:
        inputs: 4-D tensor in NHWC or NCHW layout.
        num_output_channels: number of output feature channels.
        kernel_size: (kernel_h, kernel_w).
        scope: variable-scope name for this layer's variables.
        stride: (stride_h, stride_w).  NOTE(review): mutable default list —
            harmless here because it is only unpacked, never mutated.
        padding: 'SAME' or 'VALID'.
        data_format: 'NHWC' or 'NCHW'.
        use_xavier, stddev, weight_decay: initialization / regularization
            options forwarded to `_variable_with_weight_decay`.
        activation_fn: activation applied last; None disables it.
        bn, bn_decay, is_training: batch-normalization options.

    Returns:
        The resulting 4-D output tensor.
    """
    with tf.variable_scope(scope) as sc:
        (kernel_h, kernel_w) = kernel_size
        assert ((data_format == 'NHWC') or (data_format == 'NCHW'))
        # The channel axis position depends on the layout.
        if (data_format == 'NHWC'):
            num_in_channels = inputs.get_shape()[(- 1)].value
        elif (data_format == 'NCHW'):
            num_in_channels = inputs.get_shape()[1].value
        kernel_shape = [kernel_h, kernel_w, num_in_channels, num_output_channels]
        kernel = _variable_with_weight_decay('weights', shape=kernel_shape, use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        (stride_h, stride_w) = stride
        outputs = tf.nn.conv2d(inputs, kernel, [1, stride_h, stride_w, 1], padding=padding, data_format=data_format)
        biases = _variable_on_cpu('biases', [num_output_channels], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases, data_format=data_format)
        # Optional batch norm, then optional nonlinearity.
        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training, bn_decay=bn_decay, scope='bn', data_format=data_format)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
def analyze(airline_table, text_file, callword_mapping=None):
    """Scan a transcript for spaced/hyphenated variants of airline callwords.

    Args:
        airline_table: source passed to `load_callwords`.
        text_file: path of the transcript to scan.
        callword_mapping: optional dict to extend; a fresh dict is used
            when omitted.

    Returns:
        Dict mapping each matched variant to its canonical callword form.
    """
    # Bug fix: the old default `callword_mapping=dict()` was mutated and
    # returned, so matches leaked between successive calls.
    if callword_mapping is None:
        callword_mapping = dict()
    # Single letters/digits are too ambiguous to match on.
    skip_words = set(list(letters.values()) + list(numbers.values()))
    text_file_lines = []
    with open(text_file) as f:
        for line in f:
            text_file_lines.append((' ' + line) + ' ')
    for callword in tqdm(load_callwords(airline_table)):
        print(('INFO: Analyzing %s...' % callword), file=sys.stderr)
        callword = callword.lower()
        callword_ = re.sub('[ -]', '_', callword)
        if np.sum([(wrd in skip_words) for wrd in callword.split()]) > 0:
            continue
        # Bug fix: '[ -_]' is a character RANGE (space..underscore, which also
        # matches digits and punctuation); '[ _-]' matches only the three
        # literal separator characters, consistent with the join pattern below.
        split_to_char = [str(c) for c in re.sub('[ _-]', '', callword)]
        # Match the callword with optional single separators between characters.
        regex = re.compile((' ' + '[ _-]{0,1}'.join(split_to_char)) + ' ')
        for line in text_file_lines:
            for matched in re.findall(regex, (' ' + line) + ' '):
                matched_ = matched.strip()
                if (matched_ != callword_) and (matched_ not in callword_mapping):
                    callword_mapping[matched_] = callword_
    return callword_mapping
def test_hourglass_backbone():
    """Smoke-test HourglassNet: invalid configs raise, valid models emit
    one (1, 256, 64, 64) feature map per stack."""
    # Invalid configurations must be rejected at construction time.
    with pytest.raises(AssertionError):
        HourglassNet(num_stacks=0)
    with pytest.raises(AssertionError):
        HourglassNet(stage_channels=[256, 256, 384, 384, 384], stage_blocks=[2, 2, 2, 2, 2, 4])
    with pytest.raises(AssertionError):
        HourglassNet(downsample_times=5, stage_channels=[256, 256, 384, 384, 384], stage_blocks=[2, 2, 2, 2, 2])
    # Valid models: output count tracks num_stacks, shapes are fixed.
    for num_stacks in (1, 2):
        model = HourglassNet(num_stacks=num_stacks)
        model.init_weights()
        model.train()
        feat = model(torch.randn(1, 3, 256, 256))
        assert len(feat) == num_stacks
        for level in feat:
            assert level.shape == torch.Size([1, 256, 64, 64])
def generate_xml(name, lines, img_size, class_sets, doncateothers=True):
    """Build a PASCAL-VOC-style XML annotation document from label lines.

    Args:
        name: image stem; '<name>.jpg' is recorded as the filename.
        lines: label lines of the form 'cls x1 y1 x2 y2 ...'.
        img_size: (height, width, depth) of the image.
        class_sets: recognised class names; anything else becomes 'dontcare'.
        doncateothers: when False, lines with unknown classes are skipped
            before being mapped to 'dontcare'.

    Returns:
        (doc, objs): the xml.dom Document and a list of per-object dicts.
    """
    doc = Document()
    def append_xml_node_attr(child, parent=None, text=None):
        # Create element `child` with optional text node, attach it to
        # `parent` (the document itself when parent is None), return it.
        ele = doc.createElement(child)
        if (not (text is None)):
            text_node = doc.createTextNode(text)
            ele.appendChild(text_node)
        parent = (doc if (parent is None) else parent)
        parent.appendChild(ele)
        return ele
    img_name = (name + '.jpg')
    # Fixed VOC-style header: folder/filename/source/owner/size/segmented.
    annotation = append_xml_node_attr('annotation')
    append_xml_node_attr('folder', parent=annotation, text='text')
    append_xml_node_attr('filename', parent=annotation, text=img_name)
    source = append_xml_node_attr('source', parent=annotation)
    append_xml_node_attr('database', parent=source, text='coco_text_database')
    append_xml_node_attr('annotation', parent=source, text='text')
    append_xml_node_attr('image', parent=source, text='text')
    append_xml_node_attr('flickrid', parent=source, text='000000')
    owner = append_xml_node_attr('owner', parent=annotation)
    append_xml_node_attr('name', parent=owner, text='ms')
    size = append_xml_node_attr('size', annotation)
    append_xml_node_attr('width', size, str(img_size[1]))
    append_xml_node_attr('height', size, str(img_size[0]))
    append_xml_node_attr('depth', size, str(img_size[2]))
    append_xml_node_attr('segmented', parent=annotation, text='0')
    objs = []
    for line in lines:
        splitted_line = line.strip().lower().split()
        cls = splitted_line[0].lower()
        if ((not doncateothers) and (cls not in class_sets)):
            continue
        cls = ('dontcare' if (cls not in class_sets) else cls)
        # 'dontcare' boxes are dropped entirely.
        if (cls == 'dontcare'):
            continue
        obj = append_xml_node_attr('object', parent=annotation)
        occlusion = int(0)
        # Coordinates are shifted to 1-based VOC convention.
        (x1, y1, x2, y2) = (int((float(splitted_line[1]) + 1)), int((float(splitted_line[2]) + 1)), int((float(splitted_line[3]) + 1)), int((float(splitted_line[4]) + 1)))
        truncation = float(0)
        difficult = (1 if _is_hard(cls, truncation, occlusion, x1, y1, x2, y2) else 0)
        truncted = (0 if (truncation < 0.5) else 1)
        append_xml_node_attr('name', parent=obj, text=cls)
        append_xml_node_attr('pose', parent=obj, text='none')
        append_xml_node_attr('truncated', parent=obj, text=str(truncted))
        append_xml_node_attr('difficult', parent=obj, text=str(int(difficult)))
        bb = append_xml_node_attr('bndbox', parent=obj)
        append_xml_node_attr('xmin', parent=bb, text=str(x1))
        append_xml_node_attr('ymin', parent=bb, text=str(y1))
        append_xml_node_attr('xmax', parent=bb, text=str(x2))
        append_xml_node_attr('ymax', parent=bb, text=str(y2))
        o = {'class': cls, 'box': np.asarray([x1, y1, x2, y2], dtype=float), 'truncation': truncation, 'difficult': difficult, 'occlusion': occlusion}
        objs.append(o)
    return (doc, objs)
def record_request(request_url: str, request_body: Dict, user_id: str):
    """Persist one API request (url, body, user, Beijing timestamp) to MySQL.

    Raises:
        Exception: when the insert fails; the original DB error is chained.
    """
    mysqldb = MysqlDb()
    mysqldb._set_db('requests')
    # Convert the current UTC time to Beijing time (UTC+8).
    SHA_TZ = timezone(timedelta(hours=8), name='Asia/Shanghai')
    utc_now = datetime.datetime.utcnow().replace(tzinfo=timezone.utc)
    beijing_time = utc_now.astimezone(SHA_TZ).strftime('%Y-%m-%d %H:%M:%S')
    if (not isinstance(request_body, Dict)):
        request_body = request_body.__dict__
    # Single quotes are replaced so the serialized body cannot terminate the
    # SQL string literal below.
    request_body = json.dumps(request_body).replace("'", '^')
    # SECURITY NOTE(review): this builds SQL by string interpolation and is
    # injectable via request_url / user_id; switch to parameterized queries
    # if the MysqlDb API supports placeholder arguments.
    sql = f"INSERT INTO record VALUES (null, '{request_url}', '{request_body}', '{user_id}', '{beijing_time}');"
    try:
        with mysqldb.transaction():
            mysqldb.insert(sql, None)
    except Exception as exc:
        # Fix: was a bare `except:`; narrow the catch and chain the cause so
        # the underlying DB error is not silently discarded.
        raise Exception('Exception occurred when inserting data into MySQL, please check the db session and your syntax.') from exc
    mysqldb._close()
def build_categorical_crossentropy(weights=None):
    """Return a categorical cross-entropy loss, optionally class-weighted.

    `weights` (if given) is multiplied element-wise with the per-class
    loss terms before summation.
    """
    def categorical_crossentropy(y_true, y_pred):
        # Renormalize predictions to sum to one, then clip away the
        # endpoints so the logarithm stays finite.
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        term = y_true * K.log(y_pred)
        if weights is not None:
            term = term * weights
        return -K.sum(term, axis=-1)
    return categorical_crossentropy
class CategoricalGRUPolicy(StochasticPolicy):
    """GRU-based recurrent policy producing a categorical action distribution.

    Only works with discrete (akro.Discrete) action spaces.  When
    `state_include_action` is True, the previous (one-hot) action is
    concatenated to the observation as the network input.
    """
    def __init__(self, env_spec, name='CategoricalGRUPolicy', hidden_dim=32, hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, state_include_action=True, layer_normalization=False):
        if (not isinstance(env_spec.action_space, akro.Discrete)):
            raise ValueError('CategoricalGRUPolicy only workswith akro.Discrete action space.')
        super().__init__(name, env_spec)
        self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.n
        self._hidden_dim = hidden_dim
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._recurrent_nonlinearity = recurrent_nonlinearity
        self._recurrent_w_init = recurrent_w_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._hidden_state_init = hidden_state_init
        self._hidden_state_init_trainable = hidden_state_init_trainable
        self._layer_normalization = layer_normalization
        self._state_include_action = state_include_action
        # Network input is the observation, optionally with the previous
        # one-hot action appended.
        if state_include_action:
            self._input_dim = (self._obs_dim + self._action_dim)
        else:
            self._input_dim = self._obs_dim
        self._f_step_prob = None
        self.model = CategoricalGRUModel(output_dim=self._action_dim, hidden_dim=self._hidden_dim, name='prob_network', hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, recurrent_nonlinearity=recurrent_nonlinearity, recurrent_w_init=recurrent_w_init, hidden_state_init=hidden_state_init, hidden_state_init_trainable=hidden_state_init_trainable, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
        self._prev_actions = None
        self._prev_hiddens = None
        self._dist = None
        self._init_hidden = None
        self._initialize()
    def _initialize(self):
        """Build placeholders, the model graph and the per-step probability fn."""
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            state_input = tf.compat.v1.placeholder(shape=(None, None, self._input_dim), name='state_input', dtype=tf.float32)
            step_input_var = tf.compat.v1.placeholder(shape=(None, self._input_dim), name='step_input', dtype=tf.float32)
            step_hidden_var = tf.compat.v1.placeholder(shape=(None, self._hidden_dim), name='step_hidden_input', dtype=tf.float32)
            (self._dist, step_out, step_hidden, self._init_hidden) = self.model.build(state_input, step_input_var, step_hidden_var).outputs
            # Callable computing (action probs, next hidden) for a single step.
            self._f_step_prob = tf.compat.v1.get_default_session().make_callable([step_out, step_hidden], feed_list=[step_input_var, step_hidden_var])
    def build(self, state_input, name=None):
        """Build the model graph for the given symbolic state input."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            (_, step_input_var, step_hidden_var) = self.model.inputs
            return self.model.build(state_input, step_input_var, step_hidden_var, name=name)
    def input_dim(self):
        """int: dimension of the policy input (obs plus prev action if included)."""
        return self._input_dim
    def vectorized(self):
        """bool: this policy supports batched (vectorized) sampling."""
        return True
    def reset(self, do_resets=None):
        """Reset previous actions / hidden states for the flagged environments.

        Args:
            do_resets: boolean mask per parallel environment; defaults to a
                single-environment reset.
        """
        if (do_resets is None):
            do_resets = [True]
        do_resets = np.asarray(do_resets)
        # (Re)allocate buffers if the number of parallel envs changed.
        if ((self._prev_actions is None) or (len(do_resets) != len(self._prev_actions))):
            self._prev_actions = np.zeros((len(do_resets), self.action_space.flat_dim))
            self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
        self._prev_actions[do_resets] = 0.0
        self._prev_hiddens[do_resets] = self._init_hidden.eval()
    def get_action(self, observation):
        """Return one sampled action and its agent info for one observation."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})
    def get_actions(self, observations):
        """Sample actions for a batch of observations, advancing the GRU state."""
        observations = self.observation_space.flatten_n(observations)
        if self._state_include_action:
            assert (self._prev_actions is not None)
            all_input = np.concatenate([observations, self._prev_actions], axis=(- 1))
        else:
            all_input = observations
        (probs, hidden_vec) = self._f_step_prob(all_input, self._prev_hiddens)
        actions = list(map(self.action_space.weighted_sample, probs))
        prev_actions = self._prev_actions
        self._prev_actions = self.action_space.flatten_n(actions)
        self._prev_hiddens = hidden_vec
        agent_info = dict(prob=probs)
        if self._state_include_action:
            agent_info['prev_action'] = np.copy(prev_actions)
        return (actions, agent_info)
    def distribution(self):
        """The symbolic action distribution built in `_initialize`."""
        return self._dist
    def state_info_specs(self):
        """Extra per-step state info (previous action) the policy requires."""
        if self._state_include_action:
            return [('prev_action', (self._action_dim,))]
        return []
    def clone(self, name):
        """Return a copy of this policy with a new name and shared parameter values."""
        new_policy = self.__class__(name=name, env_spec=self._env_spec, hidden_dim=self._hidden_dim, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, recurrent_nonlinearity=self._recurrent_nonlinearity, recurrent_w_init=self._recurrent_w_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, hidden_state_init=self._hidden_state_init, hidden_state_init_trainable=self._hidden_state_init_trainable, state_include_action=self._state_include_action, layer_normalization=self._layer_normalization)
        new_policy.model.parameters = self.model.parameters
        return new_policy
    def __getstate__(self):
        """Drop unpicklable TF objects before pickling."""
        new_dict = super().__getstate__()
        del new_dict['_f_step_prob']
        del new_dict['_dist']
        del new_dict['_init_hidden']
        return new_dict
    def __setstate__(self, state):
        """Restore state and rebuild the TF graph pieces removed in __getstate__."""
        super().__setstate__(state)
        self._initialize()
def optimizer(optim, eta, loss_fn, at_step, decay_rate):
    """Build a TF1 `optimize_loss` op for the requested optimizer.

    Args:
        optim: optimizer name ('Adadelta', 'Momentum') or an optimizer
            accepted directly by `tf.contrib.layers.optimize_loss`.
        eta: learning rate.
        loss_fn: scalar loss tensor to minimize.
        at_step / decay_rate: staircase-decay schedule (Momentum only).

    Returns:
        The training op produced by `tf.contrib.layers.optimize_loss`.
    """
    global_step = tf.Variable(0, trainable=False)
    optz = optim
    # Bug fix: `lr_decay_fn` was only assigned inside the two branches below,
    # so any other `optim` value raised NameError at the optimize_loss call.
    # Default to no learning-rate decay.
    lr_decay_fn = None
    if (optim == 'Adadelta'):
        optz = (lambda lr: tf.train.AdadeltaOptimizer(lr, 0.95, 1e-06))
    elif (optim == 'Momentum'):
        optz = (lambda lr: tf.train.MomentumOptimizer(lr, MOM))
        lr_decay_fn = exponential_staircase_decay(at_step, decay_rate)
    return tf.contrib.layers.optimize_loss(loss_fn, global_step, eta, optz, clip_gradients=4.0, learning_rate_decay_fn=lr_decay_fn)
class mobilenetv1(Network):
    """Faster R-CNN network wrapper around a MobileNet v1 backbone.

    Features are taken at stride 16; the first `cfg.MOBILENET.FIXED_LAYERS`
    backbone blocks and all batch-norm statistics are frozen.
    """
    def __init__(self):
        Network.__init__(self)
        # Feature map is 1/16th of the input resolution.
        self._feat_stride = [16]
        self._feat_compress = [(1.0 / float(self._feat_stride[0]))]
        self._depth_multiplier = cfg.MOBILENET.DEPTH_MULTIPLIER
        self._net_conv_channels = 512
        self._fc7_channels = 1024
    def init_weights(self):
        """Initialize backbone conv layers and the RPN/RCNN head layers."""
        def normal_init(m, mean, stddev, truncated=False):
            # Only (de)convolution layers are re-initialized.
            if (m.__class__.__name__.find('Conv') == (- 1)):
                return
            if truncated:
                # Truncated normal: fmod(2) clips samples to within 2 std devs.
                m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean)
            else:
                m.weight.data.normal_(mean, stddev)
            if (m.bias is not None):
                m.bias.data.zero_()
        self.mobilenet.apply((lambda m: normal_init(m, 0, 0.09, True)))
        normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)
        normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)
    def _image_to_head(self):
        """Run the backbone head over the input image; returns conv features."""
        net_conv = self._layers['head'](self._image)
        self._act_summaries['conv'] = net_conv
        return net_conv
    def _head_to_tail(self, pool5):
        """Run the backbone tail on pooled ROIs; global-average-pool to fc7."""
        fc7 = self._layers['tail'](pool5)
        # Average over the spatial dimensions (W then H).
        fc7 = fc7.mean(3).mean(2)
        return fc7
    def _init_head_tail(self):
        """Build the backbone, freeze the configured layers/batch norms, and
        split it into 'head' (first 12 blocks) and 'tail' (the rest)."""
        self.mobilenet = mobilenet_v1_base()
        assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
        # Freeze the first FIXED_LAYERS blocks.
        for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
            for p in m.parameters():
                p.requires_grad = False
        def set_bn_fix(m):
            # Batch-norm affine parameters are frozen everywhere.
            classname = m.__class__.__name__
            if (classname.find('BatchNorm') != (- 1)):
                for p in m.parameters():
                    p.requires_grad = False
        self.mobilenet.apply(set_bn_fix)
        def l2_regularizer(m, wd, regu_depth):
            # Tag conv weights with their weight decay; depthwise convs
            # (groups > 1) are exempt unless regu_depth is set.
            if (m.__class__.__name__.find('Conv') != (- 1)):
                if (regu_depth or (m.groups == 1)):
                    m.weight.weight_decay = wd
                else:
                    m.weight.weight_decay = 0
        self.mobilenet.apply((lambda x: l2_regularizer(x, cfg.MOBILENET.WEIGHT_DECAY, cfg.MOBILENET.REGU_DEPTH)))
        self._layers['head'] = nn.Sequential(*list(self.mobilenet.children())[:12])
        self._layers['tail'] = nn.Sequential(*list(self.mobilenet.children())[12:])
    def train(self, mode=True):
        """Switch train/eval mode, keeping frozen layers and all BNs in eval."""
        nn.Module.train(self, mode)
        if mode:
            for m in list(self.mobilenet.children())[:cfg.MOBILENET.FIXED_LAYERS]:
                m.eval()
            def set_bn_eval(m):
                classname = m.__class__.__name__
                if (classname.find('BatchNorm') != (- 1)):
                    m.eval()
            self.mobilenet.apply(set_bn_eval)
    def load_pretrained_cnn(self, state_dict):
        """Load backbone weights from a checkpoint with a 'features.' prefix."""
        print('Warning: No available pretrained model yet')
        self.mobilenet.load_state_dict({k: state_dict[('features.' + k)] for k in list(self.mobilenet.state_dict())})
class XMLCNN_encoder(nn.Module):
    """XML-CNN text encoder.

    Embeds token ids, applies three parallel 1-column convolutions
    (kernel heights 2/4/8), dynamic max-pooling, and a bottleneck linear
    layer, returning dropout-regularized context vectors.
    """

    def __init__(self, dropout, labels_num, dynamic_pool_length, bottleneck_dim, num_filters, vocab_size=None, emb_size=None, emb_trainable=True, emb_init=None, padding_idx=0, **kwargs):
        super(XMLCNN_encoder, self).__init__()
        # A pretrained embedding matrix defines (vocab_size, emb_size);
        # explicitly passed values must agree with its shape.
        if emb_init is not None:
            if vocab_size is not None:
                assert vocab_size == emb_init.shape[0]
            if emb_size is not None:
                assert emb_size == emb_init.shape[1]
            vocab_size, emb_size = emb_init.shape
        self.output_channel = num_filters
        self.num_bottleneck_hidden = bottleneck_dim
        self.dynamic_pool_length = dynamic_pool_length
        init_weight = torch.from_numpy(emb_init).float() if emb_init is not None else None
        self.emb = nn.Embedding(vocab_size, emb_size, padding_idx=padding_idx, sparse=True, _weight=init_weight)
        self.emb.weight.requires_grad = emb_trainable
        self.ks = 3  # number of parallel convolution branches
        self.conv1 = nn.Conv2d(1, self.output_channel, (2, emb_size), padding=(1, 0))
        self.conv2 = nn.Conv2d(1, self.output_channel, (4, emb_size), padding=(3, 0))
        self.conv3 = nn.Conv2d(1, self.output_channel, (8, emb_size), padding=(7, 0))
        self.pool = nn.AdaptiveMaxPool1d(self.dynamic_pool_length)
        self.bottleneck = nn.Linear(self.ks * self.output_channel * self.dynamic_pool_length, self.num_bottleneck_hidden)
        self.dropout = nn.Dropout(dropout)
        for layer in (self.conv1, self.conv2, self.conv3, self.bottleneck):
            nn.init.xavier_uniform_(layer.weight)

    def forward(self, x):
        # (batch, seq) -> (batch, 1, seq, emb) for the 2-D convolutions.
        embedded = self.emb(x).unsqueeze(1)
        branches = [F.relu(conv(embedded)).squeeze(3) for conv in (self.conv1, self.conv2, self.conv3)]
        pooled = torch.cat([self.pool(branch).squeeze(2) for branch in branches], 1)
        flat = pooled.view(-1, self.ks * self.output_channel * self.dynamic_pool_length)
        return self.dropout(F.relu(self.bottleneck(flat)))
class DogsDataModule(pl.LightningDataModule):
    """Lightning data module wrapping the train/val/test DogsDataset splits."""

    def __init__(self, args):
        super().__init__()
        self.data_root = args.data_root
        self.batch_size = args.batch_size
        self.num_workers = args.num_workers
        # All three splits share resolution / flip / rescale settings.
        self.train_dataset = DogsDataset(args.data_root, args.resolution, 'train', use_flip=False, use_rescale=args.use_rescale)
        self.val_dataset = DogsDataset(args.data_root, args.resolution, 'val', use_flip=False, use_rescale=args.use_rescale)
        self.test_dataset = DogsDataset(args.data_root, args.resolution, 'test', use_flip=False, use_rescale=args.use_rescale)

    def _make_loader(self, dataset, shuffle):
        # Shared DataLoader construction; only shuffling differs per split.
        return DataLoader(dataset, batch_size=self.batch_size, shuffle=shuffle, num_workers=self.num_workers, pin_memory=True)

    def train_dataloader(self):
        return self._make_loader(self.train_dataset, True)

    def val_dataloader(self):
        return self._make_loader(self.val_dataset, False)

    def test_dataloader(self):
        return self._make_loader(self.test_dataset, False)
class _DeepIndepMixtureNormal(DeepConditional):
    """Backbone plus heads parameterizing a mixture of diagonal normals
    with a shared (diagonal) covariance across components."""

    def __init__(self, backbone: nn.Module, mean_head: nn.ModuleList, logstd_head: nn.Module, component_head: nn.Module):
        super().__init__()
        self.backbone = backbone
        self.mean_head = mean_head
        self.logstd_head = logstd_head
        self.component_head = component_head

        def _init_normal(m):
            # N(0, 0.02) weights / zero bias for every Linear layer.
            # Exact type match kept on purpose so subclasses are untouched.
            if type(m) == nn.Linear:
                nn.init.normal_(m.weight, 0.0, 0.02)
                nn.init.constant_(m.bias, 0.0)

        self.apply(_init_normal)

    def forward(self, x):
        features = self.backbone(x)
        # One mean head per mixture component, stacked along dim 1.
        means = torch.stack([head(features) for head in self.mean_head], 1)
        return means, self.logstd_head(features), self.component_head(features)

    def predict(self, x) -> Independent:
        mean, logstd, component = self(x)
        # softplus keeps std positive; the epsilon bounds it away from zero.
        std = F.softplus(logstd) + 1e-05
        log_weights = torch.log_softmax(component, dim=-1)
        return MixtureOfDiagNormalsSharedCovariance(mean, std, log_weights).to_event(0)
def train_svm():
    """Extract descriptor-network features from voxel data and fit a linear SVM.

    Restores the 'des' network checkpoint, runs all train/test voxels through
    it in batches to obtain features, then trains and evaluates a liblinear
    SVM (-s 2 -c 0.01).  Results are printed; nothing is returned.
    """
    output_dir = os.path.join(FLAGS.output_dir, FLAGS.category)
    log_dir = os.path.join(output_dir, 'log')
    # Start from a clean log directory.
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    (train_data, train_labels) = data_io.getAll(FLAGS.data_path, cube_len=FLAGS.cube_len, low_bound=0, up_bound=1, num_voxels=FLAGS.train_size)
    (test_data, test_labels) = data_io.getAll(FLAGS.data_path, cube_len=FLAGS.cube_len, low_bound=0, up_bound=1, num_voxels=FLAGS.test_size, train=False)
    # Mean-center both splits using the training mean only.
    voxel_mean = train_data.mean()
    train_data = (train_data - voxel_mean)
    test_data = (test_data - voxel_mean)
    # Append a trailing channel dimension.
    train_data = train_data[(..., np.newaxis)]
    test_data = test_data[(..., np.newaxis)]
    print('Reading voxel data, shape: {}'.format(train_data.shape))
    print(('min: %.4f\tmax: %.4f' % (train_data.min(), train_data.max())))
    obs = tf.placeholder(shape=[None, FLAGS.cube_len, FLAGS.cube_len, FLAGS.cube_len, 1], dtype=tf.float32)
    features = extract_features(obs)
    saver = tf.train.Saver(tf.trainable_variables(scope='des'))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, FLAGS.ckpt)
        print('Loading checkpoint {}'.format(FLAGS.ckpt))
        num_batches_train = int(math.ceil((len(train_data) / FLAGS.batch_size)))
        num_batches_test = int(math.ceil((len(test_data) / FLAGS.batch_size)))
        train_features = []
        test_features = []
        # Batched feature extraction to bound memory use.
        for i in range(num_batches_train):
            train_x = train_data[(i * FLAGS.batch_size):min(len(train_data), ((i + 1) * FLAGS.batch_size))]
            ff = sess.run(features, feed_dict={obs: train_x})
            train_features.append(ff)
        train_features = np.concatenate(train_features, axis=0)
        print(train_features.shape)
        for i in range(num_batches_test):
            test_x = test_data[(i * FLAGS.batch_size):min(len(test_data), ((i + 1) * FLAGS.batch_size))]
            ff = sess.run(features, feed_dict={obs: test_x})
            test_features.append(ff)
        test_features = np.concatenate(test_features, axis=0)
        print('Begin to train SVM ')
        # liblinear interface: problem/parameter/train/predict.
        prob = problem(train_labels, train_features)
        param = parameter('-s 2 -c 0.01')
        svm_model = train(prob, param)
        (_, train_acc, _) = predict(train_labels, train_features, svm_model)
        (_, test_acc, _) = predict(test_labels, test_features, svm_model)
        print(('train acc: %.4f, test acc: %.4f' % (train_acc[0], test_acc[0])))
def _get_ngrams_with_counter(segment: Sequence[str], max_order: List[int]) -> collections.Counter:
ngram_counts = collections.Counter()
for order in xrange(1, (max_order + 1)):
for i in xrange(0, ((len(segment) - order) + 1)):
ngram = tuple(segment[i:(i + order)])
ngram_counts[ngram] += 1
return ngram_counts |
def main():
    """Select the largest pruning ratio whose accuracy meets the baseline.

    Reads the baseline accuracy and the pruning-ratio -> accuracy record,
    walks candidate ratios from largest to smallest, promotes the first
    acceptable checkpoint into `save_folder`, and deletes the per-ratio
    checkpoint folders.  Falls back to the unpruned 'scratch' checkpoint
    when no ratio qualifies.
    """
    args = parser.parse_args()
    if args.log_path:
        set_logger(args.log_path)
    # Checkpoints live next to the pruning-record json file.
    save_folder = args.pruning_ratio_to_acc_record_file.rsplit('/', 1)[0]
    with open(args.baseline_acc_file, 'r') as jsonfile:
        json_data = json.load(jsonfile)
    criterion_acc = float(json_data[args.dataset])
    with open(args.pruning_ratio_to_acc_record_file, 'r') as json_file:
        json_data = json.load(json_file)
    # The '0.0' entry holds the unpruned accuracy; it is not a candidate.
    acc_before_prune = json_data['0.0']
    json_data.pop('0.0')
    available_pruning_ratios = list(json_data.keys())
    # Try the most aggressive (largest) pruning ratios first.
    available_pruning_ratios.reverse()
    flag_there_is_pruning_ratio_that_match_our_need = False
    chosen_pruning_ratio = 0.0
    for pruning_ratio in available_pruning_ratios:
        acc = json_data[pruning_ratio]
        # Accept when accuracy (plus tolerance) meets the baseline, or when
        # the network is already at maximum width and even the unpruned model
        # falls short of the baseline.
        if (((acc + args.allow_acc_loss) >= criterion_acc) or ((args.network_width_multiplier == args.max_allowed_network_width_multiplier) and (acc_before_prune < criterion_acc))):
            chosen_pruning_ratio = pruning_ratio
            checkpoint_folder = os.path.join(save_folder, str(pruning_ratio))
            for filename in os.listdir(checkpoint_folder):
                shutil.copyfile(os.path.join(checkpoint_folder, filename), os.path.join(save_folder, filename))
            flag_there_is_pruning_ratio_that_match_our_need = True
            break
    # Remove all per-ratio checkpoint folders.
    for pruning_ratio in available_pruning_ratios:
        checkpoint_folder = os.path.join(save_folder, str(pruning_ratio))
        shutil.rmtree(checkpoint_folder)
    if (not flag_there_is_pruning_ratio_that_match_our_need):
        # Fall back to the checkpoint saved before pruning ('scratch').
        folder_that_contain_checkpoint_before_pruning = os.path.join(save_folder.rsplit('/', 1)[0], 'scratch')
        for filename in os.listdir(folder_that_contain_checkpoint_before_pruning):
            shutil.copyfile(os.path.join(folder_that_contain_checkpoint_before_pruning, filename), os.path.join(save_folder, filename))
    logging.info('We choose pruning_ratio {}'.format(chosen_pruning_ratio))
def test_test_quasiisothermaldf_setup_profileAsQuantity():
    """Check that quasiisothermaldf converts astropy-Quantity setup inputs
    into the expected internal (ro/vo-scaled) values."""
    from galpy.actionAngle import actionAngleAdiabatic
    from galpy.df import quasiisothermaldf
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential
    aA = actionAngleAdiabatic(pot=MWPotential, c=True)
    (ro, vo) = (7.0, 250.0)
    qdf = quasiisothermaldf((3.0 * units.kpc), ((30.0 * units.km) / units.s), ((20.0 * units.pc) / units.Myr), (10.0 * units.kpc), (8000.0 * units.lyr), pot=MWPotential, aA=aA, cutcounter=True, ro=ro, vo=vo)
    assert (numpy.fabs((qdf._hr - (3.0 / ro))) < (10.0 ** (- 10.0))), 'hr in quasiisothermaldf setup as Quantity does not work as expected'
    assert (numpy.fabs((qdf._sr - (30.0 / vo))) < (10.0 ** (- 10.0))), 'sr in quasiisothermaldf setup as Quantity does not work as expected'
    assert (numpy.fabs((qdf._sz - ((20.0 * (units.pc / units.Myr).to((units.km / units.s))) / vo))) < (10.0 ** (- 10.0))), 'sz in quasiisothermaldf setup as Quantity does not work as expected'
    # Bug fix: this failure message previously said 'hr' although the
    # assertion checks the radial-dispersion scale length `_hsr`.
    assert (numpy.fabs((qdf._hsr - (10.0 / ro))) < (10.0 ** (- 10.0))), 'hsr in quasiisothermaldf setup as Quantity does not work as expected'
    assert (numpy.fabs((qdf._hsz - ((8000.0 * units.lyr.to(units.kpc)) / ro))) < (10.0 ** (- 10.0))), 'hsz in quasiisothermaldf setup as Quantity does not work as expected'
    return None
class TestCasePlus(unittest.TestCase):
    """A unittest.TestCase with helpers for self-cleaning temporary dirs."""

    def setUp(self):
        # Directories registered here are removed in tearDown.
        self.teardown_tmp_dirs = []

    def get_auto_remove_tmp_dir(self, tmp_dir=None, after=True, before=False):
        """Return a temporary directory path.

        With no `tmp_dir`, a fresh system temp dir is created.  With an
        explicit `tmp_dir` (must be a './'-prefixed relative path) the dir
        is created, optionally wiped first (`before=True`).  When `after`
        is True the dir is scheduled for removal in tearDown.
        """
        if tmp_dir is None:
            tmp_dir = tempfile.mkdtemp()
        else:
            path = Path(tmp_dir).resolve()
            # Refuse anything that is not an explicit relative path, so a
            # typo cannot wipe an arbitrary location.
            if not tmp_dir.startswith('./'):
                raise ValueError(f'`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`')
            if before is True and path.exists():
                shutil.rmtree(tmp_dir, ignore_errors=True)
            path.mkdir(parents=True, exist_ok=True)
        if after is True:
            self.teardown_tmp_dirs.append(tmp_dir)
        return tmp_dir

    def tearDown(self):
        for tmp_dir in self.teardown_tmp_dirs:
            shutil.rmtree(tmp_dir, ignore_errors=True)
        self.teardown_tmp_dirs = []
def hanoi(height, start=1, end=3):
    """Return the move list [(from_peg, to_peg), ...] solving Towers of Hanoi
    for `height` disks moved from peg `start` to peg `end`."""
    if height <= 0:
        return []
    # The spare peg is whichever of {1, 2, 3} is neither start nor end.
    spare = ({1, 2, 3} - {start, end}).pop()
    return (
        hanoi(height - 1, start, spare)
        + [(start, end)]
        + hanoi(height - 1, spare, end)
    )
def instances2dict(imageFileList, verbose=False):
    """Collect per-label Instance statistics from instance-id label images.

    Args:
        imageFileList: one path or a list of paths to instance-id images.
        verbose: print progress to stdout.

    Returns:
        Dict mapping absolute image path -> {label name: [instance dicts]}.
    """
    imgCount = 0
    instanceDict = {}
    # Accept a single file name as well as a list.
    if (not isinstance(imageFileList, list)):
        imageFileList = [imageFileList]
    if verbose:
        print('Processing {} images...'.format(len(imageFileList)))
    for imageFileName in imageFileList:
        img = Image.open(imageFileName)
        imgNp = np.array(img)
        # One (initially empty) bucket per known label.
        instances = {}
        for label in labels:
            instances[label.name] = []
        # Each unique pixel value in the image is one instance id.
        for instanceId in np.unique(imgNp):
            instanceObj = Instance(imgNp, instanceId)
            instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1
        if verbose:
            print('\rImages Processed: {}'.format(imgCount), end=' ')
            sys.stdout.flush()
    if verbose:
        print('')
    return instanceDict
def UpdateIncludeState(filename, include_state, io=codecs):
    """Record every #include found in `filename` into `include_state`.

    Args:
        filename: header file to scan.
        include_state: dict-like; each include is recorded (first sighting
            wins) with the value 'filename:linenum'.
        io: module providing `open` (injectable for testing).

    Returns:
        True on success, False when the file cannot be opened.
    """
    try:
        headerfile = io.open(filename, 'r', 'utf8', 'replace')
    except IOError:
        return False
    # Bug fix: the file handle was never closed, leaking a descriptor on
    # every call; close it deterministically.
    try:
        linenum = 0
        for line in headerfile:
            linenum += 1
            clean_line = CleanseComments(line)
            match = _RE_PATTERN_INCLUDE.search(clean_line)
            if match:
                include = match.group(2)
                # setdefault: keep the first location where the include appeared.
                include_state.setdefault(include, ('%s:%d' % (filename, linenum)))
        return True
    finally:
        headerfile.close()
def test_is_tree_with_leaves_of_type(jax_tree: Dict, np_tree: Dict, jax_and_numpy_tree: Dict) -> None:
    """Homogeneous trees match their leaf type; a mixed tree matches neither."""
    checker = pytree_test_utils.is_tree_with_leaves_of_type
    assert checker(jax_tree, jnp.ndarray)
    assert checker(np_tree, np.ndarray)
    # A tree mixing jax and numpy leaves is homogeneous in neither type.
    for leaf_type in (jnp.ndarray, np.ndarray):
        assert not checker(jax_and_numpy_tree, leaf_type)
def get_color_distortion(s=1.0):
    """SimCLR-style color distortion: random color jitter (p=0.8) followed by
    random grayscale (p=0.2).  `s` scales the jitter strength."""
    jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    return transforms.Compose([
        transforms.RandomApply([jitter], p=0.8),
        transforms.RandomGrayscale(p=0.2),
    ])
def rotateX(angle):
    """Return the 3x3 rotation matrix for `angle` degrees about the x-axis."""
    phi = (angle * math.pi) / 180
    cos_phi, sin_phi = math.cos(phi), math.sin(phi)
    return np.array([
        [1, 0, 0],
        [0, cos_phi, -sin_phi],
        [0, sin_phi, cos_phi],
    ])
def save_labels(dataset_list, output_dir):
    """On the main process only, merge each dataset's id->label mapping and
    write it to `<output_dir>/labels.json` (skipped when nothing is found)."""
    if not is_main_process():
        return
    logger = logging.getLogger(__name__)
    ids_to_labels = {}
    for dataset in dataset_list:
        if not hasattr(dataset, 'categories'):
            logger.warning("Dataset [{}] has no categories attribute, labels.json file won't be created".format(dataset.__class__.__name__))
            continue
        ids_to_labels.update(dataset.categories)
    if not ids_to_labels:
        return
    labels_file = os.path.join(output_dir, 'labels.json')
    logger.info('Saving labels mapping into {}'.format(labels_file))
    with open(labels_file, 'w') as f:
        json.dump(ids_to_labels, f, indent=2)
def to_x1y1x2y2(obj):
    """Convert an {x, y, w, h} box dict to [x1, y1, x2, y2] corner form."""
    x, y = obj['x'], obj['y']
    return [x, y, x + obj['w'], y + obj['h']]
def evaluate_sessions(pr, metrics, test_data, train_data, items=None, cut_off=20, session_key='SessionId', item_key='ItemId', time_key='Time'):
    """Sequentially evaluate a next-item predictor over the test sessions.

    For every event in `test_data` (ordered by session and time) the
    predictor is asked for next-item scores, every metric in `metrics` is
    updated, and finally the list of metric results is returned.

    Args:
        pr: predictor exposing `predict_next(session, item, items, timestamp)`.
        metrics: metric objects with reset()/result() and optional
            add_multiple()/start_predict()/stop_predict() hooks.
        test_data: DataFrame of test events.
        train_data: DataFrame of training events (defines the item universe).
        items, cut_off: unused here; kept for interface compatibility.
        session_key / item_key / time_key: column names.

    Returns:
        List of metric results ('Time_usage_testing' contributes two entries).
    """
    actions = len(test_data)
    sessions = len(test_data[session_key].unique())
    count = 0
    print('START evaluation of ', actions, ' actions in ', sessions, ' sessions')
    # Bug fix: time.clock() was removed in Python 3.8.  time.process_time()
    # preserves the CPU-time semantics time.clock() had on POSIX.
    sc = time.process_time()
    st = time.time()
    time_sum = 0
    time_sum_clock = 0
    time_count = 0
    for m in metrics:
        m.reset()
    test_data.sort_values([session_key, time_key], inplace=True)
    test_data = test_data.reset_index(drop=True)
    items_to_predict = train_data[item_key].unique()
    # Row offset of each session's first event, and each session's length.
    offset_sessions = np.zeros((test_data[session_key].nunique() + 1), dtype=np.int32)
    length_session = np.zeros(test_data[session_key].nunique(), dtype=np.int32)
    offset_sessions[1:] = test_data.groupby(session_key).size().cumsum()
    length_session[0:] = test_data.groupby(session_key).size()
    current_session_idx = 0
    pos = offset_sessions[current_session_idx]
    position = 0
    finished = False
    for i in tqdm(range(len(test_data))):
        crs = time.process_time()
        trs = time.time()
        current_item = test_data[item_key][pos]
        current_session = test_data[session_key][pos]
        ts = test_data[time_key][pos]
        # The remaining items of the current session act as ground truth.
        rest = test_data[item_key][(pos + 1):(offset_sessions[current_session_idx] + length_session[current_session_idx])].values
        for m in metrics:
            if hasattr(m, 'start_predict'):
                m.start_predict(pr)
        preds = pr.predict_next(current_session, current_item, items_to_predict, timestamp=ts)
        for m in metrics:
            if hasattr(m, 'start_predict'):
                m.stop_predict(pr)
        preds[np.isnan(preds)] = 0
        preds.sort_values(ascending=False, inplace=True)
        time_sum_clock += (time.process_time() - crs)
        time_sum += (time.time() - trs)
        time_count += 1
        count += 1
        for m in metrics:
            if hasattr(m, 'add_multiple'):
                m.add_multiple(preds, rest, for_item=current_item, session=current_session, position=position)
        pos += 1
        position += 1
        # Advance to the next session once the current one is exhausted.
        if ((pos + 1) == (offset_sessions[current_session_idx] + length_session[current_session_idx])):
            current_session_idx += 1
            if (current_session_idx == test_data[session_key].nunique()):
                finished = True
                break
            pos = offset_sessions[current_session_idx]
            position = 0
        count += 1
    print('END evaluation in ', (time.process_time() - sc), 'c / ', (time.time() - st), 's')
    print(' avg rt ', (time_sum / time_count), 's / ', (time_sum_clock / time_count), 'c')
    print(' time count ', time_count, 'count/', time_sum, ' sum')
    res = []
    for m in metrics:
        if (type(m).__name__ == 'Time_usage_testing'):
            res.append(m.result_second((time_sum_clock / time_count)))
            res.append(m.result_cpu((time_sum_clock / time_count)))
        else:
            res.append(m.result())
    return res
class PhantomEnv(gym.Env):
    """Multi-agent Gym environment built around a message-passing ``Network``.

    Each step: decode agent actions into messages, resolve the network, then
    collect per-agent observations, rewards and termination flags.

    NOTE(review): ``current_step``, ``n_agents``, ``agents``, ``agent_ids``
    and the ``*_agents``/``*_agent_ids`` accessors are consumed elsewhere in
    this class as plain attributes (e.g. ``self.agents.values()``,
    ``len(self.agent_ids)``), which only works if they carry ``@property`` —
    the decorators appear stripped in this copy; confirm against upstream.
    """

    class Step(NamedTuple):
        # Return bundle of PhantomEnv.step(), keyed per agent id.
        observations: Dict[(AgentID, Any)]
        rewards: Dict[(AgentID, float)]
        terminations: Dict[(AgentID, bool)]
        truncations: Dict[(AgentID, bool)]
        infos: Dict[(AgentID, Any)]

    def __init__(self, num_steps: int, network: Optional[Network]=None, env_supertype: Optional[Supertype]=None, agent_supertypes: Optional[Mapping[(AgentID, Supertype)]]=None) -> None:
        """Set up the network, register supertype samplers and reset agents.

        Args:
            num_steps: Episode length, used by :meth:`is_truncated`.
            network: Message-passing network; a fresh empty one by default.
            env_supertype: Optional supertype sampled into ``env_type`` each reset.
            agent_supertypes: Optional per-agent supertypes, keyed by agent id.
        """
        self.network = (network or Network())
        self._current_step = 0
        self.num_steps = num_steps
        self.env_supertype: Optional[Supertype] = None
        self.env_type: Optional[Supertype] = None
        # Agents that have terminated/truncated and must be skipped thereafter.
        self._terminations: Set[AgentID] = set()
        self._truncations: Set[AgentID] = set()
        self._ctxs: Dict[(AgentID, Context)] = {}
        # All Sampler-valued supertype fields; re-sampled on every reset.
        self._samplers: List[Sampler] = []
        if (env_supertype is not None):
            # Accept a plain dict and coerce it into the env's Supertype class.
            if isinstance(env_supertype, dict):
                env_supertype = self.Supertype(**env_supertype)
            else:
                assert isinstance(env_supertype, self.Supertype)
            env_supertype._managed = True
            # Collect sampler fields for centralized sampling (deduplicated).
            for value in env_supertype.__dict__.values():
                if (isinstance(value, Sampler) and (value not in self._samplers)):
                    self._samplers.append(value)
            self.env_supertype = env_supertype
        if (agent_supertypes is not None):
            for (agent_id, agent_supertype) in agent_supertypes.items():
                if isinstance(agent_supertype, dict):
                    agent_supertype = self.agents[agent_id].Supertype(**agent_supertype)
                agent_supertype._managed = True
                for value in agent_supertype.__dict__.values():
                    if (isinstance(value, Sampler) and (value not in self._samplers)):
                        self._samplers.append(value)
                agent = self.network.agents[agent_id]
                agent.supertype = agent_supertype
        # Prime samplers once so agents see concrete values before first reset.
        for sampler in self._samplers:
            sampler.sample()
        for agent in self.agents.values():
            agent.reset()

    def current_step(self) -> int:
        """Index of the current episode step."""
        return self._current_step

    def n_agents(self) -> int:
        """Total number of agents in the network."""
        return len(self.agent_ids)

    def agents(self) -> Dict[(AgentID, Agent)]:
        """Mapping of agent id to agent instance."""
        return self.network.agents

    def agent_ids(self) -> List[AgentID]:
        """All agent ids in the network."""
        return list(self.network.agent_ids)

    def strategic_agents(self) -> List[StrategicAgent]:
        """Agents that act via a policy."""
        return [a for a in self.agents.values() if isinstance(a, StrategicAgent)]

    def non_strategic_agents(self) -> List[Agent]:
        """Agents driven purely by message handlers."""
        return [a for a in self.agents.values() if (not isinstance(a, StrategicAgent))]

    def strategic_agent_ids(self) -> List[AgentID]:
        """Ids of strategic agents."""
        return [a.id for a in self.agents.values() if isinstance(a, StrategicAgent)]

    def non_strategic_agent_ids(self) -> List[AgentID]:
        """Ids of non-strategic agents."""
        return [a.id for a in self.agents.values() if (not isinstance(a, StrategicAgent))]

    def view(self, agent_views: Dict[(AgentID, AgentView)]) -> EnvView:
        """Build the environment view shared with all agents.

        NOTE(review): ``agent_views`` is currently unused here.
        """
        return EnvView(self.current_step, (self.current_step / self.num_steps))

    def pre_message_resolution(self) -> None:
        """Hook run on every live agent context before messages resolve."""
        for ctx in self._ctxs.values():
            ctx.agent.pre_message_resolution(ctx)

    def post_message_resolution(self) -> None:
        """Hook run on every live agent context after messages resolve."""
        for ctx in self._ctxs.values():
            ctx.agent.post_message_resolution(ctx)

    def resolve_network(self) -> None:
        """Resolve queued network messages, bracketed by the pre/post hooks."""
        self.pre_message_resolution()
        self.network.resolve(self._ctxs)
        self.post_message_resolution()

    def reset(self, seed: Optional[int]=None, options: Optional[Dict[(str, Any)]]=None) -> Tuple[(Dict[(AgentID, Any)], Dict[(str, Any)])]:
        """Start a new episode; return (initial observations, info dict)."""
        logger.log_reset()
        super().reset(seed=seed, options=options)
        self._current_step = 0
        # Draw fresh supertype values for this episode.
        for sampler in self._samplers:
            sampler.sample()
        if (self.env_supertype is not None):
            self.env_type = self.env_supertype.sample()
        self.network.reset()
        self._terminations = set()
        self._truncations = set()
        # Only strategic agents produce observations at reset.
        self._make_ctxs(self.strategic_agent_ids)
        obs = {ctx.agent.id: ctx.agent.encode_observation(ctx) for ctx in self._ctxs.values()}
        logger.log_observations(obs)
        # Agents may return None to signal "no observation this step".
        return ({k: v for (k, v) in obs.items() if (v is not None)}, {})

    def step(self, actions: Mapping[(AgentID, Any)]) -> 'PhantomEnv.Step':
        """Advance one step: decode actions, resolve messages, collect results."""
        self._current_step += 1
        logger.log_step(self.current_step, self.num_steps)
        logger.log_actions(actions)
        logger.log_start_decoding_actions()
        self._make_ctxs(self.agent_ids)
        self._handle_acting_agents(self.agent_ids, actions)
        self.resolve_network()
        observations: Dict[(AgentID, Any)] = {}
        rewards: Dict[(AgentID, Any)] = {}
        terminations: Dict[(AgentID, bool)] = {}
        truncations: Dict[(AgentID, bool)] = {}
        infos: Dict[(AgentID, Dict[(str, Any)])] = {}
        for aid in self.strategic_agent_ids:
            # Skip agents that already finished in an earlier step.
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            obs = ctx.agent.encode_observation(ctx)
            if (obs is not None):
                observations[aid] = obs
                infos[aid] = ctx.agent.collect_infos(ctx)
            rewards[aid] = ctx.agent.compute_reward(ctx)
            terminations[aid] = ctx.agent.is_terminated(ctx)
            truncations[aid] = ctx.agent.is_truncated(ctx)
            if terminations[aid]:
                self._terminations.add(aid)
            if truncations[aid]:
                self._truncations.add(aid)
        logger.log_step_values(observations, rewards, terminations, truncations, infos)
        logger.log_metrics(self)
        # RLlib-style aggregate flags for the whole environment.
        terminations['__all__'] = self.is_terminated()
        truncations['__all__'] = self.is_truncated()
        if (terminations['__all__'] or truncations['__all__']):
            logger.log_episode_done()
        return self.Step(observations, rewards, terminations, truncations, infos)

    def render(self) -> None:
        """Rendering is not implemented for this environment."""
        return None

    def is_terminated(self) -> bool:
        """True once every strategic agent has terminated."""
        return (len(self._terminations) == len(self.strategic_agents))

    def is_truncated(self) -> bool:
        """True at the episode step limit or when every strategic agent truncated."""
        is_at_max_step = ((self.num_steps is not None) and (self.current_step == self.num_steps))
        return (is_at_max_step or (len(self._truncations) == len(self.strategic_agents)))

    def _handle_acting_agents(self, agent_ids: Sequence[AgentID], actions: Mapping[(AgentID, Any)]) -> None:
        """Turn actions (or default behaviour) into messages on the network."""
        for aid in agent_ids:
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            if (aid in actions):
                messages = (ctx.agent.decode_action(ctx, actions[aid]) or [])
            else:
                # Agents without an action may still emit autonomous messages.
                messages = (ctx.agent.generate_messages(ctx) or [])
            for (receiver_id, message) in messages:
                self.network.send(aid, receiver_id, message)

    def _make_ctxs(self, agent_ids: Sequence[AgentID]) -> None:
        """Build per-agent contexts for all requested, still-live agents."""
        env_view = self.view({agent_id: agent.view() for (agent_id, agent) in self.agents.items()})
        self._ctxs = {aid: self.network.context_for(aid, env_view) for aid in agent_ids if ((aid not in self._terminations) and (aid not in self._truncations))}

    def __getitem__(self, agent_id: AgentID) -> Agent:
        """Look up an agent by id."""
        return self.network[agent_id]
def test_check_parameters_min_values_int():
    """_check_parameter accepts satisfied min_value bounds and raises ValueError otherwise."""
    x = torch.tensor([1, 6, 24], dtype=torch.int32)
    # Removed unused local `dtypes` from the original version.
    # Every value >= 1, so both an int and a float lower bound pass.
    _check_parameter(x, 'x', min_value=1)
    _check_parameter(x, 'x', min_value=-1.0)
    # A bound above the smallest element (1) must be rejected.
    assert_raises(ValueError, _check_parameter, x, 'x', min_value=2)
    assert_raises(ValueError, _check_parameter, x, 'x', min_value=25.0)
def gen_state_dict(weights_path):
    """Load a checkpoint and strip DataParallel-style 'module.' prefixes from its keys."""
    checkpoint = torch.load(weights_path)
    return {key.replace('module.', ''): value for key, value in checkpoint.items()}
def _get_graph_from_original_keras_v2(model, output_dir):
    """Trace a Keras model into a frozen GraphDef plus input/output tensor names."""
    from tensorflow.lite.python.convert import OpsSet
    from tensorflow.lite.python.util import get_grappler_config, model_input_signature, run_graph_optimizations, trace_model_call
    from tensorflow.python.eager import def_function
    from tensorflow.python.framework import convert_to_constants, dtypes
    input_signature = None
    # If model.call is not already a tf.function, derive a signature from the model.
    if (not isinstance(model.call, def_function.Function)):
        input_signature = model_input_signature(model, keep_original_batch_size=False)
    func = trace_model_call(model, input_signature)
    concrete_func = func.get_concrete_function()
    funcs = [concrete_func]
    # Freeze variables into constants to obtain a standalone GraphDef.
    (frozen_func, graph_def) = convert_to_constants.convert_variables_to_constants_v2_as_graph(funcs[0], lower_control_flow=False)
    # Resource-typed inputs are variable handles, not real model inputs.
    input_tensors = [tensor for tensor in frozen_func.inputs if (tensor.dtype != dtypes.resource)]
    output_tensors = frozen_func.outputs
    # NOTE(review): `graph` is computed but never returned (the function
    # returns `graph_def`), and OpsSet / get_grappler_config /
    # run_graph_optimizations and the `output_dir` argument are unused —
    # confirm against upstream whether optimization steps were removed here.
    graph = convert_to_constants.disable_lower_using_switch_merge(graph_def)
    input_names = [tensor.name.split(':')[0] for tensor in input_tensors]
    output_names = [tensor.name.split(':')[0] for tensor in output_tensors]
    return (graph_def, input_names, output_names)
class Encoder(nn.Module):
    """Single-layer MLP encoder: linear projection followed by ReLU."""

    def __init__(self, din=32, hidden_dim=128):
        super().__init__()
        self.fc = nn.Linear(din, hidden_dim)

    def forward(self, x):
        """Project the input and apply ReLU to obtain the embedding."""
        return F.relu(self.fc(x))
def _identify_bool_attributes_with_defaults(attributes, attr_name, attr_value, default=True):
output = default
if ((attr_name in attributes) and (attributes[attr_name] != attr_value)):
output = (not default)
return output |
class GeomtricFixedGridODESolver(metaclass=abc.ABCMeta):
    """Fixed-step-size base class for geometric (structure-preserving) ODE integrators.

    Subclasses implement `_step_func` to advance the state by one step; the
    grid is uniform with spacing `step_size` and outputs are interpolated
    back onto the requested times.
    """

    order: int

    def __init__(self, func, y0, step_size=None, grid_constructor=None, interp='linear', perturb=False, **unused_kwargs):
        """
        Args:
            func: Callable f(t, y) giving dy/dt.
            y0: Initial state tensor.
            step_size: Uniform integration step size.
            grid_constructor: Accepted for API compatibility.
                NOTE(review): currently ignored — a step-size grid is always
                built; confirm against upstream.
            interp: Output interpolation, only 'linear' is implemented.
            perturb: Stored but unused here.
        """
        self.func = func
        self.y0 = y0
        self.dtype = y0.dtype
        self.device = y0.device
        self.step_size = step_size
        self.interp = interp
        self.perturb = perturb
        self.grid_constructor = self._grid_constructor_from_step_size(step_size)

    # Restored @staticmethod: the original definition had no `self` parameter
    # yet was invoked as `self._grid_constructor_from_step_size(step_size)`,
    # which would raise a TypeError at construction time.
    @staticmethod
    def _grid_constructor_from_step_size(step_size):
        """Return a grid constructor producing uniformly spaced time points."""
        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[(- 1)]
            niters = torch.ceil((((end_time - start_time) / step_size) + 1)).item()
            t_infer = ((torch.arange(0, niters, dtype=t.dtype, device=t.device) * step_size) + start_time)
            # Clamp the final grid point so the grid ends exactly at t[-1].
            t_infer[(- 1)] = t[(- 1)]
            return t_infer
        return _grid_constructor

    def _step_func(self, func, t0, dt, t1, y0):
        """Advance `y0` from `t0` by `dt`; implemented by concrete integrators."""
        pass

    def integrate(self, t):
        """Integrate and return the solution evaluated at each time in `t`."""
        time_grid = self.grid_constructor(self.func, self.y0, t)
        assert ((time_grid[0] == t[0]) and (time_grid[(- 1)] == t[(- 1)]))
        solution = torch.empty(len(t), *self.y0.shape, dtype=self.y0.dtype, device=self.y0.device)
        solution[0] = self.y0
        j = 1
        y0 = self.y0
        for (t0, t1) in zip(time_grid[:(- 1)], time_grid[1:]):
            dt = (t1 - t0)
            y1 = self._step_func(self.func, t0, dt, t1, y0)
            # Fill every requested output time that falls inside this step.
            while ((j < len(t)) and (t1 >= t[j])):
                if (self.interp == 'linear'):
                    solution[j] = self._linear_interp(t0, t1, y0, y1, t[j])
                elif (self.interp == 'cubic'):
                    raise NotImplementedError('Not implemented for geometric integrators')
                else:
                    raise ValueError(f'Unknown interpolation method {self.interp}')
                j += 1
            y0 = y1
        return solution

    def integrate_until_event(self, t0, event_fn):
        """Event-terminated integration is not supported for geometric integrators."""
        raise NotImplementedError('Not implemented for geometric integrators')

    def _cubic_hermite_interp(self, t0, y0, f0, t1, y1, f1, t):
        """Cubic Hermite interpolation between (t0, y0, f0) and (t1, y1, f1)."""
        h = ((t - t0) / (t1 - t0))
        h00 = (((1 + (2 * h)) * (1 - h)) * (1 - h))
        h10 = ((h * (1 - h)) * (1 - h))
        h01 = ((h * h) * (3 - (2 * h)))
        h11 = ((h * h) * (h - 1))
        dt = (t1 - t0)
        return ((((h00 * y0) + ((h10 * dt) * f0)) + (h01 * y1)) + ((h11 * dt) * f1))

    def _linear_interp(self, t0, t1, y0, y1, t):
        """Linearly interpolate between (t0, y0) and (t1, y1) at time t."""
        if (t == t0):
            return y0
        if (t == t1):
            return y1
        slope = ((t - t0) / (t1 - t0))
        return (y0 + (slope * (y1 - y0)))
class HyperConv(nn.Module):
    """Convolution whose kernel and bias are generated per level by a tiny hypernetwork.

    A linear layer maps a scalar level value to a flat vector that is split
    into conv weights and bias, then applied to the matching input tensor.
    """

    def __init__(self, levels, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t=1, padding: _size_2_t=0, dilation: _size_2_t=1, groups: int=1, padding_mode: str='zeros', device='cpu'):
        super(HyperConv, self).__init__()
        self.levels = levels
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode
        self.device = device
        # One output slot per weight element plus one bias per output channel.
        self.fc = nn.Linear(1, self.out_channels * (self.in_channels * (kernel_size * kernel_size) + 1))
        # Length of the flattened weight portion of the generated vector.
        self.w_len = self.in_channels * (self.out_channels * (self.kernel_size * self.kernel_size))

    def forward(self, x):
        """Apply a per-level generated convolution to each tensor in `x`."""
        device = x[0].device
        outputs = []
        for idx, level in enumerate(self.levels):
            scale = torch.tensor([level], dtype=torch.float32, device=device)
            generated = self.fc(scale)
            weight = generated[:self.w_len].reshape(self.out_channels, self.in_channels, self.kernel_size, self.kernel_size)
            bias = generated[self.w_len:]
            outputs.append(F.conv2d(x[idx], weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation))
        return outputs
.slow
def test_extended_orbital_matrix_ferminet_can_be_evaluated():
    """Smoke test: each extended orbital-matrix FermiNet evaluates with the expected output shape."""
    (key, init_pos, slog_psis) = _make_extended_orbital_matrix_ferminets()
    # Use a plain loop: the original built a throwaway list via a
    # comprehension executed purely for its side effects.
    for slog_psi in slog_psis:
        _jit_eval_model_and_verify_output_shape(key, init_pos, slog_psi)
class DataCollatorForWholeWordMask:
    """Import-guard stub: constructing it demands a working PyTorch install."""

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
def test_print_log_logger(caplog):
    """print_log routes through named loggers at the requested level and mirrors to a log file."""
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')
    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        file_logger = get_logger('abc', log_file=tmp.name)
        print_log('welcome', logger=file_logger)
        assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
        with open(tmp.name, 'r') as fin:
            log_text = fin.read()
            timestamp_pattern = '\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}'
            assert re.fullmatch(timestamp_pattern + ' - abc - INFO - welcome\\n', log_text) is not None
    logging.shutdown()
    os.remove(tmp.name)
def test_fs_observer_completed_event_updates_run(dir_obs, sample_run):
    """completed_event must persist stop time, status and result into run.json."""
    basedir, observer = dir_obs
    run_id = observer.started_event(**sample_run)
    run_dir = basedir.join(run_id)
    observer.completed_event(stop_time=T2, result=42)
    saved = json.loads(run_dir.join('run.json').read())
    assert saved['stop_time'] == T2.isoformat()
    assert saved['status'] == 'COMPLETED'
    assert saved['result'] == 42
class TimeColumn(ProgressColumn):
    """Progress column rendering '[elapsed<eta, rate]'."""

    # Cap the refresh rate so the speed estimate stays readable.
    max_refresh = 0.5

    def render(self, task):
        """Format the task's elapsed time, remaining-time estimate and rate."""
        elapsed = _format_time(task.elapsed)
        remaining = _format_time(task.time_remaining)
        rate = f'{task.speed:.2f}/s' if task.speed else '?/s'
        return Text(f'[{elapsed}<{remaining}, {rate}]', style='progress.remaining')
class _RenameConverter():
    """Config converter that upgrades/downgrades by renaming keys.

    Subclasses populate RENAME with (old_name, new_name) pairs.
    """

    RENAME: List[Tuple[(str, str)]] = []

    # Restored @classmethod on both methods: they take `cls` as the first
    # parameter, so without the decorator an instance/class call would
    # mis-bind the arguments.
    @classmethod
    def upgrade(cls, cfg: CN) -> None:
        """Apply every rename old -> new, in declaration order."""
        for (old, new) in cls.RENAME:
            _rename(cfg, old, new)

    @classmethod
    def downgrade(cls, cfg: CN) -> None:
        """Undo the renames in reverse order so chained renames unwind correctly."""
        for (old, new) in cls.RENAME[::(- 1)]:
            _rename(cfg, new, old)
def test_stl_bind_global():
    """Registering an already-registered STL-bound type across modules raises RuntimeError."""
    import pybind11_cross_module_tests as cm

    cases = [
        (cm.register_nonlocal_map, 'generic_type: type "NonLocalMap" is already registered!'),
        (cm.register_nonlocal_vec, 'generic_type: type "NonLocalVec" is already registered!'),
        (cm.register_nonlocal_map2, 'generic_type: type "NonLocalMap2" is already registered!'),
    ]
    for register, expected in cases:
        with pytest.raises(RuntimeError) as excinfo:
            register()
        assert str(excinfo.value) == expected
class RobertaConfig(BertConfig):
    """Configuration for RoBERTa: identical to BERT apart from default special-token ids."""

    model_type = 'roberta'

    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """RoBERTa uses <s>=0 (bos), <pad>=1 and </s>=2 (eos) by default."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
def build_encoder_w2v(tparams, options):
    """Build a Theano encoder graph over word2vec embeddings.

    Args:
        tparams: Shared Theano parameters.
        options: Model options; options['encoder'] selects the layer type.

    Returns:
        (trng, embedding, x_mask, ctx) where ctx is the encoder's final
        hidden state.
    """
    # Removed unused local `opt_ret` from the original version.
    trng = RandomStreams(1234)
    # Symbolic inputs: (timesteps, batch, emb_dim) embeddings plus a mask.
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    proj = get_layer(options['encoder'])[1](tparams, embedding, None, options, prefix='encoder', mask=x_mask)
    # Final timestep of the encoder output serves as the sentence context.
    ctx = proj[0][(- 1)]
    return (trng, embedding, x_mask, ctx)
# NOTE(review): this bare call looks like a stripped `@_registry(...)`
# decorator — as written its return value is discarded; confirm against
# upstream.
_registry(operator_type='MergedEmbeddingbag')
class MergedEmbeddingbag(Operator):
    """Operator stub representing a merged EmbeddingBag op."""

    def __init__(self):
        super().__init__()
def initialize_network(params, device, state=None, runtime=None):
    """Build a network from `params`, or restore one from a saved `state`.

    The network class is looked up in NETWORKS by the 'type' key taken from
    `params` (popped) when given, otherwise from `state['net']['type']`.
    """
    if params:
        type_key = params.pop('type')
    else:
        type_key = state['net']['type']
    network_cls = NETWORKS[type_key]
    if state:
        return network_cls.initialize_from_state(state, device, params, runtime)
    return network_cls.initialize(params, device)
class CLIPImageProjection(metaclass=DummyObject):
    """Placeholder that raises a helpful error when torch is unavailable."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # Restored @classmethod on both alternate constructors: they take `cls`,
    # not `self`, and mirror the real class's classmethod API.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
class BaseModel(ABC):
    """Abstract interface for trainable models with config validation."""

    # When True, _check_config also enforces the optional parameter set.
    check_optional_config = False
    config = None
    model = None

    def fit_eval(self, data, validation_data=None, **kwargs):
        """Train and evaluate; must be overridden by subclasses."""
        invalidInputError(False, 'not implement')

    def save(self, checkpoint):
        """Persist model state to `checkpoint` (no-op by default)."""
        pass

    def restore(self, checkpoint):
        """Restore model state from `checkpoint` (no-op by default)."""
        pass

    def get_model(self):
        """Return the underlying model object."""
        return self.model

    def _get_required_parameters(self):
        """Config keys that must be present; subclasses override."""
        return set()

    def _get_optional_parameters(self):
        """Config keys that may be present; subclasses override."""
        return set()

    def _check_config(self, **config):
        """Validate that `config` covers the required (and, if enabled, optional) keys."""
        provided = set(config)
        if not provided.issuperset(self._get_required_parameters()):
            invalidInputError(False, 'Missing required parameters in configuration. Required parameters are: ' + str(self._get_required_parameters()))
        if self.check_optional_config and not provided.issuperset(self._get_optional_parameters()):
            invalidInputError(False, 'Missing optional parameters in configuration. Optional parameters are: ' + str(self._get_optional_parameters()))
        return True
def lpips(input0, input1, model='net-lin', net='alex', version=0.1):
    """Compute the LPIPS perceptual distance between two image batches (TF1 graph mode).

    Args:
        input0, input1: Float tensors in [0, 1] with shape [..., H, W, 3] (NHWC).
            NOTE(review): the [0, 1] range is inferred from the *2-1 rescale
            below — confirm with callers.
        model, net, version: Select which pretrained frozen graph to download.

    Returns:
        Tensor of distances with the inputs' leading batch shape.
    """
    # Remember the leading batch dims, then flatten to a single batch axis.
    batch_shape = tf.shape(input0)[:(- 3)]
    input0 = tf.reshape(input0, tf.concat([[(- 1)], tf.shape(input0)[(- 3):]], axis=0))
    input1 = tf.reshape(input1, tf.concat([[(- 1)], tf.shape(input1)[(- 3):]], axis=0))
    # NHWC -> NCHW, the layout expected by the frozen LPIPS graph.
    input0 = tf.transpose(input0, [0, 3, 1, 2])
    input1 = tf.transpose(input1, [0, 3, 1, 2])
    # Rescale [0, 1] -> [-1, 1].
    input0 = ((input0 * 2.0) - 1.0)
    input1 = ((input1 * 2.0) - 1.0)
    (input0_name, input1_name) = ('0:0', '1:0')
    default_graph = tf.get_default_graph()
    producer_version = default_graph.graph_def_versions.producer
    cache_dir = os.path.expanduser('~/.lpips')
    os.makedirs(cache_dir, exist_ok=True)
    # Prefer a frozen graph matching this TF producer version, else the generic one.
    pb_fnames = [('%s_%s_v%s_%d.pb' % (model, net, version, producer_version)), ('%s_%s_v%s.pb' % (model, net, version))]
    for pb_fname in pb_fnames:
        if (not os.path.isfile(os.path.join(cache_dir, pb_fname))):
            try:
                _download(os.path.join(_URL, pb_fname), cache_dir)
            except urllib.error.HTTPError:
                # Variant missing on the server; fall through to the next name.
                pass
        if os.path.isfile(os.path.join(cache_dir, pb_fname)):
            break
    with open(os.path.join(cache_dir, pb_fname), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Wire our preprocessed tensors into the imported graph's placeholders.
    _ = tf.import_graph_def(graph_def, input_map={input0_name: input0, input1_name: input1})
    # The imported graph's final op produces the distance tensor.
    (distance,) = default_graph.get_operations()[(- 1)].outputs
    if (distance.shape.ndims == 4):
        distance = tf.squeeze(distance, axis=[(- 3), (- 2), (- 1)])
    # Restore the original leading batch shape.
    distance = tf.reshape(distance, batch_shape)
    return distance
class OpenAIGPTTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the original OpenAI GPT (lowercasing).

    Uses spaCy's English tokenizer plus ftfy text fixing when both are
    installed, otherwise falls back to BERT's BasicTokenizer. The vocabulary
    is a JSON token->id map plus a BPE merges file; word-final BPE pieces
    carry a '</w>' suffix.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        """Load the JSON vocab and BPE merge ranks; choose the text pipeline."""
        super().__init__(unk_token=unk_token, **kwargs)
        try:
            import ftfy
            from spacy.lang.en import English
            _nlp = English()
            self.nlp = _nlp.tokenizer
            self.fix_text = ftfy.fix_text
        except ImportError:
            # Fallback path: BERT's rule-based tokenizer, no unicode fixing.
            logger.warning('ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.')
            self.nlp = BasicTokenizer(do_lower_case=True)
            self.fix_text = None
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # Skip the '#version' header line and the trailing empty line.
            merges = merges_handle.read().split('\n')[1:(- 1)]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes bpe() results per input token.
        self.cache = {}

    def do_lower_case(self):
        """Always True for this tokenizer.

        NOTE(review): consumed as an attribute upstream — `@property` appears
        stripped in this copy; confirm against upstream.
        """
        return True

    def vocab_size(self):
        """Number of base vocabulary entries (NOTE(review): likely @property upstream)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the token->id map including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one lowercased token; return space-joined pieces."""
        # Represent the token as characters, marking the last one with '</w>'.
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        # NOTE(review): `word` is rebuilt before the cache lookup — wasted
        # work on cache hits, though harmless.
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Merge the lowest-ranked (earliest-learned) adjacent pair first.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case: keep a bare newline token intact.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split text with the active pipeline, then BPE each resulting token."""
        split_tokens = []
        if (self.fix_text is None):
            # BERT BasicTokenizer path (handles lowercasing itself).
            text = self.nlp.tokenize(text)
            for token in text:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        else:
            # spaCy path: fix unicode with ftfy, standardize, then tokenize.
            text = self.nlp(text_standardize(self.fix_text(text)))
            for token in text:
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string (unk token if unknown)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE pieces, turning '</w>' word-end markers back into spaces."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocab JSON and merges file into `save_directory`; return their paths."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Emit merges in rank order; warn if ranks are not consecutive.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)
class _num_class_mixin():
    """Mixin exposing the wrapped model's number of output classes."""

    # The wrapped module (possibly behind DataParallel/DDP unwrapping).
    _model: nn.Module

    def num_classes(self):
        # NOTE(review): likely decorated with @property upstream (callers
        # would access this as an attribute); the decorator appears stripped
        # in this copy — confirm against upstream.
        return get_model(self._model).num_classes
class ArgMutate(ExternalCallHandler):
    """Marks every named argument symbol as mutated, clearing its dependencies."""

    def handle(self) -> None:
        for sym in self.arg_syms:
            # Anonymous / missing symbols cannot carry dependency info.
            if sym is None or sym.is_anonymous:
                continue
            sym.update_deps(set(), overwrite=False, mutated=True)
def refine_entity(entity):
    """Strip bracketed qualifiers and normalize underscores/whitespace in an entity name."""
    cleanup_steps = [
        ('-LRB- .+ -RRB-$', ''),
        ('LRB .+ RRB$', ''),
        ('\\(.*\\)', ''),
        ('_', ' '),
        ('\\s+', ' '),
    ]
    for pattern, replacement in cleanup_steps:
        entity = re.sub(pattern, replacement, entity)
    return entity.strip()
# NOTE(review): this bare `_grad()` call looks like a stripped
# `@torch.no_grad()` decorator — confirm against upstream.
_grad()
def evaluate(data_loader, model, device, amp=True, choices=None, mode='super', retrain_config=None, is_visual_prompt_tuning=False, is_adapter=False, is_LoRA=False, is_prefix=False):
    """Evaluate a (super)network on a data loader and return averaged metrics.

    In 'super' mode a subnetwork config is sampled from `choices`; otherwise
    `retrain_config` is applied and its parameter count reported.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    if (mode == 'super'):
        # Sample a random subnetwork from the search space.
        # NOTE(review): `is_prefix` is forwarded as the literal False here,
        # ignoring the function argument — confirm whether intentional.
        config = sample_configs(choices=choices, is_visual_prompt_tuning=is_visual_prompt_tuning, is_adapter=is_adapter, is_LoRA=is_LoRA, is_prefix=False)
        model_module = unwrap_model(model)
        model_module.set_sample_config(config=config)
    else:
        config = retrain_config
        model_module = unwrap_model(model)
        model_module.set_sample_config(config=config)
        print('sampled model config: {}'.format(config))
        parameters = model_module.get_sampled_params_numel(config)
        print('sampled model parameters: {}'.format(parameters))
    for (images, target) in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        if amp:
            # Mixed-precision inference path.
            with torch.cuda.amp.autocast():
                output = model(images)
                loss = criterion(output, target)
        else:
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # Aggregate meters across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print('* {top1.global_avg:.3f} {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
def disable_running_stats(model):
    """Freeze BatchNorm2d running statistics by zeroing momentum (backup kept for later restore)."""
    def _freeze(module):
        if isinstance(module, nn.BatchNorm2d):
            # Stash the old momentum so a matching enable() can restore it.
            module.backup_momentum = module.momentum
            module.momentum = 0
    model.apply(_freeze)
class SpatialShareConvolution(Layer):
    """BigDL spatial convolution layer (Python wrapper).

    Thin bridge class: all arguments are forwarded to the JVM-side layer
    implementation via the base Layer constructor.
    """

    def __init__(self, n_input_plane, n_output_plane, kernel_w, kernel_h, stride_w=1, stride_h=1, pad_w=0, pad_h=0, n_group=1, propagate_back=True, wRegularizer=None, bRegularizer=None, init_weight=None, init_bias=None, init_grad_weight=None, init_grad_bias=None, with_bias=True, bigdl_type='float'):
        # Numpy init arrays are converted to JTensors for the JVM call
        # (JTensor.from_ndarray passes None through unchanged).
        super(SpatialShareConvolution, self).__init__(None, bigdl_type, n_input_plane, n_output_plane, kernel_w, kernel_h, stride_w, stride_h, pad_w, pad_h, n_group, propagate_back, wRegularizer, bRegularizer, JTensor.from_ndarray(init_weight), JTensor.from_ndarray(init_bias), JTensor.from_ndarray(init_grad_weight), JTensor.from_ndarray(init_grad_bias), with_bias)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set weight/bias initializers on the JVM layer; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
class PillowCodec(Codec):
    """Baseline image codec backed by Pillow's built-in encoders."""

    # Pillow format string; set by concrete subclasses.
    fmt = None

    def name(self):
        raise NotImplementedError()

    def _load_img(self, img):
        return read_image(img)

    def _run(self, img, quality, return_rec=False, return_metrics=True):
        """Encode `img` at `quality`, decode it back, and collect rate/time stats."""
        t_enc = time.time()
        buf = io.BytesIO()
        img.save(buf, format=self.fmt, quality=int(quality))
        encoding_time = time.time() - t_enc
        buf.seek(0)
        nbytes = buf.getbuffer().nbytes
        t_dec = time.time()
        rec = Image.open(buf)
        # Force the actual decode so decoding time is measured.
        rec.load()
        decoding_time = time.time() - t_dec
        results = {
            'bpp': float(nbytes) * 8 / (img.size[0] * img.size[1]),
            'encoding_time': encoding_time,
            'decoding_time': decoding_time,
        }
        if return_metrics:
            psnr_val, msssim_val = compute_metrics(rec, img)
            results['psnr'] = psnr_val
            results['ms-ssim'] = msssim_val
        if return_rec:
            return (results, rec)
        return results
def test_UnetFCAM():
    """Smoke-test UnetFCAM across backbones: forward, loss and backward must all run."""
    import datetime as dt
    cuda = '1'
    DEVICE = torch.device(('cuda:{}'.format(cuda) if torch.cuda.is_available() else 'cpu'))
    encoders = dlib.encoders.get_encoder_names()
    # Restrict to a representative subset of backbones.
    encoders = [constants.INCEPTIONV3, constants.VGG16, constants.RESNET50]
    SZ = 224
    in_channels = 3
    bsz = 8
    sample = torch.rand((bsz, in_channels, SZ, SZ)).to(DEVICE)
    classes = 2
    loss = torch.nn.CrossEntropyLoss(reduction='mean').to(DEVICE)
    seg_h_out_channels = classes
    for encoder_name in encoders:
        vgg_encoders = dlib.encoders.vgg_encoders
        # VGG uses a shallower decoder than the default depth-5 encoders.
        if (encoder_name == constants.VGG16):
            decoder_channels = (256, 128, 64)
            encoder_depth = vgg_encoders[encoder_name]['params']['depth']
        else:
            decoder_channels = (256, 128, 64, 32, 16)
            encoder_depth = 5
        announce_msg('Testing backbone {}'.format(encoder_name))
        for support_background in [False, True]:
            for im_rec in [True, False]:
                target = torch.randint(low=0, high=classes, size=(bsz, SZ, SZ)).to(DEVICE)
                model = UnetFCAM(task=constants.F_CL, encoder_name=encoder_name, encoder_depth=encoder_depth, decoder_channels=decoder_channels, encoder_weights=constants.IMAGENET, in_channels=in_channels, seg_h_out_channels=seg_h_out_channels, aux_params=dict(pooling_head=constants.WILDCATHEAD, classes=classes, support_background=support_background), im_rec=im_rec, img_range=constants.RANGE_SIGMOID).to(DEVICE)
                # NOTE(review): ']n' below looks like a typo for '\n' in the
                # log format string (left unchanged here).
                announce_msg('TESTING: {} -- ]n {}'.format(model, model.get_info_nbr_params()))
                # NOTE(review): `glabel` is assigned but never used below.
                glabel = torch.randint(low=0, high=classes, size=(bsz,), dtype=torch.long, device=DEVICE)
                t0 = dt.datetime.now()
                out = model(sample)
                (cl_logits, fcams, im_recon) = out
                cams_low = model.classification_head.cams
                print('FCAMs shape: {}'.format(fcams.shape))
                print('{}: forward time {} [SUPBACK {}]'.format(model, (dt.datetime.now() - t0), support_background))
                t0 = dt.datetime.now()
                # Segmentation loss on the FCAM output.
                l = loss(out[1], target)
                print('{}: loss eval time {} [SUPBACK {}]'.format(model, (dt.datetime.now() - t0), support_background))
                t0 = dt.datetime.now()
                l.backward()
                print('{}: loss backward time {} [SUPBACK {}]'.format(model, (dt.datetime.now() - t0), support_background))
                print('x: {} \t cams_low: {} \t \t cl_logits: {} \t fcam: {}'.format(sample.shape, cams_low.shape, cl_logits.shape, fcams.shape))
                if im_rec:
                    print('IMAGE reconstruction: x shape: {}, im recon.shape: {}'.format(sample.shape, im_recon.shape))
class DataProcessor(object):
    """Loads pickled NER datasets and converts them into InputExample lists."""

    def get_conll_train_examples(self, data_dir):
        """CoNLL training split."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'conll_train.pkl')), 'conll_train')

    def get_conll_dev_examples(self, data_dir):
        """CoNLL dev split (read from the test pickle, as in the original)."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'conll_test.pkl')), 'conll_dev')

    def get_sep_scitech_train_examples(self, data_dir):
        """SciTech training split."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'sep_scitech_train.pkl')), 'twitter_train')

    def get_sep_scitech_test_examples(self, data_dir):
        """SciTech test split."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'sep_scitech_test.pkl')), 'twitter_test')

    def get_labels(self, data_dir):
        """Fixed BIO label set for this task (data_dir is unused)."""
        return ['B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC', 'O']

    def _create_examples(self, data, set_type):
        """Wrap (text, label) pairs into InputExample objects with index guids."""
        examples = []
        for (i, elem) in enumerate(data):
            examples.append(InputExample(guid=i, text=elem[0], label=elem[1]))
        return examples

    def _read_pkl(self, input_file):
        """Deserialize a pickle file.

        Fixed a file-handle leak: the original passed an un-closed `open(...)`
        directly to pickle.load.
        """
        with open(input_file, 'rb') as f:
            return pickle.load(f)
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = {'left': 'l', 'right': 'r', 'center': 'c', 'decimal': 'r'}
tabular_columns_fmt = ''.join([alignment.get(a, 'l') for a in colaligns])
return (('\\begin{tabular}{' + tabular_columns_fmt) + '}\n\\hline') |
class Block(nn.Module):
    """Transformer block with optional windowed attention and a residual conv block (ViTDet-style)."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=True, drop_path=0.0, norm_layer=nn.LayerNorm, act_layer=nn.GELU, use_rel_pos=False, rel_pos_zero_init=True, window_size=0, use_residual_block=False, input_size=None):
        """
        Args:
            dim: Token channel dimension.
            num_heads: Attention head count.
            mlp_ratio: Hidden/input width ratio of the MLP.
            drop_path: Stochastic-depth drop rate.
            window_size: If > 0, attention is computed inside local windows.
            use_residual_block: If True, append a conv bottleneck residual block.
            input_size: Spatial token-grid size, used for relative position params.
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        # With windowed attention, rel-pos tables are sized to the window.
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, input_size=(input_size if (window_size == 0) else (window_size, window_size)))
        self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int((dim * mlp_ratio)), act_layer=act_layer)
        self.window_size = window_size
        self.use_residual_block = use_residual_block
        if use_residual_block:
            self.residual = ResBottleneckBlock(in_channels=dim, out_channels=dim, bottleneck_channels=(dim // 2), norm='LN', act_layer=act_layer)

    def forward(self, x):
        # x: (B, H, W, C) token grid (channels-last).
        shortcut = x
        x = self.norm1(x)
        if (self.window_size > 0):
            # Partition into windows (padding as needed) before attention.
            (H, W) = (x.shape[1], x.shape[2])
            (x, pad_hw) = window_partition(x, self.window_size)
        x = self.attn(x)
        if (self.window_size > 0):
            # Undo the window partition and strip any padding.
            x = window_unpartition(x, self.window_size, pad_hw, (H, W))
        x = (shortcut + self.drop_path(x))
        x = (x + self.drop_path(self.mlp(self.norm2(x))))
        if self.use_residual_block:
            # Conv block expects NCHW; permute in and back out.
            x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        return x
def filter_exists(filenames, base_path):
    """Return full paths base_path/configs/<name> for the names that exist on disk."""
    candidates = (os.path.join(base_path, 'configs', name) for name in filenames)
    return [path for path in candidates if os.path.exists(path)]
def get_glow_cnn(num_input_channels, num_hidden_channels, num_output_channels, zero_init_output):
    """Build the 3-conv Glow coupling CNN: 3x3 -> 1x1 -> 3x3 with BatchNorm + ReLU.

    When `zero_init_output` is set, the final conv's weight and bias are
    zeroed so the network initially outputs zeros (identity coupling).
    """
    layers = [
        nn.Conv2d(in_channels=num_input_channels, out_channels=num_hidden_channels, kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(num_hidden_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_hidden_channels, kernel_size=1, padding=0, bias=False),
        nn.BatchNorm2d(num_hidden_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels=num_hidden_channels, out_channels=num_output_channels, kernel_size=3, padding=1),
    ]
    if zero_init_output:
        layers[-1].weight.data.zero_()
        layers[-1].bias.data.zero_()
    return nn.Sequential(*layers)
class Predictor(BasePredictor):
    """Cog predictor wrapping VL-T5 VQA: FRCNN region features + T5 text model."""

    def setup(self):
        """Load the VL-T5 trainer, FRCNN feature extractor and tokenizer once."""
        args = parse_args(parse=False, backbone='t5-base', load='VL-T5/snap/pretrain/VLT5/Epoch30')
        args.gpu = 0
        self.trainer = Trainer(args, train=False)
        # NOTE(review): the next two string literals are unterminated — the
        # URL contents appear to have been stripped from this copy, leaving
        # the file syntactically invalid; restore them from upstream.
        OBJ_URL = '
        ATTR_URL = '
        self.object_ids = get_data(OBJ_URL)
        self.attr_ids = get_data(ATTR_URL)
        self.frcnn_cfg = Config.from_pretrained('unc-nlp/frcnn-vg-finetuned')
        self.frcnn = GeneralizedRCNN.from_pretrained('unc-nlp/frcnn-vg-finetuned', config=self.frcnn_cfg)
        self.image_preprocess = Preprocess(self.frcnn_cfg)
        self.tokenizer = VLT5TokenizerFast.from_pretrained('t5-base')

    def predict(self, image: Path=Input(description='Input image.'), question: str=Input(description='question for VQA')) -> str:
        """Run VQA on one image/question pair; returns the predicted answer string."""
        frcnn_visualizer = SingleImageViz(str(image), id2obj=self.object_ids, id2attr=self.attr_ids)
        (images, sizes, scales_yx) = self.image_preprocess(str(image))
        # Extract region proposals and their ROI features.
        output_dict = self.frcnn(images, sizes, scales_yx=scales_yx, padding='max_detections', max_detections=self.frcnn_cfg.max_detections, return_tensors='pt')
        frcnn_visualizer.draw_boxes(output_dict.get('boxes'), output_dict.get('obj_ids'), output_dict.get('obj_probs'), output_dict.get('attr_ids'), output_dict.get('attr_probs'))
        normalized_boxes = output_dict.get('normalized_boxes')
        features = output_dict.get('roi_features')
        input_ids = self.tokenizer(question, return_tensors='pt', padding=True).input_ids
        # Assemble the VL-T5 batch: text ids + visual features + boxes.
        batch = {}
        batch['input_ids'] = input_ids
        batch['vis_feats'] = features
        batch['boxes'] = normalized_boxes
        result = self.trainer.model.test_step(batch)
        return result['pred_ans'][0]
class GraphConvolution(nn.Module):
    """Simple GCN layer (Kipf & Welling): ``out = adj @ (input @ W) [+ b]``."""

    def __init__(self, in_feature, out_feature, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_feature
        self.out_features = out_feature
        # Dense transform applied before neighborhood aggregation.
        self.weight = Parameter(torch.FloatTensor(in_feature, out_feature))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_feature))
        else:
            # Register as None so `self.bias` exists and state_dict stays clean.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_out), 1/sqrt(fan_out)]."""
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Transform node features, then aggregate over the (sparse) adjacency."""
        aggregated = torch.spmm(adj, torch.mm(input, self.weight))
        return aggregated if self.bias is None else aggregated + self.bias
def _get_bin_idx(label):
    """Map a scalar label to its bin index using the module-level `bins`
    count and `bins_edges` array; the top label value 5.0 maps to the
    last bin explicitly (it would otherwise fall past the final edge)."""
    if label == 5.0:
        return bins - 1
    # Index of the first edge strictly above `label`, shifted down one bin.
    # (Raises IndexError if `label` is at/above the last edge, as before.)
    above = np.nonzero(bins_edges > label)[0]
    return above[0] - 1
def parseArgs():
    """Parse CLI options for writing image/label path pairs to a txt file.

    Returns a namespace with `data_home` (str, default './'), and the
    boolean flags `useTrain` (-t) and `useValid` (-v).
    """
    parser = argparse.ArgumentParser(
        description='Write image label path pairs of train and valid sets to txt file.')
    parser.add_argument('-d', dest='data_home', type=str, default='./',
                        help='dataset home directory.')
    parser.add_argument('-t', dest='useTrain', action='store_true',
                        help='use train set directory.')
    parser.add_argument('-v', dest='useValid', action='store_true',
                        help='use valid set directory.')
    return parser.parse_args()
def createModel(net, domain, domain_name):
    """Build a `Top` model for `domain` from a (weights, constructor) pair.

    A fresh network is created from the constructor, its weights copied from
    the donor network, then wrapped in `Top` with an Adam optimizer (fixed
    slow LR for point domains) and a reduce-on-plateau LR scheduler.
    Mutates `domain.name` as a side effect.
    """
    net_weights, net_create = net
    domain.name = domain_name

    # Fresh network with copied weights from the donor.
    fresh_net = net_create(num_classes).infer(input_dims)
    fresh_net.load_state_dict(net_weights.state_dict())

    model = Top(args, fresh_net, domain)
    model.clip_norm()
    if h.use_cuda:
        model.cuda()

    # Point domains train with a fixed small learning rate.
    lr = 0.0001 if domain in POINT_DOMAINS else args.lr
    model.optimizer = optim.Adam(model.parameters(), lr=lr)
    model.lrschedule = optim.lr_scheduler.ReduceLROnPlateau(
        model.optimizer, 'min',
        patience=args.patience, threshold=args.threshold,
        min_lr=1e-06, factor=args.factor, verbose=True)
    model.name = net_create.__name__
    return model
class BertConfig(PretrainedConfig):
    """BERT configuration extended with early-exit options.

    Beyond the standard BERT hyperparameters, this config carries the
    early-exit settings used by this project: which layers host exit heads
    (`exit_layers`), whether earlier layers are frozen during training,
    probing / gold-exit evaluation modes, per-exit thresholds, and the
    `SWEET` toggle.
    """
    model_type = 'bert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, exit_layers=None, freeze_previous_layers=False, probe_model=False, gold_exit_layer=None, exit_thresholds=1, sweet=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # --- standard BERT hyperparameters ---
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # --- early-exit settings ---
        # Default to a single exit at the final layer when none are given.
        self.exit_layers = ([num_hidden_layers] if (exit_layers is None) else exit_layers)
        self.freeze_previous_layers = freeze_previous_layers
        # NOTE(review): presumably enables probing classifiers on frozen
        # layers / a gold-oracle exit layer for evaluation — confirm against
        # the model code that reads these fields.
        self.probe_model = probe_model
        self.gold_exit_layer = gold_exit_layer
        self.exit_thresholds = exit_thresholds
        # Filled in later by training/eval code; not constructor arguments.
        self.loss_fct = None
        self.exit_strategy = None
        self.exit_kwargs = None
        self.loss_kwargs = None
        self.SWEET = sweet
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.