code stringlengths 17 6.64M |
|---|
def unfreeze_t5(model):
    """Enable gradients on every top-level child of the model EXCEPT the
    'gnn_model' child, which is left in its current (frozen) state.

    Accepts either a bare model or a DataParallel/DDP wrapper exposing
    `.module`.
    """
    core = model.module if hasattr(model, 'module') else model
    for child_name, child in core.named_children():
        if child_name == 'gnn_model':
            # Keep the GNN frozen; only the T5 side is unfrozen here.
            continue
        for param in child.parameters():
            param.requires_grad = True
|
def overwrite_t5stack_forward(t5_stack):
    """Monkey-patch `t5_stack.forward` with a variant of the Hugging Face
    T5Stack forward pass that can splice a GNN over the hidden states.

    After transformer layer number `layer2insert`, the hidden states are
    reshaped per-example and passed through `gnn_fid`'s graph modules before
    continuing through the remaining layers. All other logic mirrors the
    stock T5Stack.forward.
    """
    def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, gnn_fid=None, layer2insert=None, graphs=None, node_indices=None, relation_memory=None, graph_mode=True):
        # Model-parallel setup: pin the embedding table to the first device.
        if self.model_parallel:
            torch.cuda.set_device(self.first_device)
            self.embed_tokens = self.embed_tokens.to(self.first_device)
        # Fall back to config defaults when flags are not given explicitly.
        use_cache = (use_cache if (use_cache is not None) else self.config.use_cache)
        output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
        output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        # Exactly one of input_ids / inputs_embeds must be provided.
        if ((input_ids is not None) and (inputs_embeds is not None)):
            err_msg_prefix = ('decoder_' if self.is_decoder else '')
            raise ValueError(f'You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time')
        elif (input_ids is not None):
            input_shape = input_ids.size()
            input_ids = input_ids.view((- 1), input_shape[(- 1)])
        elif (inputs_embeds is not None):
            input_shape = inputs_embeds.size()[:(- 1)]
        else:
            err_msg_prefix = ('decoder_' if self.is_decoder else '')
            raise ValueError(f'You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds')
        if (inputs_embeds is None):
            assert (self.embed_tokens is not None), 'You have to initialize the model with valid token embeddings'
            inputs_embeds = self.embed_tokens(input_ids)
        (batch_size, seq_length) = input_shape
        # The attention mask must also cover cached decoder positions, if any.
        mask_seq_length = ((past_key_values[0][0].shape[2] + seq_length) if (past_key_values is not None) else seq_length)
        if (use_cache is True):
            assert self.is_decoder, f'`use_cache` can only be set to `True` if {self} is used as a decoder'
        if (attention_mask is None):
            attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
        if (self.is_decoder and (encoder_attention_mask is None) and (encoder_hidden_states is not None)):
            encoder_seq_length = encoder_hidden_states.shape[1]
            encoder_attention_mask = torch.ones(batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long)
        # One (possibly None) cache entry per layer.
        if (past_key_values is None):
            past_key_values = ([None] * len(self.block))
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
        # Cross-attention mask for the decoder (inverted to additive form).
        if (self.is_decoder and (encoder_hidden_states is not None)):
            (encoder_batch_size, encoder_sequence_length, _) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if (encoder_attention_mask is None):
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_layers)
        cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
        # Accumulators for optional outputs.
        present_key_value_states = (() if use_cache else None)
        all_hidden_states = (() if output_hidden_states else None)
        all_attentions = (() if output_attentions else None)
        all_cross_attentions = (() if (output_attentions and self.is_decoder) else None)
        position_bias = None
        encoder_decoder_position_bias = None
        hidden_states = self.dropout(inputs_embeds)
        for (i, (layer_module, past_key_value)) in enumerate(zip(self.block, past_key_values)):
            layer_head_mask = head_mask[i]
            cross_attn_layer_head_mask = cross_attn_head_mask[i]
            # Model parallel: move every tensor to the current layer's device.
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if (attention_mask is not None):
                    attention_mask = attention_mask.to(hidden_states.device)
                if (position_bias is not None):
                    position_bias = position_bias.to(hidden_states.device)
                if (encoder_hidden_states is not None):
                    encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
                if (encoder_extended_attention_mask is not None):
                    encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
                if (encoder_decoder_position_bias is not None):
                    encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
                if (layer_head_mask is not None):
                    layer_head_mask = layer_head_mask.to(hidden_states.device)
                if (cross_attn_layer_head_mask is not None):
                    cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = (all_hidden_states + (hidden_states,))
            if (self.gradient_checkpointing and self.training):
                if use_cache:
                    logger.warning('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return tuple(module(*inputs, use_cache, output_attentions))
                    return custom_forward
                layer_outputs = checkpoint(create_custom_forward(layer_module), hidden_states, extended_attention_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None)
            else:
                layer_outputs = layer_module(hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions)
            # Insert a None placeholder for the cache slot when caching is off,
            # so the positional indexing below stays uniform.
            if (use_cache is False):
                layer_outputs = ((layer_outputs[:1] + (None,)) + layer_outputs[1:])
            (hidden_states, present_key_value_state) = layer_outputs[:2]
            # GNN splice point: after layer `layer2insert`, reshape the hidden
            # states to (num_examples, n_passages, text_length, dim) and run
            # each example's graph through the GNN when it has both passage
            # and question mention nodes.
            if (((i + 1) == layer2insert) and graph_mode):
                hidden_states_ = hidden_states.view(len(graphs), gnn_fid.encoder.n_passages, gnn_fid.encoder.text_length, (- 1))
                for (idx, graph) in enumerate(graphs):
                    if ((graph.num_nodes() != 0) and (len(node_indices[idx]['passage_mention']) != 0) and (len(node_indices[idx]['question_mention']) != 0)):
                        feat = gnn_fid.graph_attriution_before(hidden_states_[idx], node_indices[idx])
                        hidden_states_[idx] = gnn_fid.graph_attriution_after(hidden_states_[idx], gnn_fid.gnn_model(graph, feat, relation_memory, node_indices[idx]), node_indices[idx])
                hidden_states = hidden_states_.view(hidden_states.shape)
            # Position biases are computed by the first layer and shared.
            position_bias = layer_outputs[2]
            if (self.is_decoder and (encoder_hidden_states is not None)):
                encoder_decoder_position_bias = layer_outputs[(4 if output_attentions else 3)]
            if use_cache:
                present_key_value_states = (present_key_value_states + (present_key_value_state,))
            if output_attentions:
                all_attentions = (all_attentions + (layer_outputs[3],))
                if self.is_decoder:
                    all_cross_attentions = (all_cross_attentions + (layer_outputs[5],))
            # Model parallel: hand the activations to the next device when this
            # layer was the last one mapped to the current device.
            if self.model_parallel:
                for (k, v) in self.device_map.items():
                    if ((i == v[(- 1)]) and (('cuda:' + str(k)) != self.last_device)):
                        hidden_states = hidden_states.to(('cuda:' + str((k + 1))))
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if output_hidden_states:
            all_hidden_states = (all_hidden_states + (hidden_states,))
        if (not return_dict):
            return tuple((v for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions] if (v is not None)))
        return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions)
    # Bind the patched forward onto this specific stack instance.
    t5_stack.forward = types.MethodType(forward, t5_stack)
|
def missing_grad_handler(model):
    """Return a zero-valued scalar that touches every parameter of `model`.

    The result is `sum(p.sum()) * 0.0`, so adding it to a loss changes
    nothing numerically while still connecting all parameters to the graph
    (presumably to avoid unused-parameter/missing-gradient errors — confirm
    against the training setup).
    """
    touched = sum(param.sum() for _, param in model.named_parameters())
    return touched * 0.0
|
def train(model, optimizer, scheduler, step, train_dataset, eval_dataset, test_dataset, opt, collator, best_dev_em, checkpoint_path, relation_bank):
    """Gradient-accumulated training loop with periodic dev/test evaluation
    and checkpointing.

    `step` counts optimizer steps (one per `opt.accumulation_steps` batches);
    the loop runs until `opt.total_steps`. NOTE(review): `tokenizer` and
    `logger` are read from module scope, and `wandb` is used when
    `opt.wandb` is set — confirm they are initialized by the caller.
    """
    # Per-rank seed so data order differs across distributed workers.
    torch.manual_seed((opt.local_rank + opt.seed))
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=opt.per_gpu_batch_size, drop_last=True, collate_fn=collator)
    # `loss` is never read afterwards; `curr_loss` accumulates for logging.
    (loss, curr_loss) = (0.0, 0.0)
    epoch = 1
    model.train()
    model.zero_grad()
    inner_step = 0
    while (step < opt.total_steps):
        epoch += 1
        for (i, batch) in enumerate(train_dataloader):
            inner_step += 1
            (idx, labels, target_mask, context_ids, context_mask, graphs, node_indices) = batch
            train_loss = model(input_ids=context_ids.to(opt.device), attention_mask=context_mask.to(opt.device), labels=labels.to(opt.device), graphs=[g.to(opt.device) for g in graphs], node_indices=node_indices, relation_bank_ids=relation_bank['input_ids'].to(opt.device), relation_bank_masks=relation_bank['attention_mask'].to(opt.device))[0]
            train_loss.backward()
            # Average the loss across ranks for reporting only (backward done above).
            train_loss = src.util.average_main(train_loss, opt)
            curr_loss += train_loss.item()
            # Gradient accumulation: step the optimizer every N batches.
            if ((inner_step % opt.accumulation_steps) == 0):
                step += 1
                torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                if (opt.wandb and opt.is_main):
                    wandb.log({'dynamic training loss': train_loss.item()})
                if ((step % opt.eval_freq) == 0):
                    dev_em = evaluate(model, eval_dataset, tokenizer, collator, opt, relation_bank)
                    test_em = 0
                    if (test_dataset != None):
                        # `for_eval` switches the collator into test mode (adds real_index).
                        collator.for_eval = True
                        test_em = evaluate(model, test_dataset, tokenizer, collator, opt, relation_bank, ifTest=True, step=step, checkpoint_path=checkpoint_path)
                        collator.for_eval = False
                    if opt.is_main:
                        if (dev_em > best_dev_em):
                            best_dev_em = dev_em
                            src.util.save(model, optimizer, scheduler, step, best_dev_em, opt, checkpoint_path, 'best_dev')
                        log = f'{step} / {opt.total_steps} |'
                        log += f'train: {(curr_loss / (opt.eval_freq * opt.accumulation_steps)):.3f} |'
                        log += f'dev evaluation: {(100 * dev_em):.2f} EM |'
                        log += f'lr: {scheduler.get_last_lr()[0]:.5f}'
                        if (test_dataset != None):
                            log += f'test evaluation: {(100 * test_em):.2f} EM |'
                            # Re-read the test predictions written by evaluate()
                            # and score the 1-hop-solvable subset.
                            model_outputs = {}
                            with open((checkpoint_path / 'test_outputs_{}.jsonl'.format(str(step))), encoding='utf8') as f:
                                for line in f:
                                    line = json.loads(line)
                                    model_outputs[line['id']] = line
                            ground_truth = json.load(open(opt.ground_truth_for_test, encoding='utf8'))
                            total_onehop_solvable = [idx for (idx, element) in ground_truth.items() if element['1hop_solvable']]
                            correct = []
                            for (_, element) in model_outputs.items():
                                if (src.evaluation.normalize_answer(element['hyp']) in [src.evaluation.normalize_answer(e) for e in element['gold']]):
                                    correct.append(str(element['id']))
                            my_onehop_solvable = []
                            for correcdt_idx in correct:
                                if ground_truth[correcdt_idx]['1hop_solvable']:
                                    my_onehop_solvable.append(correcdt_idx)
                            log += '| 1-hop solvables: {:.4f} ({}/{})'.format((len(my_onehop_solvable) / len(total_onehop_solvable)), len(my_onehop_solvable), len(total_onehop_solvable))
                        logger.info(log)
                        curr_loss = 0.0
                        if opt.wandb:
                            wandb.log({'dev EM': dev_em, 'test EM': test_em})
                    # evaluate() switched the model to eval mode; switch back.
                    model.train()
                if (opt.is_main and ((step % opt.save_freq) == 0)):
                    src.util.save(model, optimizer, scheduler, step, best_dev_em, opt, checkpoint_path, f'step-{step}')
                if (step > opt.total_steps):
                    break
|
def evaluate(model, dataset, tokenizer, collator, opt, relation_bank, ifTest=False, step=0, checkpoint_path=None):
    """Run greedy generation over `dataset` and return the exact-match score
    averaged across distributed workers.

    When `ifTest` is set, the batch additionally carries `real_index`, and
    each prediction is appended to `test_outputs_{step}.jsonl` under
    `checkpoint_path` for later analysis.
    """
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(dataset, sampler=sampler, batch_size=(opt.per_gpu_batch_size * 2), drop_last=False, collate_fn=collator)
    model.eval()
    total = 0
    exactmatch = []
    # Unwrap DataParallel/DDP so `.generate()` is reachable.
    model = (model.module if hasattr(model, 'module') else model)
    with torch.no_grad():
        for (i, batch) in enumerate(dataloader):
            if ifTest:
                (idx, labels, target_mask, context_ids, context_mask, graphs, node_indices, real_index) = batch
            else:
                (idx, labels, target_mask, context_ids, context_mask, graphs, node_indices) = batch
            outputs = model.generate(input_ids=context_ids.to(opt.device), attention_mask=context_mask.to(opt.device), graphs=[g.to(opt.device) for g in graphs], node_indices=node_indices, max_length=10, relation_bank_ids=relation_bank['input_ids'].to(opt.device), relation_bank_masks=relation_bank['attention_mask'].to(opt.device))
            for (k, o) in enumerate(outputs):
                ans = tokenizer.decode(o, skip_special_tokens=True)
                gold = dataset.get_example(idx[k])['answers']
                score = src.evaluation.ems(ans, gold)
                total += 1
                exactmatch.append(score)
                if ifTest:
                    # NOTE(review): the file is reopened in append mode once per
                    # sample — works, but opening once per evaluation would be cheaper.
                    with open((checkpoint_path / 'test_outputs_{}.jsonl'.format(str(step))), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps({'id': (real_index[k] + 1), 'hyp': ans, 'gold': gold}) + '\n'))
    # Combine per-rank means into a dataset-wide weighted average.
    (exactmatch, total) = src.util.weighted_average(np.mean(exactmatch), total, opt)
    return exactmatch
|
def play_game(model_name, env_name, game_queue, reward_queue, index):
    """Worker process: repeatedly pull a game id from `game_queue`, play one
    episode of the gym environment with the loaded model (or random actions),
    and push the episode's cumulative reward onto `reward_queue`.

    NOTE(review): `args` and `device` are read from module scope — confirm
    they are set up before the process starts.
    """
    print('Starting process #{}..'.format(index))
    if (not args.random):
        model = torch.load(model_name, map_location=device)
        model.eval()
    env = gym.make(env_name, full_action_space=True)
    rng = np.random.default_rng()
    while (not game_queue.empty()):
        # Non-blocking get: another worker may drain the queue between
        # empty() and get(), hence the Empty handler.
        try:
            game = game_queue.get(False, None)
        except Empty:
            print('Game queue empty')
            return
        # Random number of initial no-op actions for evaluation diversity.
        no_ops = randint(0, args.no_op)
        no_ops_done = 0
        o = env.reset()
        (r, d, i) = (0.0, False, None)
        total_reward = 0
        total_frames = 0
        # Frame stack, most recent frame first; seeded with black frames.
        stack = []
        for _ in range(args.framestack):
            stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
        while True:
            if args.display:
                env.render()
            img = Image.fromarray(o)
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img)
            stack.insert(0, img)
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            if args.merge:
                # Merge the stack into a single frame via per-pixel maximum.
                image_stack = map(Image.fromarray, stack)
                img = reduce(ImageChops.lighter, image_stack)
                np_stack = np.asarray(img, dtype=np.float32)
                np_stack = np.expand_dims(np_stack, axis=0)
            else:
                # Stack frames along the channel axis.
                np_stack = np.concatenate(stack, axis=2)
                np_stack = np.expand_dims(np_stack, axis=0)
            np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            if (no_ops_done < no_ops):
                (o, r, d, i) = env.step(0)
                no_ops_done += 1
            elif (not args.random):
                # Model expects NCHW, hence the axis swap from NHWC.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()
                prediction = F.softmax(prediction, dim=1)
                if (args.action == 'argmax'):
                    prediction = np.argmax(prediction)
                elif (args.action == 'sampling'):
                    prediction = np.array(prediction[0])
                    p = (prediction / np.sum(prediction))
                    prediction = rng.choice(list(range(len(prediction))), p=p)
                (o, r, d, i) = env.step(prediction)
            elif args.random:
                # 18 = full Atari action space size.
                (o, r, d, i) = env.step(np.random.randint(18))
            total_reward += r
            total_frames += 1
            if (d or (total_frames > args.max_frames)):
                reward_queue.put(total_reward)
                break
        print('#{} finished game {}'.format(index, game))
|
def main():
    """Evaluate every model in `args.models` by playing `args.games` episodes
    across `args.processes` worker processes, then write per-episode rewards
    plus summary statistics to `<args.save>/<model_name>.txt`.

    NOTE(review): reads `args` from module scope.
    """
    set_start_method('spawn')
    for model in args.models:
        model_name = os.path.basename(os.path.normpath(model))
        results_path = os.path.normpath(args.save)
        if (not os.path.exists(results_path)):
            os.mkdir(results_path)
        results_name = '{}.txt'.format(model_name)
        results_file = os.path.normpath(os.path.join(results_path, results_name))
        print('Evaluating model {}'.format(model))
        # Manager queues so they can be shared with spawned processes.
        rewards = multiprocessing.Manager().Queue(1000000)
        games = multiprocessing.Manager().Queue(1000000)
        for i in range(args.games):
            games.put(i)
        procs = []
        for i in range(args.processes):
            proc = Process(target=play_game, args=(model, args.env, games, rewards, i))
            proc.start()
            procs.append(proc)
        print('Processes started')
        for (k, proc) in enumerate(procs):
            print('Waiting to join process #{}'.format(k))
            proc.join()
            print('Joined process #{}'.format(k))
        print('Processes joined')
        with open(results_file, 'w') as f:
            rewards_list = []
            while (not rewards.empty()):
                r = rewards.get()
                rewards_list.append(r)
                f.write('{}\n'.format(r))
                print(r)
            # BUG FIX: previously `len(rewards_list) <= 1` zeroed avg/min/max
            # even when exactly one reward was collected. Only stdev genuinely
            # requires at least two data points.
            if (not rewards_list):
                avg = 0
                std = 0
                minim = 0
                maxim = 0
            else:
                avg = round(statistics.mean(rewards_list), 1)
                std = (round(statistics.stdev(rewards_list), 1) if (len(rewards_list) > 1) else 0)
                minim = min(rewards_list)
                maxim = max(rewards_list)
            f.write('Avg: {}'.format(avg))
            print('Avg: {}, std: {}, min: {}, max: {}'.format(avg, std, minim, maxim))
|
def main():
    """Interactive play loop: capture frames from the game process via the
    `Connection`, feed a frame stack to the model (or act randomly), and send
    the predicted key presses back. Page Up + p starts playing, Page Up + s
    stops; when `args.output` is set, frames and pressed buttons are recorded.

    NOTE(review): reads `args`, `device`, `KEY_MAPPING`, `Connection`,
    `start_recording` and `finish_recording` from module scope.
    """
    model = None
    if (not args.random):
        model = torch.load(args.model, map_location=device)
        model.eval()
    c = Connection(start_binary=(not args.dont_start_binary), binary_path=args.binary)
    # First element of each mapping entry is the button name to press.
    buttons = [buttons[0] for buttons in KEY_MAPPING[args.game]]
    num_buttons = len(buttons)
    is_playing = False
    target_time_per_frame = (1.0 / args.framerate)
    frame_time = None
    stack = []
    for _ in range(args.framestack):
        stack.append(np.zeros((args.width, args.height, 3), dtype=np.float32))
    print('Ready to play (Page Up + p)...')
    recording_id = None
    image_directory = None
    recorded_data = []
    recording_index = 0
    while True:
        frame_time = time.time()
        c.req.allow_user_override = True
        c.req.get_keys = True
        c.req.get_image = True
        c.req.quality = args.quality
        c.req.process_name = args.process
        response = c.send_request()
        if ('page up' in response.pressed_keys):
            if (('p' in response.pressed_keys) and (not is_playing)):
                is_playing = True
                print('Starting to play (stop with Page Up + s)')
                print(('Currently playing: ' + str(is_playing)))
                # Reset the stack with black frames (trimmed to size below).
                for _ in range(args.framestack):
                    stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
                if (args.output is not None):
                    recording_index = 0
                    (recording_id, image_directory) = start_recording(args.output, args.game)
                    recorded_data = []
            elif (('s' in response.pressed_keys) and is_playing):
                print('Stopped playing. Start with Page Up + p')
                is_playing = False
                if (args.output is not None):
                    finish_recording(args.output, args.game, recording_id, recorded_data)
        if is_playing:
            img = Image.open(io.BytesIO(response.image))
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img, dtype=np.float32)
            stack.insert(0, img)
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            np_stack = np.concatenate(stack, axis=2)
            np_stack = np.expand_dims(np_stack, axis=0)
            np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            prediction = None
            if (not args.random):
                # Model expects NCHW, hence the axis swap from NHWC.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()[0]
                prediction = torch.sigmoid(prediction).numpy()
                # Sample each button independently from its sigmoid probability.
                # BUG FIX: `np.int` was removed in NumPy 1.24; use builtin int.
                prediction = (np.random.random(size=prediction.shape) < prediction).astype(int)
                prediction = prediction.tolist()
            else:
                prediction = np.random.randint(2, size=num_buttons).tolist()
            for i in range(len(buttons)):
                if (prediction[i] == 1):
                    c.req.press_keys.append(buttons[i])
                else:
                    c.req.release_keys.append(buttons[i])
            c.req.get_image = False
            c.req.get_keys = False
            _ = c.send_request()
            if (args.output is not None):
                image = response.image
                with open(os.path.join(image_directory, '{}.jpg'.format(recording_index)), 'wb') as f:
                    f.write(image)
                recorded_data.append({'b': [buttons[i] for i in range(len(buttons)) if prediction[i]]})
                recording_index += 1
        # Sleep away whatever is left of this frame's time budget.
        sleep_time = ((target_time_per_frame - time.time()) + frame_time)
        if (sleep_time <= 0.0):
            print('[Warning] Can not keep up with the desired framerate.')
            sleep_time = 0.0
        else:
            time.sleep(sleep_time)
|
def play_game(model_name, queue, index):
    """Worker process: play `args.games` episodes of the configured ViZDoom
    scenario with the loaded model (or random button presses) and push each
    episode's cumulative reward onto `queue`.

    NOTE(review): reads `args` and `device` from module scope.
    """
    print('Starting process #{}..'.format(index))
    if (not args.random):
        model = torch.load(model_name, map_location=device)
        model.eval()
    env = vzd.DoomGame()
    env.load_config(args.config)
    if args.display:
        env.set_window_visible(True)
        env.set_mode(vzd.Mode.ASYNC_PLAYER)
    else:
        env.set_mode(vzd.Mode.PLAYER)
    env.init()
    rng = np.random.default_rng()
    for game in range(args.games):
        env.new_episode()
        o = env.get_state()
        (r, d, i) = (0.0, False, None)
        total_reward = 0
        # Frame stack, most recent frame first; seeded with black frames.
        stack = []
        for _ in range(args.framestack):
            stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
        while True:
            # ViZDoom screen buffer is CHW; convert to HWC for PIL.
            img = o.screen_buffer
            img = img.transpose([1, 2, 0])
            img = Image.fromarray(img)
            img = img.resize((args.width, args.height), Image.BILINEAR)
            img = np.asarray(img, dtype=np.float32)
            stack.insert(0, img)
            while (len(stack) > args.framestack):
                stack.pop()
            if (len(stack) != args.framestack):
                continue
            np_stack = np.concatenate(stack, axis=2)
            np_stack = np.expand_dims(np_stack, axis=0)
            np_stack = np_stack.astype(np.float32)
            np_stack /= 255
            if args.random:
                actions_num = env.get_available_buttons_size()
                prediction = np.random.randint(2, size=actions_num).tolist()
            else:
                # Model expects NCHW, hence the axis swap from NHWC.
                prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()[0]
                prediction = torch.sigmoid(prediction).numpy()
                # Sample each button independently from its sigmoid probability.
                # BUG FIX: `np.int` was removed in NumPy 1.24; use builtin int.
                prediction = (np.random.random(size=prediction.shape) < prediction).astype(int)
                prediction = prediction.tolist()
            r = env.make_action(prediction, args.rate)
            d = env.is_episode_finished()
            total_reward += r
            if d:
                queue.put(total_reward)
                break
            else:
                o = env.get_state()
        print('#{} finished game {}'.format(index, game))
|
def main():
    """Evaluate each model in `args.models` with `args.processes` parallel
    `play_game` workers and write the rewards plus their average to
    `<args.save>/<model_name>.txt`.
    """
    set_start_method('spawn')
    for model_path in args.models:
        base_name = os.path.basename(os.path.normpath(model_path))
        out_dir = os.path.normpath(args.save)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        out_file = os.path.normpath(os.path.join(out_dir, '{}.txt'.format(base_name)))
        print('Evaluating model {}'.format(model_path))
        rewards = Queue()
        workers = []
        for worker_id in range(args.processes):
            worker = Process(target=play_game, args=(model_path, rewards, worker_id))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
        with open(out_file, 'w') as f:
            collected = []
            while not rewards.empty():
                reward = rewards.get()
                collected.append(reward)
                f.write('{}\n'.format(reward))
                print(reward)
            avg = (sum(collected) / len(collected)) if collected else 0
            f.write('Avg: {}'.format(avg))
            print('Avg: {}'.format(avg))
|
def get_avg_from_file(file_path):
    """Parse the trailing 'Avg: <value>' line of a results file and return
    the value as a float.
    """
    with open(file_path) as f:
        last_line = f.readlines()[-1]
    return float(re.match('Avg: (.*)', last_line).group(1))
|
def get_stdev_from_file(file_path):
    """Return the sample standard deviation of the numeric lines in a
    results file (as parsed by `get_datapoints_from_file`).
    """
    return statistics.stdev(get_datapoints_from_file(file_path))
|
def get_datapoints_from_file(file_path):
    """Return one float per parseable line of `file_path`, silently skipping
    lines that are not numbers (e.g. the trailing 'Avg: ...' summary line).
    """
    values = []
    with open(file_path) as f:
        for line in f:
            try:
                values.append(float(line))
            except ValueError:
                continue
    return values
|
def finish_recording(recording_path, env_name, unique_id, data):
    """Persist one recording session's button-press data as
    `<recording_path>/trajectories_pressed_buttons/<env_name>/<unique_id>.json`.
    The directory is expected to exist (created by `start_recording`).
    """
    out_path = os.path.join(recording_path, 'trajectories_pressed_buttons', '{}'.format(env_name), '{}.json'.format(unique_id))
    with open(out_path, 'w') as f:
        json.dump(data, f)
|
def start_recording(recording_path, env_name):
    """Initialize the on-disk layout for a new recording session.

    Creates `<recording_path>/screens/<env_name>/<unique_id>/` (must not
    already exist) and ensures the trajectories directory exists. Returns
    `(unique_id, screens_dir)` where `unique_id` is the current UNIX
    timestamp as a string.
    """
    unique_id = str(int(time.time()))
    screens_dir = os.path.join(recording_path, 'screens', '{}'.format(env_name), unique_id)
    os.makedirs(screens_dir)
    os.makedirs(os.path.join(recording_path, 'trajectories_pressed_buttons', '{}'.format(env_name)), exist_ok=True)
    return (unique_id, screens_dir)
|
def main(args):
    """Record gameplay: poll the game process via `Connection` at
    `args.framerate`, saving one JPEG per frame plus the mouse position and
    pressed keys of the previous frame.

    Controls (all chorded with Page Up): r = start / save-and-restart a
    recording, s = stop and save, q = save (if recording) and quit.
    """
    c = Connection(start_binary=(not args.dont_start_binary), binary_path=args.binary)
    record = False
    recording_id = None
    image_directory = None
    recorded_data = []
    recording_index = 0
    recording_start_time = None
    previous_response = None
    previous_frame_time = None
    frame_time = None
    target_time_per_frame = (1.0 / args.framerate)
    print('Ready to record (Page Up + r)...')
    try:
        while True:
            frame_time = time.time()
            c.req.get_keys = True
            c.req.get_mouse = True
            c.req.get_image = True
            c.req.quality = args.quality
            c.req.process_name = args.process_name
            response = c.send_request()
            if ('page up' in response.pressed_keys):
                if ('q' in response.pressed_keys):
                    if record:
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                    exit()
                if ('r' in response.pressed_keys):
                    # Save the current recording if it is long enough
                    # (> 1 second of frames), then start a fresh one.
                    if (record and (recording_index > args.framerate)):
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                        print('Saved {} frames'.format(recording_index))
                    elif (record and (recording_index < args.framerate)):
                        # Debounce: too few frames to be worth saving yet.
                        continue
                    if (not record):
                        print('Recording started (Page Up + s to stop)...')
                        print('Or Page Up + r to save current frames.')
                    record = True
                    recorded_data = []
                    previous_response = None
                    previous_frame_time = None
                    recording_id = None
                    recording_index = 0
                    recording_start_time = time.time()
                    (recording_id, image_directory) = start_recording(args.output, args.env_name)
                    continue
                elif ('s' in response.pressed_keys):
                    if record:
                        record = False
                        finish_recording(args.output, args.env_name, recording_id, recorded_data)
                        print('Recording done with {} frames'.format(recording_index))
            if record:
                image = response.image
                with open(os.path.join(image_directory, '{}.jpg'.format(recording_index)), 'wb') as f:
                    f.write(image)
                recording_index += 1
                # Inputs are logged one frame late: the keys/mouse of the
                # previous response are attributed to the previous frame time.
                if previous_response:
                    (x, y) = (previous_response.mouse.x, previous_response.mouse.y)
                    pressed_keys = tuple(previous_response.pressed_keys)
                    recording_time_ms = int(((previous_frame_time - recording_start_time) * 1000))
                    recorded_data.append({'m': (x, y), 'b': pressed_keys, 't': recording_time_ms})
                previous_frame_time = frame_time
                previous_response = response
            # Sleep away whatever is left of this frame's time budget.
            sleep_time = ((target_time_per_frame - time.time()) + frame_time)
            if (sleep_time <= 0.0):
                print('[Warning] Can not keep up with the desired framerate.')
                sleep_time = 0.0
            else:
                time.sleep(sleep_time)
    except KeyboardInterrupt:
        if record:
            print('Saving current data to disk...')
            # BUG FIX: previously passed args.process_name as the environment
            # name, writing the trajectory to a different directory than the
            # one start_recording created (every other call site uses
            # args.env_name).
            finish_recording(args.output, args.env_name, recording_id, recorded_data)
|
def compress_to_bytes(compress=True, **kwargs):
    """
    Serialize a dict of numpy arrays with `np.savez` and return the bytes.

    Parameters:
        compress: If True, compress the serialized bytes with LZ4.
        kwargs: Numpy arrays to store, fed directly to `numpy.savez`.

    Returns:
        bytes: the (optionally LZ4-compressed) .npz payload.
    """
    bytes_buffer = BytesIO()
    # Serialize once; whether to compress is decided afterwards (the original
    # duplicated the savez call in both branches).
    np.savez(bytes_buffer, **kwargs)
    return_bytes = bytes_buffer.getvalue()
    if compress:
        return_bytes = lz4.frame.compress(return_bytes)
    return return_bytes
|
def decompress_to_arrays(array_bytes, compress=True):
    """
    Restore numpy arrays from bytes produced by `compress_to_bytes`.

    Parameters:
        array_bytes: The serialized (optionally LZ4-compressed) payload.
        compress: If True, the payload was LZ4-compressed and must be
            decompressed first.

    Returns:
        The `NpzFile` mapping loaded via `np.load`.
    """
    raw = lz4.frame.decompress(array_bytes) if compress else array_bytes
    return np.load(BytesIO(raw))
|
class AtariDataLoader():
    'Keras Sequence where the elements are batches from the Atari dataset'

    def __init__(self, directory, game, batch_size=32, stack=3, controls=18, size=(84, 84), percentile=None, top_n=None, augment=False, preload=False, merge=False, dqn=False, json=False, fileformat='png', action_delay=0):
        # NOTE(review): the `json` parameter shadows the json module inside
        # __init__ only; it selects the .json trajectory format elsewhere.
        self.dir = directory
        self.game = game
        self.fileformat = fileformat
        self.batch_size = batch_size
        self.stack = stack
        self.controls = controls
        self.size = size
        # Trajectory metadata lives under trajectories/<game>, frames under
        # screens/<game>/<trajectory_id>/.
        self.traj_path = os.path.join(self.dir, 'trajectories', self.game)
        self.screen_path = os.path.join(self.dir, 'screens', self.game)
        self.all_trajs = self._get_trajectory_list()
        self.n_traj = len(self.all_trajs)
        self.augment = augment
        self.merge = merge
        self.dqn = dqn
        self.json = json
        self.action_delay = action_delay
        # Per-trajectory sample counts and final scores.
        self.traj_len = []
        self.scores = []
        self.total_len = 0
        for i in range(self.n_traj):
            self.traj_len.append(self._get_samples_in_trajectory(i))
            self.scores.append(self._get_sample_score(i))
            self.total_len += self.traj_len[i]
        self.traj_len_all = self.traj_len[:]
        # Optionally keep only the best trajectories, either above a score
        # percentile or the top-n by score. In every branch self.traj_len
        # becomes a list of (trajectory_index, length) pairs.
        if (percentile is not None):
            p = np.percentile(self.scores, percentile)
            top = filter((lambda x: (x[1] >= p)), zip(range(self.n_traj), self.scores))
            self.traj_len = list(map((lambda x: (x[0], self.traj_len[x[0]])), top))
            self.total_len = sum(map((lambda x: x[1]), self.traj_len))
        elif (top_n is not None):
            top = sorted(zip(range(self.n_traj), self.scores), key=(lambda x: x[1]), reverse=True)[:top_n]
            self.traj_len = list(map((lambda x: (x[0], self.traj_len[x[0]])), top))
            self.total_len = sum(map((lambda x: x[1]), self.traj_len))
        else:
            # NOTE(review): range(self.total_len) looks inconsistent with the
            # other branches, which index by trajectory (range(self.n_traj));
            # zip truncates to len(self.traj_len), so only the first element
            # of each pair differs — confirm intended.
            self.traj_len = list(zip(range(self.total_len), self.traj_len))
        # Optional preload: pre-build every batch and keep it compressed in
        # memory. Relies on __len__ and get_batch defined elsewhere in the class.
        self.cache = []
        if preload:
            for batch in range(len(self)):
                data = self.get_batch(batch)
                b = compress_to_bytes(img=data[0], action=data[1])
                self.cache.append(b)
                print('Cached {}/{}'.format(batch, len(self)))
            print('Preload done!')
def _get_image_stacked(self, traj, id, augments=None):
'Returns time-stacked or merged images from\n the given trajectory and sample ID,\n depending on the value of self.merge\n '
stack = []
shape = None
for i in range(self.stack):
ix = (id - i)
if (ix >= 0):
stack.insert(0, self._get_image(traj, ix, augments))
if (shape is None):
shape = stack[0].shape
else:
stack.insert(0, np.zeros(shape, dtype=np.uint8))
if self.merge:
stack = map(Image.fromarray, stack)
img = reduce(ImageChops.lighter, stack)
return np.asarray(img, dtype=np.uint8)
else:
return np.concatenate(stack, axis=2)
    @lru_cache(maxsize=int(1000000.0))
    def _get_image(self, traj, id, augments=None):
        """Load frame `id` of trajectory `traj` from disk, apply the optional
        augmentations, resize to `self.size` and return it as a uint8 array.

        NOTE(review): lru_cache on an instance method keeps every loader
        instance alive for the cache's lifetime (ruff B019); also, `augments`
        must be hashable for caching to work (a plain dict would raise
        TypeError) — confirm what callers pass.
        """
        traj_name = self.all_trajs[traj]
        filename = '{}.{}'.format(id, self.fileformat)
        path = os.path.join(self.dir, 'screens', self.game, traj_name, filename)
        img = Image.open(path)
        # Force the lazy PIL image to actually read the file now.
        img.load()
        if (augments is not None):
            if ('shadow' in augments):
                # Semi-transparent black rectangle: (alpha, cx, cy, w, h).
                draw = Draw(img, 'RGBA')
                rect_color = (0, 0, 0, augments['shadow'][0])
                rect_w = augments['shadow'][3]
                rect_h = augments['shadow'][4]
                rect_x = augments['shadow'][1]
                rect_y = augments['shadow'][2]
                draw.rectangle([(rect_x - (rect_w / 2)), (rect_y - (rect_h / 2)), (rect_x + (rect_w / 2)), (rect_y + (rect_h / 2))], rect_color)
            if ('brightness' in augments):
                enhancer = ImageEnhance.Brightness(img)
                img = enhancer.enhance(augments['brightness'])
            if ('rotate' in augments):
                img = img.rotate(augments['rotate'])
            if ('shear' in augments):
                raise NotImplementedError
            if (('tx' in augments) and ('ty' in augments)):
                # Translation wraps around the image edges (ImageChops.offset).
                img = ImageChops.offset(img, xoffset=augments['tx'], yoffset=augments['ty'])
            if (('zx' in augments) and ('zy' in augments)):
                raise NotImplementedError
            if (('flip' in augments) and augments['flip']):
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
        img = img.resize(self.size, Image.BILINEAR)
        img = np.asarray(img, dtype=np.uint8)
        return img
def _flip_controls(self, control, game=None):
"Flips the controls horizontally, i.e. switches left and right buttons.\n Since qbert has diagonal movement, flipping the controls is\n more complicated, and the 'game' parameter must be set to 'qbert'."
controls = ['NOOP', 'FIRE', 'UP', 'RIGHT', 'LEFT', 'DOWN', 'UPRIGHT', 'UPLEFT', 'DOWNRIGHT', 'DOWNLEFT', 'UPFIRE', 'RIGHTFIRE', 'LEFTFIRE', 'DOWNFIRE', 'UPRIGHTFIRE', 'UPLEFTFIRE', 'DOWNRIGHTFIRE', 'DOWNLEFTFIRE']
control_name = controls[control]
new_control_name = control_name
if (game == 'qbert'):
if (control_name == 'UP'):
new_control_name = 'LEFT'
elif (control_name == 'RIGHT'):
new_control_name = 'DOWN'
elif (control_name == 'LEFT'):
new_control_name = 'UP'
elif (control_name == 'DOWN'):
new_control_name = 'RIGHT'
elif (control_name == 'UPRIGHT'):
new_control_name = 'DOWNLEFT'
elif (control_name == 'DOWNLEFT'):
new_control_name = 'UPRIGHT'
elif (control_name == 'UPFIRE'):
new_control_name = 'LEFTFIRE'
elif (control_name == 'RIGHTFIRE'):
new_control_name = 'DOWNFIRE'
elif (control_name == 'LEFTFIRE'):
new_control_name = 'UPFIRE'
elif (control_name == 'DOWNFIRE'):
new_control_name = 'RIGHTFIRE'
elif (control_name == 'UPRIGHTFIRE'):
new_control_name = 'DOWNLEFTFIRE'
elif (control_name == 'DOWNLEFTFIRE'):
new_control_name = 'UPRIGHTFIRE'
else:
control_name = controls[control]
if ('RIGHT' in control_name):
new_control_name = control_name.replace('RIGHT', 'LEFT')
elif ('LEFT' in control_name):
new_control_name = control_name.replace('LEFT', 'RIGHT')
return controls.index(new_control_name)
@lru_cache(maxsize=128)
def _get_data_lines(self, traj):
    """Return the raw text lines of trajectory `traj`'s .txt log (cached).

    NOTE(review): lru_cache on an instance method keys on `self` and keeps
    the loader alive for the cache's lifetime (ruff B019); acceptable here
    since loaders live for the whole training run.
    """
    traj_name = '{}.txt'.format(self.all_trajs[traj])
    with open(os.path.join(self.traj_path, traj_name)) as f:
        return f.read().splitlines()
def _get_data(self, traj, id, flip=False):
    """Return one step of trajectory `traj` as a list:
    [frame, reward, score, terminal, action, last]

    When `flip` is set, the action is replaced by its horizontally
    mirrored counterpart.
    """
    lines = self._get_data_lines(traj)
    # Two header lines precede the per-frame records.
    num_frames = len(lines) - 2
    fields = [field.strip() for field in lines[id + 2].split(',')]
    # Column 3 is the terminal flag; the other first five columns are numeric.
    for pos in range(5):
        fields[pos] = (fields[pos].lower() == 'true') if pos == 3 else int(float(fields[pos]))
    if flip:
        fields[4] = self._flip_controls(fields[4], game='qbert' if self.game == 'qbert' else None)
    # 'last' marks the final frame of the trajectory.
    fields.append(1 if id >= (num_frames - 1) else 0)
    return list(fields)
@lru_cache(maxsize=128)
def _get_json(self, traj):
    """Load and cache the parsed JSON log for trajectory `traj`."""
    json_path = os.path.join(self.traj_path, '{}.json'.format(self.all_trajs[traj]))
    with open(json_path) as fp:
        return json.load(fp)
def _get_data_json(self, traj, id, flip=False):
    """Return one step from the JSON log as a list:
    [frame, reward, score, terminal, action, last]

    `flip` is accepted for signature parity with _get_data but is unused
    here (JSON actions are button vectors, not ALE indices).
    """
    steps = self._get_json(traj)['steps']
    final_id = len(steps) - 1
    step = steps[id]
    is_last = (id == final_id)
    # Score is not tracked in the JSON logs, hence the constant 0.
    return [id, step['r'], 0, is_last, step['a'], is_last]
def _get_trajectory_list(self):
    """Return trajectory names present in BOTH the data and screen
    directories, sorted numerically."""
    data_names = {entry.split('.')[0] for entry in os.listdir(self.traj_path)}
    screen_names = set(os.listdir(self.screen_path))
    # Only trajectories that have both a log file and a screen folder count.
    return sorted(data_names & screen_names, key=int)
def _get_num_of_trajectories(self):
    """Return the number of trajectories in this dataset."""
    traj_dir = os.path.join(self.dir, 'trajectories', self.game)
    return len(os.listdir(traj_dir))
def _get_sample_score(self, traj):
    """Return the final score of the given trajectory ID."""
    traj_file = self.all_trajs[traj]
    if self.json:
        with open(os.path.join(self.traj_path, '{}.json'.format(traj_file))) as f:
            steps = json.load(f)['steps']
        # JSON logs store per-step rewards; the score is their sum.
        return int(sum(step['r'] for step in steps))
    with open(os.path.join(self.traj_path, '{}.txt'.format(traj_file))) as f:
        lines = f.read().splitlines()
    # Text logs carry a running score in the third field of each line.
    return int(float(lines[-1].split(',')[2]))
def _get_samples_in_trajectory(self, traj):
    """Return the number of samples in the given trajectory ID."""
    traj_file = self.all_trajs[traj]
    if self.json:
        with open(os.path.join(self.traj_path, '{}.json'.format(traj_file))) as f:
            return len(json.load(f)['steps'])
    with open(os.path.join(self.traj_path, '{}.txt'.format(traj_file))) as f:
        lines = f.read().splitlines()
    # Frame numbers are zero-based, so the last frame number + 1 is the count.
    return int(lines[-1].split(',')[0]) + 1
def _get_index_traj_and_sample(self, index):
'Returns the corresponding trajectory ID and sample ID for the\n given index'
total = 0
for (i, t_len) in self.traj_len:
if (index < (total + t_len)):
return (i, (index - total))
total += t_len
def __len__(self):
return int((self.total_len / self.batch_size))
def get_batch(self, samples):
    """Assemble a training batch for the given flat sample indices.

    Returns (x, y) arrays, or (x, y, x_next, reward, done) when self.dqn.
    NOTE(review): in DQN mode, skipped samples (trajectory-final but
    non-terminal frames) shrink the batch below batch_size.
    """
    batch_x = []
    batch_y = []
    if self.dqn:
        batch_x_next = []
        batch_reward = []
        batch_done = []
    for sample in samples:
        (traj, ix) = self._get_index_traj_and_sample(sample)
        flip = False
        if self.augment:
            # Random horizontal flip plus photometric/geometric jitter.
            flip = choice([True, False])
            augments = {'shadow': (randrange(128, 255), uniform(0, ATARI_W), uniform(0, ATARI_H), uniform(10, 100), uniform(10, 100)), 'brightness': uniform(0.5, 1.5), 'rotate': uniform((- 2), 2), 'tx': randrange((- 5), 5), 'ty': randrange((- 5), 5), 'flip': flip}
        else:
            augments = None
        if self.json:
            data = self._get_data_json(traj, ix, flip)
        else:
            data = self._get_data(traj, ix, flip)
        if (self.action_delay != 0):
            # Take the action label from a (clamped) offset frame to model
            # human reaction delay.
            action_ix = (ix + self.action_delay)
            if (action_ix >= self.traj_len_all[traj]):
                action_ix = (self.traj_len_all[traj] - 1)
            if (action_ix < 0):
                action_ix = 0
            if self.json:
                delayed_data = self._get_data_json(traj, action_ix, flip)
            else:
                delayed_data = self._get_data(traj, action_ix, flip)
            data[4] = delayed_data[4]
        if (self.dqn and (data[5] == 1) and (data[3] == 0)):
            # DQN needs a successor frame; drop final-but-not-terminal steps.
            continue
        batch_x.append(self._get_image_stacked(traj, ix, augments))
        if self.json:
            # JSON actions are button vectors, used as-is.
            batch_y.append(np.array([data[4]]))
        else:
            # Text-log actions are ALE indices, one-hot encoded.
            batch_y.append(np.eye(self.controls)[data[4]])
        if self.dqn:
            batch_reward.append(data[1])
            batch_done.append(data[5])
            if (not data[5]):
                batch_x_next.append(self._get_image_stacked(traj, (ix + 1), augments))
            else:
                # Terminal step: successor is never used, store zeros.
                batch_x_next.append(np.zeros(batch_x[0].shape, dtype=np.uint8))
    if (self.dqn == False):
        return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8))
    else:
        return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8), np.array(batch_x_next, dtype=np.uint8), np.array(batch_reward), np.array(batch_done, dtype=np.uint8))
|
class AtariDataLoaderProcess(multiprocessing.Process):
    """Worker process that owns a single AtariDataLoader and serves batch
    requests taken from a shared request queue."""

    def __init__(self, request_queue, response_queue, dataloader_args):
        super().__init__()
        self.request_queue = request_queue
        self.response_queue = response_queue
        self.loader = AtariDataLoader(**dataloader_args)

    def __len__(self):
        return len(self.loader)

    def run(self):
        # Serve forever: each request is a list of flat sample indices.
        while True:
            sample_ids = self.request_queue.get()
            self.response_queue.put(self.loader.get_batch(sample_ids))
|
class MultiprocessAtariDataLoader():
    """Spawns several dataloader worker processes and serves their batches
    through an iterator.

    Note: batches may arrive in any order, but each batch is returned
    exactly once per epoch.
    """

    def __init__(self, dataloader_args, workers):
        super().__init__()
        self.request_queue = multiprocessing.Manager().Queue()
        self.queue = multiprocessing.Queue(maxsize=workers)
        # Probe a throwaway loader for sizing metadata.
        probe = AtariDataLoader(**dataloader_args)
        self.batch_size = probe.batch_size
        self.sample_length = probe.total_len
        self.length = len(probe)
        self.shape = probe._get_image(0, 0).shape
        self.loaders = [AtariDataLoaderProcess(self.request_queue, self.queue, dataloader_args)
                        for _ in range(workers)]
        for worker in self.loaders:
            worker.start()

    def stop(self):
        for worker in self.loaders:
            worker.terminate()

    def __len__(self):
        return self.length

    def __next__(self):
        if self.iters >= self.length:
            raise StopIteration
        batch = self.queue.get()
        self.iters += 1
        return batch

    def __iter__(self):
        self.iters = 0
        sample_ids = list(range(self.sample_length))
        shuffle(sample_ids)
        # Enqueue every batch's worth of indices up front; workers drain them.
        for batch_no in range(self.length):
            if (batch_no % 1000) == 0:
                print('Adding batch {} to queue'.format(batch_no))
            batch = [sample_ids.pop() for _ in range(self.batch_size)]
            self.request_queue.put(batch)
        return self
|
class AtariHeadDataloader():
    """Dataloader for the Atari-HEAD dataset.

    Each trajectory is a '<name>.txt' step log in `directory` plus a
    '<name>/' folder containing one PNG screen per frame.
    """

    def __init__(self, directory, batch_size=32, stack=3, controls=18, size=(84, 84), percentile=None, top_n=None, augment=False, preload=False, merge=False, dqn=False, action_delay=0, print_stats=False):
        # NOTE(review): percentile, top_n, augment, preload and print_stats
        # are accepted for signature parity with AtariDataLoader but unused here.
        self.batch_size = batch_size
        self.stack = stack  # number of consecutive frames per sample
        self.controls = controls  # size of the discrete action set (one-hot dim)
        self.size = size  # target (width, height) for resized frames
        self.merge = merge  # if True, merge the frame stack into one image
        self.dqn = dqn  # if True, batches also carry next-state/reward/done
        self.action_delay = action_delay  # frame offset applied to action labels
        self.directory = directory
        self.all_trajs = self._get_trajectory_list()
        self.n_traj = len(self.all_trajs)
        self.traj_len = []
        for traj in range(len(self.all_trajs)):
            self.traj_len.append(self._get_samples_in_trajectory(traj))
        self.total_len = sum(self.traj_len)

    def _get_trajectory_list(self):
        """Return a sorted list of all trajectory names."""
        names = os.listdir(self.directory)
        # A trajectory is identified by its '<name>.txt' log; strip the suffix.
        names = list(filter((lambda x: x.endswith('.txt')), names))
        names = list(map((lambda x: x[:(- 4)]), names))
        names = sorted(names)
        return names

    def _get_samples_in_trajectory(self, traj):
        """Return the number of samples in the given trajectory ID."""
        lines = self._get_data_lines(traj)
        # First line is a header; every remaining line is one sample.
        return (len(lines) - 1)

    def _get_index_traj_and_sample(self, index):
        """Map a flat dataset index to a (trajectory ID, sample ID) pair.

        Implicitly returns None when `index` is out of range.
        """
        total = 0
        for (i, t_len) in enumerate(self.traj_len):
            if (index < (total + t_len)):
                return (i, (index - total))
            total += t_len

    def _get_frame_id(self, traj, index):
        """Return the frame_id field for the given trajectory and index."""
        lines = self._get_data_lines(traj)
        # Skip the header line; frame_id is the first comma-separated field.
        return lines[(index + 1)].split(',')[0]

    def _get_image_stacked(self, traj, id):
        """Return time-stacked or merged images for the given trajectory and
        sample ID, depending on the value of self.merge.
        """
        stack = []
        shape = None
        for i in range(self.stack):
            ix = (id - i)
            if (ix >= 0):
                # Older frames go to the front so time runs forward.
                stack.insert(0, self._get_image(traj, ix))
                if (shape is None):
                    shape = stack[0].shape
            else:
                # Before the trajectory start: pad with black frames
                # (shape is always set by the i == 0 iteration first).
                stack.insert(0, np.zeros(shape, dtype=np.uint8))
        if self.merge:
            # Merge by per-pixel maximum (ImageChops.lighter) into one image.
            stack = map(Image.fromarray, stack)
            img = reduce(ImageChops.lighter, stack)
            return np.asarray(img, dtype=np.uint8)
        else:
            # Stack along the channel axis.
            return np.concatenate(stack, axis=2)

    def _get_image(self, traj, index):
        """Load, resize and return one frame as a uint8 array."""
        traj_name = self.all_trajs[traj]
        frame_id = self._get_frame_id(traj, index)
        filename = '{}.png'.format(frame_id)
        path = os.path.join(self.directory, traj_name, filename)
        img = Image.open(path)
        img.load()
        img = img.resize(self.size, Image.BILINEAR)
        img = np.asarray(img, dtype=np.uint8)
        return img

    @lru_cache(maxsize=128)
    def _get_data_lines(self, traj):
        """Return the raw lines of the trajectory's .txt log (cached).

        NOTE(review): lru_cache on a method keeps `self` alive in the cache
        (ruff B019); acceptable for long-lived loader objects.
        """
        traj_name = '{}.txt'.format(self.all_trajs[traj])
        with open(os.path.join(self.directory, traj_name)) as f:
            return f.read().splitlines()

    def _get_data(self, traj, id):
        """Return a list [frame, reward, score, terminal, action, last].

        Fields that fail to parse (e.g. 'null' in the log) fall back to
        defaults: score -1, reward 0, action 0.
        """
        lines = self._get_data_lines(traj)
        num_frames = (len(lines) - 1)
        # Only the first 6 comma-separated columns are used.  Assumed layout:
        # frame_id, episode_id, score, duration, unclipped_reward, action --
        # TODO(review): confirm against the Atari-HEAD format description.
        data = lines[(id + 1)].split(',')[:6]
        data = [s.strip() for s in data]
        try:
            data[2] = int(data[2])
        except ValueError:
            data[2] = (- 1)
        try:
            data[4] = int(data[4])
        except ValueError:
            data[4] = 0
        try:
            data[5] = int(data[5])
        except ValueError:
            data[5] = 0
        if (id >= (num_frames - 1)):
            last = 1
        else:
            last = 0
        data.append(last)
        # Terminal flag is hard-coded False: the log does not mark terminals.
        return [id, data[4], data[2], False, data[5], last]

    def __len__(self):
        # Number of whole batches in the dataset.
        return int((self.total_len / self.batch_size))

    def get_batch(self, samples):
        """Assemble a batch for the given flat sample indices.

        Returns (x, y) or, when self.dqn, (x, y, x_next, reward, done).
        NOTE(review): DQN-mode skips can shrink the batch below batch_size.
        """
        batch_x = []
        batch_y = []
        if self.dqn:
            batch_x_next = []
            batch_reward = []
            batch_done = []
        for sample in samples:
            (traj, ix) = self._get_index_traj_and_sample(sample)
            data = self._get_data(traj, ix)
            if (self.action_delay != 0):
                # Take the action label from a clamped offset frame to model
                # human reaction delay.
                action_ix = (ix + self.action_delay)
                if (action_ix >= self.traj_len[traj]):
                    action_ix = (self.traj_len[traj] - 1)
                if (action_ix < 0):
                    action_ix = 0
                delayed_data = self._get_data(traj, action_ix)
                data[4] = delayed_data[4]
            if (self.dqn and (data[5] == 1) and (data[3] == 0)):
                # DQN needs a successor frame; drop final, non-terminal steps.
                continue
            batch_x.append(self._get_image_stacked(traj, ix))
            batch_y.append(np.eye(self.controls)[data[4]])
            if self.dqn:
                batch_reward.append(data[1])
                batch_done.append(data[5])
                if (not data[5]):
                    batch_x_next.append(self._get_image_stacked(traj, (ix + 1)))
                else:
                    # Terminal step: successor is never used, store zeros.
                    batch_x_next.append(np.zeros(batch_x[0].shape, dtype=np.uint8))
        if (self.dqn == False):
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8))
        else:
            return (np.array(batch_x, dtype=np.uint8), np.array(batch_y, dtype=np.uint8), np.array(batch_x_next, dtype=np.uint8), np.array(batch_reward), np.array(batch_done, dtype=np.uint8))
|
class AtariDataLoaderProcess(multiprocessing.Process):
    """Process that runs a single AtariHeadDataloader instance.

    NOTE(review): this redefines the AtariDataLoaderProcess declared
    earlier in the file (the AtariDataLoader variant); in a single module
    the later definition shadows the earlier one at import time.
    """

    def __init__(self, request_queue, response_queue, dataloader_args):
        self.loader = AtariHeadDataloader(**dataloader_args)
        self.request_queue = request_queue
        self.response_queue = response_queue
        super().__init__()

    def __len__(self):
        return len(self.loader)

    def run(self):
        # Serve forever: each request is a list of flat sample indices.
        while True:
            response = self.loader.get_batch(self.request_queue.get())
            self.response_queue.put(response)
|
class MultiprocessAtariHeadDataLoader():
    """Spawns several Atari-HEAD dataloader worker processes and serves
    their batches through an iterator.

    Note: batches may arrive in any order, but each batch is returned
    exactly once per epoch.
    """

    def __init__(self, dataloader_args, workers):
        super().__init__()
        self.request_queue = multiprocessing.Manager().Queue()
        self.queue = multiprocessing.Queue(maxsize=workers)
        # Probe a throwaway loader for sizing metadata.
        probe = AtariHeadDataloader(**dataloader_args)
        self.batch_size = probe.batch_size
        self.sample_length = probe.total_len
        self.length = len(probe)
        self.shape = probe._get_image(0, 0).shape
        self.loaders = [AtariDataLoaderProcess(self.request_queue, self.queue, dataloader_args)
                        for _ in range(workers)]
        for worker in self.loaders:
            worker.start()

    def stop(self):
        for worker in self.loaders:
            worker.terminate()

    def __len__(self):
        return self.length

    def __next__(self):
        if self.iters >= self.length:
            raise StopIteration
        batch = self.queue.get()
        self.iters += 1
        return batch

    def __iter__(self):
        self.iters = 0
        sample_ids = list(range(self.sample_length))
        shuffle(sample_ids)
        # Enqueue every batch's worth of indices up front; workers drain them.
        for batch_no in range(self.length):
            if (batch_no % 1000) == 0:
                print('Adding batch {} to queue'.format(batch_no))
            batch = [sample_ids.pop() for _ in range(self.batch_size)]
            self.request_queue.put(batch)
        return self
|
def main(args):
    """Convert a raw button-press recording (args.input) into the training
    JSON format and write it to args.output.

    Each button group in KEY_MAPPING becomes one binary action component;
    the group's first button name is recorded as its representative.
    """
    with open(args.input) as f:
        recording = json.load(f)
    key_mapping = KEY_MAPPING[args.game]
    representatives = [group[0] for group in key_mapping]
    converted_steps = []
    for step in recording:
        pressed = step['b']
        # One 0/1 component per button group: 1 if any button in the group
        # was held during this step.
        action = [int(any([(button in pressed) for button in group])) for group in key_mapping]
        converted_steps.append({'r': 0.0, 't': step['t'], 'a': action})
    output = {'allowed_buttons': representatives, 'steps': converted_steps}
    with open(args.output, 'w') as f:
        json.dump(output, f)
|
def human_normalized_score(score, random, human, stdev=None):
    """Express `score` as a percentage of the random-to-human gap.

    Returns a (normalized, err_up, err_down) tuple; the error terms are
    derived from `stdev` when given, otherwise zero.
    """
    span = human - random
    norm_score = 100 * (score - random) / span
    if stdev is None:
        return (norm_score, 0, 0)
    upper = 100 * ((score + stdev) - random) / span
    lower = 100 * ((score - stdev) - random) / span
    return (norm_score, upper - norm_score, norm_score - lower)
|
def figure_nodelay_atari():
    """Horizontal-bar chart of human-normalized BC scores per dataset slice
    for five Atari games; saves figure_atari.pdf and figure_atari.png."""
    with open('results.json') as f:
        results = json.load(f)
    atari_games = ['Ms. Pac-Man', 'Video Pinball', 'Q*bert', "Montezuma's Revenge", 'Space Invaders']
    (_, axs) = plt.subplots(len(atari_games), 1, sharex=True, figsize=(6, 8))
    for (k, game) in enumerate(atari_games):
        labels = []
        means = []
        stdevs_low = []
        stdevs_high = []
        for dataset in ['Top 5%', 'Top 50%', 'All', 'Atari-HEAD']:
            # Not every game has results for every dataset slice.
            if (dataset not in results['bc'][game]):
                continue
            labels.append('{}'.format(dataset))
            mean = results['bc'][game][dataset]['mean']
            stdev = results['bc'][game][dataset]['stdev']
            (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game]['mean'], results['human'][game]['mean'], stdev)
            means.append(norm_mean)
            stdevs_low.append(norm_lower)
            stdevs_high.append(norm_upper)
        # Asymmetric error bars: [lower, upper].
        stdevs = [stdevs_low, stdevs_high]
        axs[k].invert_yaxis()
        axs[k].set_yticks(range(len(labels)))
        axs[k].set_yticklabels(labels)
        axs[k].tick_params(axis='both', which='major')
        if (k != (len(atari_games) - 1)):
            # Only the bottom subplot shows the shared x-axis labels.
            plt.setp(axs[k].get_xticklabels(), visible=False)
        else:
            axs[k].set_xlabel('% of human score')
        axs[k].set_xlim(left=(- 10), right=35)
        axs[k].set_title(game, fontsize='medium')
        axs[k].barh(range(len(labels)), means, xerr=stdevs)
        # NOTE(review): grid's `b` parameter was renamed `visible` and removed
        # in newer matplotlib releases -- may need updating.
        axs[k].grid(b=True, which='major', axis='x', color='#999999', linestyle='-', linewidth=0.25)
        axs[k].set_axisbelow(True)
    plt.tight_layout()
    plt.savefig('figure_atari.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_atari.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_nodelay():
    """Horizontal-bar chart of human-normalized BC scores ('All' slice) for
    every non-Atari game; saves figure_all.pdf and figure_all.png."""
    with open('results.json') as f:
        results = json.load(f)
    games = results['bc'].keys()
    # The Atari games are covered by figure_nodelay_atari(); exclude them.
    atari_games = ['Ms. Pac-Man', 'Video Pinball', 'Q*bert', "Montezuma's Revenge", 'Space Invaders']
    games = [game for game in games if (game not in atari_games)]
    (_, ax) = plt.subplots(1, figsize=(6, 3.25))
    labels = []
    means = []
    stdevs_low = []
    stdevs_high = []
    for game in games:
        labels.append('{}'.format(game))
        mean = results['bc'][game]['All']['mean']
        stdev = results['bc'][game]['All']['stdev']
        (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game]['mean'], results['human'][game]['mean'], stdev)
        means.append(norm_mean)
        stdevs_low.append(norm_lower)
        stdevs_high.append(norm_upper)
    # Asymmetric error bars: [lower, upper].
    stdevs = [stdevs_low, stdevs_high]
    ax.invert_yaxis()
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels, fontsize=8.5)
    ax.tick_params(axis='both', which='major')
    ax.set_xlabel('% of human score')
    ax.barh(range(len(labels)), means, xerr=stdevs)
    # NOTE(review): grid's `b` parameter was renamed `visible` and removed
    # in newer matplotlib releases -- may need updating.
    ax.grid(b=True, which='major', axis='x', color='#999999', linestyle='-', linewidth=0.25)
    ax.set_axisbelow(True)
    plt.tight_layout()
    plt.savefig('figure_all.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_all.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_delay():
    """Grid of bar charts (2 datasets x 5 games) showing human-normalized
    score as a function of the action-delay setting; saves
    figure_delay.pdf and figure_delay.png."""
    with open('results.json') as f:
        results = json.load(f)
    (_, axs) = plt.subplots(2, 5, figsize=(12, 5), sharex=True)
    # NOTE(review): cm.get_cmap is deprecated in newer matplotlib
    # (use matplotlib.colormaps) -- may need updating.
    coolwarm = cm.get_cmap('coolwarm', 9)
    colors = [coolwarm(x) for x in np.linspace(0, 1, 9)]
    for row in range(2):
        # Row 0: Atari GC top-5% dataset; row 1: Atari-HEAD dataset.
        if (row == 0):
            dataset = 'atarigc_95'
        else:
            dataset = 'atarihead'
        games = results['delay_{}'.format(dataset)].keys()
        for (k, game) in enumerate(games):
            # Game keys carry a dataset suffix; strip it to look up the
            # human/random baselines.
            game_name = game.replace('\n(Atari-HEAD)', '').replace('\n(Atari GC)', '')
            labels = []
            means = []
            stdevs_low = []
            stdevs_high = []
            for delay in ['-100', '-10', '-5', '-2', '0', '2', '5', '10', '100']:
                mean = results['delay_{}'.format(dataset)][game][delay]['mean']
                stdev = results['delay_{}'.format(dataset)][game][delay]['stdev']
                (norm_mean, norm_upper, norm_lower) = human_normalized_score(mean, results['random'][game_name]['mean'], results['human'][game_name]['mean'], stdev)
                means.append(norm_mean)
                stdevs_low.append(norm_lower)
                stdevs_high.append(norm_upper)
                labels.append(delay)
            axs[(row, k)].bar(range(len(labels)), means, yerr=[stdevs_low, stdevs_high], width=1.0, color=colors)
            axs[(row, k)].set_xticks(range(len(labels)))
            axs[(row, k)].set_xticklabels(labels, rotation='vertical')
            axs[(row, k)].tick_params(axis='x', which='major')
            if (k == 0):
                axs[(row, k)].set_ylabel('% of human score')
            axs[(row, k)].set_title(game, fontsize='medium')
    plt.tight_layout()
    plt.savefig('figure_delay.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_delay.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def figure_learning():
    """Two-panel figure: training loss per epoch (left) and evaluation score
    per epoch with per-run scatter (right) for the space_invaders_all run;
    saves figure_learning.pdf and figure_learning.png."""

    def get_avg_from_file(file_path):
        # The evaluation file ends with a line of the form 'Avg: <value>'.
        with open(file_path) as f:
            avg_line = f.readlines()[(- 1)]
        match = re.match('Avg: (.*)', avg_line)
        return float(match.group(1))

    def get_stdev_from_file(file_path):
        values = get_datapoints_from_file(file_path)
        return statistics.stdev(values)

    def get_datapoints_from_file(file_path):
        # Every line that parses as a float is one evaluation episode score.
        with open(file_path) as f:
            lines = f.readlines()
        values = []
        for line in lines:
            try:
                values.append(float(line))
            except ValueError:
                pass
        return values

    with open('results/space_invaders_all_2-history.json', 'r') as f:
        history = json.load(f)
    (_, axs) = plt.subplots(1, 2, figsize=(6, 3))
    axs[0].plot(history['loss'], label='loss')
    axs[0].set_title('Training loss', fontsize='medium')
    axs[0].set_ylabel('Loss')
    axs[0].set_xlabel('Epoch')
    axs[0].set_xticks(range(10))
    axs[0].set_xticklabels([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    # Evaluation result files are named '<run>_<repeat>_<epoch>.pt.txt'.
    repeat = 2
    r = re.compile('(.*)_{}_([0-9]{{1,4}})\\.pt\\.txt'.format(repeat))
    files = []
    path = os.path.normpath('results/')
    for entry in os.listdir(path):
        full_entry = os.path.join(path, entry)
        if os.path.isfile(full_entry):
            match = r.match(entry)
            if ((match is not None) and (match.group(1) == 'space_invaders_all')):
                epoch = int(match.group(2))
                files.append((epoch, get_avg_from_file(full_entry), get_stdev_from_file(full_entry), get_datapoints_from_file(full_entry)))
    files.sort(key=(lambda x: x[0]))
    (x, y, yerr, points) = zip(*files)
    x = list(x)
    y = list(y)
    yerr = list(yerr)
    for (epoch, entry, stdev, _) in files:
        print('{}: {} (std {})'.format(epoch, entry, stdev))
    # Scatter individual episode scores behind the error bars.
    # NOTE(review): plt.scatter draws on the current axes -- presumably
    # axs[1], the last axes created by plt.subplots; confirm.
    for (i, v) in enumerate(x):
        for _y in points[i]:
            plt.scatter(v, _y, marker='_', c='#00000028', linewidths=1)
    axs[1].errorbar(x, y, yerr=yerr)
    axs[1].set_title('Evaluation score', fontsize='medium')
    axs[1].set_xlabel('Epoch')
    axs[1].set_ylabel('Score')
    axs[1].set_xticks([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    axs[1].set_xticklabels([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    plt.tight_layout()
    plt.savefig('figure_learning.pdf', dpi=400, bbox_inches='tight', pad_inches=0)
    plt.savefig('figure_learning.png', dpi=400, bbox_inches='tight', pad_inches=0)
|
def main(args):
    """Print the episode score of each JSON result file in args.inputs,
    followed by their mean and standard deviation."""
    scores = []
    for filepath in args.inputs:
        with open(filepath) as f:
            episode = json.load(f)
        # Episode score is the sum of per-step rewards.
        scores.append(sum(step['r'] for step in episode['steps']))
    print('Individual scores: ')
    pprint(scores)
    print('Mean: {:.3f}. Std: {:.3f}'.format(np.mean(scores), np.std(scores)))
|
class Mnih2015(nn.Module):
    """CNN head similar to the network used in Mnih et al. 2015
    (Human-level control through deep reinforcement learning)."""

    def __init__(self, image_shape, num_channels, num_actions):
        super().__init__()
        self.num_actions = num_actions
        self.conv1 = nn.Conv2d(num_channels, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        # Probe the conv stack with a dummy input to size the first FC layer.
        probe = torch.randn(1, num_channels, *image_shape)
        probe = self.conv3(self.conv2(self.conv1(probe)))
        self.conv3_size = np.prod(probe.shape)
        print('conv3: {}'.format(self.conv3_size))
        self.fc1 = nn.Linear(self.conv3_size, 512)
        self.fc2 = nn.Linear(512, num_actions)

    def forward(self, x):
        hidden = F.relu(self.conv1(x))
        hidden = F.relu(self.conv2(hidden))
        hidden = F.relu(self.conv3(hidden))
        hidden = hidden.view(-1, self.conv3_size)
        hidden = F.relu(self.fc1(hidden))
        # Raw action logits; no softmax here.
        return self.fc2(hidden)
|
class Connection():
    """Automatically starts the binary and creates a socket connection to it.

    When started with the default arguments, will start the binary on an open
    port and connect to it.

    If start_binary is set to False, the binary will not be automatically
    started, and connection will instead be made to the given address and port.

    Once connection has been made, the req member will be a protobuf Request
    class as defined in messages.proto. This member can be edited to set the
    message fields for the next request.

    The send_request() method will send the current request to the binary.
    """

    def __init__(self, address='localhost', port=None, start_binary=True, binary_path='main'):
        self.req = messages_pb2.Request()
        if (port is None):
            # Ask the OS for a free port by binding to port 0, then release it.
            tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcp.bind(('', 0))
            (_, port) = tcp.getsockname()
            tcp.close()
        if start_binary:
            try:
                # The original branched on platform.system() with identical
                # bodies; the invocation is the same everywhere.
                subprocess.Popen([binary_path, '-p', str(port)], stdout=subprocess.DEVNULL)
            except OSError:
                print('Starting the binary failed')
                sys.exit()
        # Retry the connection briefly while the binary starts up.
        self.s = None
        for _ in range(10):
            try:
                self.s = socket.create_connection((address, port))
                break
            except ConnectionRefusedError:
                time.sleep(0.1)
        if (self.s is None):
            # Previously a failed connect surfaced as an AttributeError on
            # the next line; raise a meaningful error instead.
            raise ConnectionError('Could not connect to {}:{}'.format(address, port))
        self.s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)

    def send_request(self):
        """Send the Request message stored in this.req and reset it to
        default values.

        Returns the Response object received from the binary, or False if
        decoding the incoming message failed.

        Raises ConnectionResetError if the peer closes the connection.
        """
        serialized = self.req.SerializeToString()
        self.req = messages_pb2.Request()
        msg_len = len(serialized)
        # sendall loops until every byte is written; the previous plain
        # send() of the 4-byte header could legally write fewer bytes.
        self.s.sendall(msg_len.to_bytes(4, 'big'))
        self.s.sendall(serialized)
        # Responses are length-prefixed the same way: 4-byte big-endian
        # length followed by the serialized message.
        header = self._recv_exact(4)
        body = self._recv_exact(int.from_bytes(header, 'big'))
        try:
            resp_msg = messages_pb2.Response()
            resp_msg.ParseFromString(body)
            return resp_msg
        except google.protobuf.message.DecodeError as e:
            print('DecodeError in response: {}'.format(e))
            return False

    def _recv_exact(self, num_bytes):
        """Read exactly num_bytes from the socket, or raise
        ConnectionResetError if the peer closes the connection first."""
        data = b''
        while (len(data) < num_bytes):
            received = self.s.recv((num_bytes - len(data)))
            if (len(received) == 0):
                raise ConnectionResetError('Connection was closed')
            data += received
        return data
|
def load(in_file=None, format='tsv'):
    """Load a clustering from a file.

    By default the input file is a tab-separated listing of words and
    their cluster ID.

    Args:
        in_file (string): path to input file
        format (string): input file format (default: tsv)

    Returns:
        dict: word-to-tag mapping (empty for unrecognized formats)
    """
    if (format != 'tsv'):
        return {}
    with open(in_file) as f:
        # First whitespace-separated field is the word, second its cluster ID.
        return {fields[0]: int(fields[1]) for fields in (line.split() for line in f)}
|
def save(mapping=None, out=None, format='tsv'):
    """Save a clustering (dictionary) to file.

    By default the output file is a tab-separated listing of words and
    their cluster ID.

    Args:
        mapping (dict): word-to-tag mapping
        out (string): path to output file
        format (string): output file format (default: tsv)
    """
    if (format != 'tsv'):
        return
    with open(out, 'w') as outfile:
        # Stable double sort: alphabetical first, then by cluster ID, so
        # words sharing an ID come out alphabetically.
        for word in sorted(sorted(mapping), key=mapping.get):
            outfile.write('{}\t{}\n'.format(word, mapping[word]))
|
def tag_string(mapping=None, text=None, unk=unk):
    """Tag a string with the corresponding cluster IDs.

    Words missing from the clustering get the tag of `unk` when that key
    exists in the mapping, otherwise the literal string '<unk>'.

    Args:
        mapping (dict): word-to-tag mapping
        text (string): the string to be tagged
        unk (string): label for unknown/unseen words (default: module-level unk)

    Returns:
        string: space-separated sequence of tags
    """
    tags = []
    for token in text.split():
        if (token in mapping):
            tags.append(str(mapping[token]))
        elif (unk in mapping):
            tags.append(str(mapping[unk]))
        else:
            tags.append('<unk>')
    return ' '.join(tags)
|
def tag_stdin(mapping=None, unk=unk):
    """Tag every line read from stdin and print the tagged line to stdout.

    Args:
        mapping (dict): word-to-tag mapping
        unk (string): label for unknown/unseen words (default: module-level unk)
    """
    for input_line in sys.stdin:
        print(tag_string(mapping=mapping, text=input_line, unk=unk))
|
def cluster(text=None, in_file=None, classes=None, class_file=None, class_offset=None, forward_lambda=None, ngram_input=None, min_count=None, out=None, print_freqs=None, quiet=None, refine=None, rev_alternate=None, threads=None, tune_cycles=None, unidirectional=None, verbose=None, word_vectors=None):
    """Produce a clustering, given a textual input.

    Exactly one of `text` (a list of Python strings) or `in_file` (path to a
    preprocessed one-sentence-per-line text file) is required; `text` is
    probably not a good idea for large corpora.  The remaining optional
    arguments mirror the compiled clustercat binary's --help flags, with the
    leading -- removed and - replaced by _ (e.g. --tune-cycles 15 becomes
    tune_cycles=15).

    Returns:
        dict: { word : cluster_id }
    """
    # Snapshot the caller-supplied arguments before any further locals
    # (cc_dir, cmd_str, ...) pollute the namespace we iterate below.
    arg_values = dict(locals())
    import shutil  # local import keeps the file's top-level imports untouched
    cc_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    cc_bin = os.path.join(cc_dir, 'bin', 'clustercat')
    if os.path.isfile(cc_bin):
        cmd_str = [cc_bin]
    # shutil.which replaces distutils.spawn.find_executable (distutils was
    # removed from the standard library in Python 3.12).
    elif shutil.which('clustercat'):
        cmd_str = ['clustercat']
    else:
        print('Error: Unable to access clustercat binary from either ', cc_dir, " or $PATH. In the parent directory, first run 'make install', and then add $HOME/bin/ to your $PATH, by typing the following command:\necho 'PATH=$PATH:$HOME/bin' >> $HOME/.bashrc && source $HOME/.bashrc")
        exit(1)
    # Map Python keyword arguments onto the binary's command-line flags.
    clustercat_params = {'in_file': '--in', 'out': '--out', 'classes': '--classes', 'class_file': '--class-file', 'class_offset': '--class-offset', 'forward_lambda': '--forward-lambda', 'ngram_input': '--ngram-input', 'min_count': '--min-count', 'refine': '--refine', 'rev_alternate': '--rev-alternate', 'threads': '--threads', 'tune_cycles': '--tune-cycles', 'word_vectors': '--word-vectors'}
    boolean_params = {'print_freqs': '--print-freqs', 'quiet': '--quiet', 'unidirectional': '--unidirectional', 'verbose': '--verbose'}
    for (arg, value) in arg_values.items():
        if ((arg in boolean_params) and (value is True)):
            cmd_str.append(boolean_params[arg])
        elif ((arg in clustercat_params) and (value is not None)):
            cmd_str.append(clustercat_params[arg])
            cmd_str.append(str(value))
    cmd_out = ''
    if (text and (not in_file)):
        # Pipe the in-memory text through printf into the binary's stdin.
        p1 = subprocess.Popen(['printf', '\n'.join(text)], stdout=subprocess.PIPE, universal_newlines=True)
        p2 = subprocess.Popen(cmd_str, stdin=p1.stdout, stdout=subprocess.PIPE, universal_newlines=True)
        p1.stdout.close()
        cmd_out = p2.communicate()[0]
    elif (in_file and (not text)):
        cmd_out = subprocess.check_output(cmd_str, universal_newlines=True)
    else:
        print('Error: supply either text or in_file argument to clustercat.cluster(), but not both')
    clusters = {}
    for line in cmd_out.split('\n'):
        split_line = line.split('\t')
        try:
            clusters[split_line[0]] = int(split_line[1])
        except (IndexError, ValueError):
            # Skip blank/malformed lines that don't parse as "word\tid";
            # the previous bare except also swallowed unrelated errors.
            pass
    return clusters
|
def main():
    """Small CLI wrapper: tag stdin with an existing clustering, or cluster
    stdin text and print/save the mapping.

    For standalone use, prefer invoking the compiled binary directly.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Clusters words, or tags them')
    parser.add_argument('-i', '--in', help='Load input training file')
    parser.add_argument('-o', '--out', help='Save final mapping to file')
    parser.add_argument('-t', '--tag', help='Tag stdin input, using clustering in supplied argument')
    args = parser.parse_args()
    if args.tag:
        tag_stdin(mapping=load(in_file=args.tag))
        return
    mapping = cluster(text=sys.stdin)
    if args.out:
        save(mapping=mapping, out=args.out)
    else:
        print(mapping)
|
class Generator(nn.Module):
    """1-D generator: noise -> FC -> transposed-conv upsampling stack, with a
    residual noise connection, Gaussian smoothing, and a scaled tanh output."""

    def __init__(self, params):
        super().__init__()
        self.noise_dim = params.noise_dims
        self.gkernel = gkern1D(params.gkernlen, params.gkernsig)
        self.FC = nn.Sequential(
            nn.Linear(self.noise_dim, 256),
            nn.LeakyReLU(0.2),
            nn.Dropout(p=0.2),
            nn.Linear(256, 32 * 16, bias=False),
            nn.BatchNorm1d(32 * 16),
            nn.LeakyReLU(0.2))
        self.CONV = nn.Sequential(
            ConvTranspose1d_meta(16, 16, 5, stride=2, bias=False),
            nn.BatchNorm1d(16),
            nn.LeakyReLU(0.2),
            ConvTranspose1d_meta(16, 8, 5, stride=2, bias=False),
            nn.BatchNorm1d(8),
            nn.LeakyReLU(0.2),
            ConvTranspose1d_meta(8, 4, 5, stride=2, bias=False),
            nn.BatchNorm1d(4),
            nn.LeakyReLU(0.2),
            ConvTranspose1d_meta(4, 1, 5))

    def forward(self, noise, params):
        features = self.FC(noise)
        features = features.view(-1, 16, 32)
        features = self.CONV(features)
        # Residual noise connection followed by Gaussian smoothing.
        smoothed = conv1d_meta(features + noise.unsqueeze(1), self.gkernel)
        # Scaled tanh: outputs lie in (-1.05, 1.05); binary_amp sharpens the
        # transition -- presumably to push values toward +/-1 (binarization).
        return torch.tanh(smoothed * params.binary_amp) * 1.05
|
class Params():
    """Hyperparameter container backed by a JSON file.

    Example:
    ```
    params = Params(json_path)
    print(params.learning_rate)
    params.learning_rate = 0.5  # change the value of learning_rate in params
    ```
    """

    def __init__(self, json_path):
        self.update(json_path)

    def save(self, json_path):
        """Write the current parameters to `json_path` as indented JSON."""
        with open(json_path, 'w') as f:
            json.dump(self.__dict__, f, indent=4)

    def update(self, json_path):
        """Merge parameters read from `json_path` into this instance."""
        with open(json_path) as f:
            self.__dict__.update(json.load(f))

    @property
    def dict(self):
        """Dict-like access to the parameters: params.dict['learning_rate']."""
        return self.__dict__
|
def set_logger(log_path):
    """Configure the root logger to log INFO messages to both the terminal
    and the file `log_path`.

    Useful so that every output to the terminal is also saved permanently,
    e.g. to `model_dir/train.log`.

    Args:
        log_path: (string) where to log
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if logger.handlers:
        # Already configured; avoid attaching duplicate handlers.
        return
    file_handler = logging.FileHandler(log_path)
    file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(stream_handler)
|
def save_dict_to_json(d, json_path):
    """Save a dict of floats in a json file.

    Args:
        d: (dict) of float-castable values (np.float, int, float, etc.)
        json_path: (string) path to json file
    """
    # Cast everything to plain float so numpy scalars serialize cleanly.
    serializable = {key: float(value) for (key, value) in d.items()}
    with open(json_path, 'w') as out:
        json.dump(serializable, out, indent=4)
|
def row_csv2dict(csv_file):
    """Read a CSV file into a dict mapping (col0, col1) -> col2.

    NOTE(review): rows with fewer than 3 fields would raise IndexError —
    assumes every row has at least three columns.
    """
    mapping = {}
    with open(csv_file) as handle:
        for fields in csv.reader(handle, delimiter=','):
            mapping[(fields[0], fields[1])] = fields[2]
    return mapping
|
def save_checkpoint(state, checkpoint):
    """Saves model and training parameters at checkpoint + 'model.pth.tar'.

    Args:
        state: (dict) contains model's state_dict, may contain other keys
            such as epoch, optimizer state_dict
        checkpoint: (string) folder where parameters are to be saved; it is
            created (including missing parents) if it does not exist
    """
    filepath = os.path.join(checkpoint, 'model.pth.tar')
    if not os.path.exists(checkpoint):
        print('Checkpoint Directory does not exist! Making directory {}'.format(checkpoint))
        # BUGFIX: os.mkdir fails when parent directories are missing;
        # makedirs creates the whole path.
        os.makedirs(checkpoint)
    else:
        print('Checkpoint Directory exists! ')
    torch.save(state, filepath)
|
def load_checkpoint(checkpoint, model, optimizer=None, scheduler=None):
    """Loads model parameters (state_dict) from file_path. If optimizer is
    provided, loads state_dict of optimizer assuming it is present in checkpoint.

    Args:
        checkpoint: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint
        scheduler: optional: resume LR scheduler from checkpoint

    Returns:
        The loaded checkpoint dict.

    Raises:
        FileNotFoundError: if `checkpoint` does not exist.
    """
    if not os.path.exists(checkpoint):
        # BUGFIX: the original `raise "..."` raised a string, which is a
        # TypeError in Python 3; raise a proper exception instead.
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    checkpoint = torch.load(checkpoint)
    model.load_state_dict(checkpoint['gen_state_dict'])
    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_state_dict'])
    if scheduler:
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
    return checkpoint
|
def plot_loss_history(loss_history, params):
    """Plot the training curves and dump the raw histories to a .mat file.

    Args:
        loss_history: tuple of (effs_mean_history, diversity_history,
            binarization_history), sampled every `params.plot_iter` iterations.
        params: options object; reads params.plot_iter and params.output_dir.
    """
    (effs_mean_history, diversity_history, binarization_history) = loss_history
    # x-axis: one sample every plot_iter iterations.
    iterations = [step * params.plot_iter for step in range(len(effs_mean_history))]
    plt.figure()
    for curve in (effs_mean_history, diversity_history, binarization_history):
        plt.plot(iterations, curve)
    plt.xlabel('iteration')
    plt.legend(('Average Efficiency', 'Pattern diversity', 'Binarizaion'))
    plt.axis([0, len(effs_mean_history) * params.plot_iter, 0, 1.05])
    plt.savefig(params.output_dir + '/figures/Train_history.png')
    history_path = os.path.join(params.output_dir, 'history.mat')
    io.savemat(history_path, mdict={
        'effs_mean_history': np.asarray(effs_mean_history),
        'diversity_history': np.asarray(diversity_history),
        'binarization_history': np.asarray(binarization_history),
    })
|
def plot_histogram(Effs, Iter, fig_path):
    """Plot a histogram of deflection efficiencies (in %) at iteration `Iter`.

    Args:
        Effs: array-like of efficiencies in [0, 1]; plotted as percentages.
        Iter: iteration number shown in the title.
        fig_path: where the PNG is written (dpi=300).
    """
    ax = plt.figure()
    # 21 edges -> 20 bins of width 5 covering the 0..100% range.
    bins = [5 * k for k in range(21)]
    plt.hist(Effs * 100, bins, facecolor='blue', alpha=0.5)
    plt.xlim(0, 100)
    plt.ylim(0, 50)
    plt.yticks([])
    plt.xticks(fontsize=12)
    plt.xlabel('Deflection efficiency (%)', fontsize=12)
    plt.title('Iteration {}'.format(Iter), fontsize=16)
    plt.savefig(fig_path, dpi=300)
    plt.close()
|
class BaseOptions():
    """Command-line options for the CollaGAN training/testing scripts.

    `parse()` parses argv, echoes the options, and persists them to
    `<savepath>/<name>/opt.txt`; `load_opts()` restores a known subset of
    options from such a file onto an existing options object.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False

    def initialize(self):
        """Register all command-line arguments on the parser."""
        self.parser.add_argument('--G', type=str, default='UnetINDiv4_CCAM', help='choice of network for Generator')
        self.parser.add_argument('--dataroot', type=str, default='./../../Hdd_DATA/BRATS2015_mat_std_sbjnorm', help='data root')
        self.parser.add_argument('--savepath', type=str, default='./results', help='savepath')
        self.parser.add_argument('--nEpoch', type=int, default=1000, help='number of Epoch iteration')
        self.parser.add_argument('--lr', type=float, default=1e-05, help='learning rate')
        self.parser.add_argument('--lr_D', type=float, default=1e-05, help='learning rate for D')
        self.parser.add_argument('--lr_C', type=float, default=1e-05, help='learning rate for C')
        self.parser.add_argument('--disp_div_N', type=int, default=100, help=' display N per epoch')
        self.parser.add_argument('--nB', type=int, default=1, help='input batch size')
        self.parser.add_argument('--DB_small', action='store_true', help='use small DB')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2.')
        self.parser.add_argument('--name', type=str, default='demo_exp_CollaGAN_BRATS', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--w_decay', type=float, default=0.01, help='weight decay for generator')
        self.parser.add_argument('--w_decay_D', type=float, default=0.0, help='weight decay for discriminator')
        self.parser.add_argument('--lambda_l1_cyc', type=float, default=10, help='lambda_L1_cyc, StarGAN cyc loss rec')
        self.parser.add_argument('--lambda_l2_cyc', type=float, default=0.0, help='lambda_L2_cyc, StarGAN cyc loss rec')
        self.parser.add_argument('--lambda_ssim_cyc', type=float, default=1.0, help='lambda_ssim')
        self.parser.add_argument('--lambda_l2', type=float, default=0.0, help='lambda_L2')
        self.parser.add_argument('--lambda_l1', type=float, default=0.0, help='lambda_L1')
        self.parser.add_argument('--lambda_ssim', type=float, default=0.0, help='lambda_ssim')
        self.parser.add_argument('--lambda_GAN', type=float, default=1.0, help='lambda GAN')
        self.parser.add_argument('--lambda_G_clsf', type=float, default=1.0, help='generator classification loss. fake to be well classified')
        self.parser.add_argument('--lambda_D_clsf', type=float, default=1.0, help='discriminator classification loss. fake to be well classified')
        self.parser.add_argument('--lambda_cyc', type=float, default=1, help='lambda_cyc')
        self.parser.add_argument('--nEpochDclsf', type=int, default=0, help='# of nEpoch for Discriminator pretrain')
        self.parser.add_argument('--nCh_D', type=int, default=4, help='# of ngf for Discriminator')
        self.parser.add_argument('--nCh_C', type=int, default=16, help='# of ngf for Classifier')
        self.parser.add_argument('--use_lsgan', action='store_true', help='use lsgan, if not defualt GAN')
        self.parser.add_argument('--use_1x1Conv', action='store_true', help='use 1x1Conv, if not defualt 3x3conv')
        self.parser.add_argument('--wo_norm_std', action='store_true', help='NOT use std normalization')
        self.parser.add_argument('--N_null', type=int, default=1, help='# of nulling in input images')
        self.parser.add_argument('--ngf', type=int, default=64, help=' ngf')
        self.parser.add_argument('--dropout', type=float, default=0.5, help='droptout ')
        self.parser.add_argument('--test_mode', action='store_true', help='not train. just test')
        self.parser.add_argument('--AUG', action='store_true', help='use augmentation')
        self.parser.add_argument('--nEpochD', type=int, default=2, help='nEpochD update while 1 G update')
        self.initialized = True

    def parse(self):
        """Parse argv, print the options, write them to opt.txt, return them."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        # '0,1,2' -> [0, 1, 2]; negative ids are dropped.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        args = vars(self.opt)
        print('------------ Options -------------')
        for (k, v) in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # Persist options so a later run can restore them via load_opts().
        expr_dir = os.path.join(self.opt.savepath, self.opt.name)
        if not os.path.exists(expr_dir):
            os.makedirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for (k, v) in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt

    @staticmethod
    def load_opts(opt, exp_name):
        """Restore known options from `<opt.savepath>/<exp_name>/opt.txt`.

        Only a fixed subset of keys is recognized; any other key falls through
        to the `st()` debugger hook (as in the original code).
        """
        exp_dir = os.path.join(opt.savepath, exp_name)
        with open(os.path.join(exp_dir, 'opt.txt'), 'r') as opt_file:
            for aLine in opt_file.readlines():
                idx = aLine.find(':')
                if idx == -1:
                    # Header/footer lines carry no 'key: value' pair.
                    continue
                cur_opt = aLine[:idx]
                cur_val = aLine[(idx + 2):-1]  # skip ': ' and trailing newline
                if cur_opt == 'model':
                    opt.model = cur_val
                elif cur_opt == 'dataroot':
                    opt.dataroot = cur_val
                elif cur_opt == 'savepath':
                    opt.savepath = cur_val
                elif cur_opt == 'nEpoch':
                    # BUGFIX: previously assigned to opt.savepath (copy-paste
                    # error), silently clobbering the save path.
                    opt.nEpoch = int(cur_val)
                elif cur_opt == 'lr':
                    opt.lr = float(cur_val)
                elif cur_opt == 'disp_div_N':
                    opt.disp_div_N = int(cur_val)
                elif cur_opt == 'batchSize':
                    opt.batchSize = int(cur_val)
                elif cur_opt == 'input_nc':
                    opt.input_nc = int(cur_val)
                elif cur_opt == 'gpu_ids':
                    cur_val = cur_val[1:-1]  # strip the list brackets, e.g. '[0]'
                    opt.gpu_ids = [int(cur_val)]
                    print('Use GPU id......')
                elif cur_opt == 'name':
                    opt.name = cur_val
                elif cur_opt == 'use_residual':
                    opt.use_residual = (cur_val == 'True')
                elif cur_opt == 'no_flip':
                    # BUGFIX: previously assigned to opt.use_residual.
                    opt.no_flip = (cur_val == 'True')
                elif cur_opt == 'lambda_cost':
                    opt.lambda_cost = float(cur_val)
                elif cur_opt == 'weight_decay':
                    opt.weight_decay = float(cur_val)
                elif cur_opt == 'use_dropout':
                    opt.use_dropout = (cur_val == 'True')
                elif cur_opt == 'optimizer':
                    opt.optimizer = cur_val
                elif cur_opt == 'ri':
                    opt.ri = (cur_val == 'True')
                elif cur_opt == 'normalize':
                    opt.normalize = (cur_val == 'True')
                else:
                    # Unrecognized key: drop into the project's debugger hook.
                    # NOTE(review): `st` is presumably pdb.set_trace — confirm.
                    st()
        return opt
|
class BaseOptions():
    """Command-line options for the segmentation (NVDLMED) scripts.

    `parse()` parses argv, echoes the options, and persists them to
    `<savepath>/<name>/opt.txt`; `load_opts()` restores a known subset of
    options from such a file onto an existing options object.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False

    def initialize(self):
        """Register all command-line arguments on the parser."""
        self.parser.add_argument('--G', type=str, default='NVDLMED', help='choice of network')
        self.parser.add_argument('--dataroot', type=str, default='/Hdd_2/BRATS_colla/BRATS2015_mat_std_sbjnorm_D', help='data root')
        self.parser.add_argument('--savepath', type=str, default='./seg_results', help='savepath')
        self.parser.add_argument('--nEpoch', type=int, default=1000, help='number of Epoch iteration')
        self.parser.add_argument('--lr', type=float, default=1e-06, help='learning rate')
        self.parser.add_argument('--disp_div_N', type=int, default=10, help=' display N per epoch')
        self.parser.add_argument('--nB', type=int, default=1, help='input batch size')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2.')
        self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--w_decay', type=float, default=1e-05, help='weight decay')
        self.parser.add_argument('--lambda_l2', type=float, default=0.1, help='lambda_L2')
        self.parser.add_argument('--lambda_KL', type=float, default=0.1, help='lambda_L2')
        self.parser.add_argument('--ngf', type=int, default=64, help=' ngf')
        self.parser.add_argument('--dropout', type=float, default=0.2, help='droptout ')
        self.parser.add_argument('--test_mode', action='store_true', help='not train. just test')
        self.parser.add_argument('--AUG', action='store_true', help='use augmentation')
        self.parser.add_argument('--lambda_WT', type=float, default=1.0, help='lambda_WT')
        self.parser.add_argument('--lambda_TC', type=float, default=1.0, help='lambda_TC')
        self.parser.add_argument('--lambda_EC', type=float, default=1.0, help='lambda_EC')
        self.parser.add_argument('--lambda_precision', type=float, default=0.0, help='lambda_precision')
        self.parser.add_argument('--lambda_recall', type=float, default=0.0, help='lambda_recall')
        self.parser.add_argument('--tumor', type=int, default=0, help='0:WT, 1:TC, 2:EC')
        self.initialized = True

    def parse(self):
        """Parse argv, print the options, write them to opt.txt, return them."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        # '0,1,2' -> [0, 1, 2]; negative ids are dropped.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        args = vars(self.opt)
        print('------------ Options -------------')
        for (k, v) in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # Persist options so a later run can restore them via load_opts().
        expr_dir = os.path.join(self.opt.savepath, self.opt.name)
        if not os.path.exists(expr_dir):
            os.makedirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for (k, v) in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt

    @staticmethod
    def load_opts(opt, exp_name):
        """Restore known options from `<opt.savepath>/<exp_name>/opt.txt`.

        Only a fixed subset of keys is recognized; any other key falls through
        to the `st()` debugger hook (as in the original code).
        """
        exp_dir = os.path.join(opt.savepath, exp_name)
        with open(os.path.join(exp_dir, 'opt.txt'), 'r') as opt_file:
            for aLine in opt_file.readlines():
                idx = aLine.find(':')
                if idx == -1:
                    # Header/footer lines carry no 'key: value' pair.
                    continue
                cur_opt = aLine[:idx]
                cur_val = aLine[(idx + 2):-1]  # skip ': ' and trailing newline
                if cur_opt == 'model':
                    opt.model = cur_val
                elif cur_opt == 'dataroot':
                    opt.dataroot = cur_val
                elif cur_opt == 'savepath':
                    opt.savepath = cur_val
                elif cur_opt == 'nEpoch':
                    # BUGFIX: previously assigned to opt.savepath (copy-paste
                    # error), silently clobbering the save path.
                    opt.nEpoch = int(cur_val)
                elif cur_opt == 'lr':
                    opt.lr = float(cur_val)
                elif cur_opt == 'disp_div_N':
                    opt.disp_div_N = int(cur_val)
                elif cur_opt == 'batchSize':
                    opt.batchSize = int(cur_val)
                elif cur_opt == 'input_nc':
                    opt.input_nc = int(cur_val)
                elif cur_opt == 'gpu_ids':
                    cur_val = cur_val[1:-1]  # strip the list brackets, e.g. '[0]'
                    opt.gpu_ids = [int(cur_val)]
                    print('Use GPU id......')
                elif cur_opt == 'name':
                    opt.name = cur_val
                elif cur_opt == 'use_residual':
                    opt.use_residual = (cur_val == 'True')
                elif cur_opt == 'no_flip':
                    # BUGFIX: previously assigned to opt.use_residual.
                    opt.no_flip = (cur_val == 'True')
                elif cur_opt == 'lambda_cost':
                    opt.lambda_cost = float(cur_val)
                elif cur_opt == 'weight_decay':
                    opt.weight_decay = float(cur_val)
                elif cur_opt == 'use_dropout':
                    opt.use_dropout = (cur_val == 'True')
                elif cur_opt == 'optimizer':
                    opt.optimizer = cur_val
                elif cur_opt == 'ri':
                    opt.ri = (cur_val == 'True')
                elif cur_opt == 'normalize':
                    opt.normalize = (cur_val == 'True')
                else:
                    # Unrecognized key: drop into the project's debugger hook.
                    # NOTE(review): `st` is presumably pdb.set_trace — confirm.
                    st()
        return opt
|
class TrainingInstance(object):
    """A single training instance (sentence pair)."""

    def __init__(self, tokens):
        # Input and target are the same token sequence (LM-style training).
        self.tokens = tokens
        self.input_tokens = tokens
        self.target_tokens = tokens

    def __str__(self):
        printable = ' '.join(tokenization.printable_text(tok) for tok in self.tokens)
        return ('tokens: %s\n' % printable) + '\n'

    def __repr__(self):
        return self.__str__()
|
def write_instance_to_example_files(instances, word_to_id, max_seq_length, output_files):
    """Create TF example files from `TrainingInstance`s.

    Instances are distributed round-robin across `output_files`; each example
    carries zero-padded input_ids / target_ids / input_mask features. The
    first 20 examples are echoed to the log for sanity checking.
    """
    writers = [tf.python_io.TFRecordWriter(path) for path in output_files]
    writer_index = 0
    total_written = 0
    for (inst_index, instance) in enumerate(instances):
        input_ids = [word_to_id[tok] for tok in instance.input_tokens]
        target_ids = [word_to_id[tok] for tok in instance.target_tokens]
        input_mask = [1] * len(input_ids)
        assert len(input_ids) <= max_seq_length
        # Zero-pad all three sequences out to max_seq_length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            target_ids.append(0)
            input_mask.append(0)
        assert len(input_ids) == max_seq_length
        assert len(target_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        features = collections.OrderedDict()
        features['input_ids'] = create_int_feature(input_ids)
        features['target_ids'] = create_int_feature(target_ids)
        features['input_mask'] = create_int_feature(input_mask)
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writers[writer_index].write(tf_example.SerializeToString())
        # Round-robin across the output shards.
        writer_index = (writer_index + 1) % len(writers)
        total_written += 1
        if inst_index < 20:
            tf.logging.info('*** Example ***')
            tf.logging.info('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))
            for feature_name in features.keys():
                feature = features[feature_name]
                values = []
                if feature.int64_list.value:
                    values = feature.int64_list.value
                elif feature.float_list.value:
                    values = feature.float_list.value
                tf.logging.info('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))
    for writer in writers:
        writer.close()
    tf.logging.info('Wrote %d total instances', total_written)
|
def create_int_feature(values):
    """Wrap an iterable of ints in a tf.train.Feature (Int64List)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
|
def create_float_feature(values):
    """Wrap an iterable of floats in a tf.train.Feature (FloatList)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
|
def create_training_instances(all_tokens, vocab_words, max_seq_length, rng):
    """Create `TrainingInstance`s from raw text.

    Args:
        all_tokens: list of token lists, one per sentence; shuffled in place.
        vocab_words: unused here; kept for interface compatibility.
        max_seq_length: maximum sequence length including [SOS]/[EOS].
        rng: random.Random used for shuffling.

    Returns:
        list of TrainingInstance, in shuffled order.
    """
    rng.shuffle(all_tokens)
    instances = []
    print('Process of "create_training_instances"')
    for tokens in all_tokens:
        instances.append(create_instances_from_sentence(tokens, max_seq_length, rng))
    rng.shuffle(instances)
    print('finished')  # BUGFIX: message was misspelled 'finised'
    return instances
|
def create_instances_from_sentence(tokens, max_seq_length, rng):
    """Creates a `TrainingInstance` for a single sentence.

    Truncates `tokens` in place (randomly from either end) to fit
    max_seq_length - 2, then ensures the sequence is wrapped in
    [SOS] ... [EOS] markers.
    """
    max_num_tokens = max_seq_length - 2
    assert len(tokens) >= 1
    if len(tokens) >= max_num_tokens:
        truncate_seq(tokens, max_num_tokens, rng)
    # BUGFIX: compare strings with != rather than `is not`; identity of
    # string literals is implementation-dependent (and a SyntaxWarning).
    if tokens[0] != '[SOS]':
        tokens.insert(0, '[SOS]')
    if tokens[-1] != '[EOS]':
        tokens.append('[EOS]')
    instance = TrainingInstance(tokens)
    return instance
|
def truncate_seq(tokens, max_num_tokens, rng):
    """Truncate `tokens` in place to at most `max_num_tokens` entries.

    Each removed token is dropped from the front or the back with equal
    probability, so on average both ends of the sequence shrink.
    """
    while len(tokens) > max_num_tokens:
        assert len(tokens) >= 1
        if rng.random() < 0.5:
            del tokens[0]   # drop from the front
        else:
            tokens.pop()    # drop from the back
|
def read_all_sentences(input_files):
    """Collect all non-empty (stripped) lines from every file in `input_files`."""
    all_sentences = []
    for path in input_files:
        with open(path, 'r') as reader:
            for raw_line in reader.readlines():
                stripped = raw_line.strip()
                if stripped:
                    all_sentences.append(stripped)
    return all_sentences
|
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Creates an optimizer training op.

    Builds a learning-rate schedule (optional linear warmup into a linear
    decay to 0), constructs AdamWeightDecayOptimizer, clips gradients to
    global norm 1.0, and groups the update with a manual global_step bump.

    Args:
        loss: scalar loss tensor to minimize.
        init_lr: peak learning rate (reached at the end of warmup).
        num_train_steps: total steps for the decay schedule.
        num_warmup_steps: steps of linear warmup; falsy skips warmup.
        use_tpu: wrap the optimizer in a CrossShardOptimizer for TPU.

    Returns:
        (train_op, learning_rate): the training op and the scheduled LR tensor.
    """
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
    # power=1.0 makes this a straight linear decay to 0 over num_train_steps.
    learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False)
    if num_warmup_steps:
        # While global_step < num_warmup_steps, ramp the LR linearly from 0
        # up to init_lr; afterwards fall back to the decayed schedule above.
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = (global_steps_float / warmup_steps_float)
        warmup_learning_rate = (init_lr * warmup_percent_done)
        is_warmup = tf.cast((global_steps_int < warmup_steps_int), tf.float32)
        learning_rate = (((1.0 - is_warmup) * learning_rate) + (is_warmup * warmup_learning_rate))
    # Adam with decoupled L2 weight decay; norm and bias variables excluded.
    optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
    # AdamWeightDecayOptimizer.apply_gradients does not advance global_step,
    # so it is incremented explicitly and grouped with the update op.
    new_global_step = (global_step + 1)
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return (train_op, learning_rate)
|
class AdamWeightDecayOptimizer(tf.train.Optimizer):
    """A basic Adam optimizer that includes "correct" L2 weight decay.

    Weight decay is applied directly to the parameter update (decoupled from
    the Adam moment estimates) rather than folded into the gradient. Note
    two visible quirks: no Adam bias correction is applied to m/v, and
    `apply_gradients` does NOT advance `global_step` (callers do that).
    """

    def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, name='AdamWeightDecayOptimizer'):
        """Constructs a AdamWeightDecayOptimizer.

        Args:
            learning_rate: scalar (or tensor) learning rate.
            weight_decay_rate: decoupled L2 decay coefficient (0 disables it).
            beta_1: exponential decay rate for the first moment.
            beta_2: exponential decay rate for the second moment.
            epsilon: denominator fuzz factor.
            exclude_from_weight_decay: regexes of variable names to skip decay.
            name: optimizer name.
        """
        super(AdamWeightDecayOptimizer, self).__init__(False, name)
        self.learning_rate = learning_rate
        self.weight_decay_rate = weight_decay_rate
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.exclude_from_weight_decay = exclude_from_weight_decay

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """See base class. `global_step` is accepted but not advanced here."""
        assignments = []
        for (grad, param) in grads_and_vars:
            if ((grad is None) or (param is None)):
                continue
            param_name = self._get_variable_name(param.name)
            # Per-parameter moment slots, created as non-trainable variables.
            m = tf.get_variable(name=(param_name + '/adam_m'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
            v = tf.get_variable(name=(param_name + '/adam_v'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
            # Standard Adam moment updates (no bias correction applied).
            next_m = (tf.multiply(self.beta_1, m) + tf.multiply((1.0 - self.beta_1), grad))
            next_v = (tf.multiply(self.beta_2, v) + tf.multiply((1.0 - self.beta_2), tf.square(grad)))
            update = (next_m / (tf.sqrt(next_v) + self.epsilon))
            # Decoupled weight decay: added to the update rather than the
            # gradient, so it is not scaled by the adaptive denominator.
            if self._do_use_weight_decay(param_name):
                update += (self.weight_decay_rate * param)
            update_with_lr = (self.learning_rate * update)
            next_param = (param - update_with_lr)
            assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])
        return tf.group(*assignments, name=name)

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if (not self.weight_decay_rate):
            return False
        if self.exclude_from_weight_decay:
            for r in self.exclude_from_weight_decay:
                if (re.search(r, param_name) is not None):
                    return False
        return True

    def _get_variable_name(self, param_name):
        """Get the variable name from the tensor name (strip ':0' suffix)."""
        m = re.match('^(.*):\\d+$', param_name)
        if (m is not None):
            param_name = m.group(1)
        return param_name
|
def model_fn_builder(config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    The closure builds a BERT encoder over (input_ids, input_mask), attaches
    the LM head, optionally restores weights from `init_checkpoint`, and
    returns a TPUEstimatorSpec for TRAIN or EVAL (PREDICT is not supported).
    """
    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator."""
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info((' name = %s, shape = %s' % (name, features[name].shape)))
        input_ids = features['input_ids']
        target_ids = features['target_ids']
        input_mask = features['input_mask']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        model = modeling.BertModel(config=config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, use_one_hot_embeddings=use_one_hot_embeddings)
        # LM head: reuses the embedding table as (tied) output weights.
        (lm_loss, lm_example_loss, lm_log_probs) = get_lm_output(config, model.get_sequence_output(), model.get_embedding_table(), target_ids, input_mask)
        total_loss = lm_loss
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:
                # On TPU the checkpoint restore must happen inside the
                # scaffold function.
                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if (var.name in initialized_variable_names):
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            (train_op, _lr) = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            tf.summary.scalar('learning_rate', _lr)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.EVAL):
            def metric_fn(lm_example_loss, lm_log_probs):
                """Computes the mean per-example LM loss as an eval metric."""
                lm_log_probs = tf.reshape(lm_log_probs, [(- 1), lm_log_probs.shape[(- 1)]])
                lm_predictions = tf.argmax(lm_log_probs, axis=(- 1), output_type=tf.int32)
                lm_example_loss = tf.reshape(lm_example_loss, [(- 1)])
                lm_mean_loss = tf.metrics.mean(values=lm_example_loss)
                return {'lm_loss': lm_mean_loss}
            eval_metrics = (metric_fn, [lm_example_loss, lm_log_probs])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
        else:
            raise ValueError(('Only TRAIN and EVAL modes are supported: %s' % mode))
        return output_spec
    return model_fn
|
def get_lm_output(config, input_tensor, output_weights, label_ids, label_mask):
    """Get loss and log probs for the LM.

    Args:
        config: model config providing hidden_size, hidden_act,
            initializer_range and vocab_size.
        input_tensor: rank-3 sequence output of the encoder
            (flattened internally to [batch*seq, hidden]).
        output_weights: matrix used (transposed) as the softmax weights —
            weight tying with the embedding table.
        label_ids: target token ids, reshaped to a flat vector.
        label_mask: per-position mask; masked-out positions get zero loss.

    Returns:
        (loss, per_example_loss, log_probs): scalar mean loss, masked
        per-position loss, and per-position log-probabilities.
    """
    input_shape = modeling.get_shape_list(input_tensor, expected_rank=3)
    # Flatten [batch, seq, hidden] -> [batch*seq, hidden].
    input_tensor = tf.reshape(input_tensor, [(input_shape[0] * input_shape[1]), input_shape[2]])
    with tf.variable_scope('cls/predictions'):
        with tf.variable_scope('transform'):
            input_tensor = tf.layers.dense(input_tensor, units=config.hidden_size, activation=modeling.get_activation(config.hidden_act), kernel_initializer=modeling.create_initializer(config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)
        output_bias = tf.get_variable('output_bias', shape=[config.vocab_size], initializer=tf.zeros_initializer())
        # Weight tying: project back through the transposed embedding table.
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=(- 1))
        label_ids = tf.reshape(label_ids, [(- 1)])
        one_hot_labels = tf.one_hot(label_ids, depth=config.vocab_size, dtype=tf.float32)
        # Cross entropy: negative log-probability of the gold token.
        per_example_loss = (- tf.reduce_sum((log_probs * one_hot_labels), axis=[(- 1)]))
        label_mask = tf.reshape(label_mask, [(input_shape[0] * input_shape[1])])
        loss_mask = tf.dtypes.cast(label_mask, tf.float32)
        per_example_loss = tf.math.multiply(per_example_loss, loss_mask)
        # NOTE(review): the mean is taken over ALL positions, including
        # masked/padded ones — confirm this averaging is intended.
        loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, log_probs)
|
def input_fn_builder(input_files, max_seq_length, is_training, num_cpu_threads=4):
    """Creates an `input_fn` closure to be passed to TPUEstimator.

    Training input shuffles and interleaves the TFRecord shards; eval input
    reads them sequentially. Both repeat indefinitely and drop remainder
    batches (drop_remainder=True keeps TPU batch shapes static).
    """
    def input_fn(params):
        """The actual input function."""
        # TPUEstimator passes the per-shard batch size through `params`.
        batch_size = params['batch_size']
        name_to_features = {'input_ids': tf.FixedLenFeature([max_seq_length], tf.int64), 'target_ids': tf.FixedLenFeature([max_seq_length], tf.int64), 'input_mask': tf.FixedLenFeature([max_seq_length], tf.int64)}
        if is_training:
            d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
            d = d.repeat()
            d = d.shuffle(buffer_size=len(input_files))
            # Read up to cycle_length files in parallel; sloppy=True trades
            # deterministic order for throughput while training.
            cycle_length = min(num_cpu_threads, len(input_files))
            d = d.apply(tf.contrib.data.parallel_interleave(tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length))
            d = d.shuffle(buffer_size=100)
        else:
            d = tf.data.TFRecordDataset(input_files)
            # Eval also repeats; the caller bounds it via `steps`.
            d = d.repeat()
        d = d.apply(tf.contrib.data.map_and_batch((lambda record: _decode_record(record, name_to_features)), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True))
        return d
    return input_fn
|
def _decode_record(record, name_to_features):
    """Decodes a serialized record into a feature dict, downcasting int64 to int32."""
    example = tf.parse_single_example(record, name_to_features)
    # tf.Example only stores int64 features; downcast them to int32.
    for name in list(example.keys()):
        tensor = example[name]
        if tensor.dtype == tf.int64:
            tensor = tf.to_int32(tensor)
        example[name] = tensor
    return example
|
def main(_):
    """Entry point: snapshot config/vocab, then train and/or eval per FLAGS."""
    import shutil  # local import: only used for the file snapshot below
    tf.logging.set_verbosity(tf.logging.INFO)
    if (not FLAGS.do_train) and (not FLAGS.do_eval):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    config = modeling.BertConfig.from_json_file(FLAGS.config_file)
    tf.gfile.MakeDirs(FLAGS.output_dir)
    # Snapshot the config and vocab into output_dir for reproducibility.
    # BUGFIX: os.system(f'cp {src} {dst}') is Unix-only, breaks on paths
    # with spaces, and silently ignores failures; shutil.copyfile is
    # portable and surfaces errors.
    src = FLAGS.config_file
    dst = os.path.join(FLAGS.output_dir, FLAGS.config_file.split('/')[-1])
    shutil.copyfile(src, dst)
    src = os.path.join(FLAGS.config_file.split('/')[0], config.vocab_file)
    dst = os.path.join(FLAGS.output_dir, config.vocab_file)
    shutil.copyfile(src, dst)
    input_files = []
    for input_pattern in FLAGS.input_file.split(','):
        input_files.extend(tf.gfile.Glob(input_pattern))
    tf.logging.info('*** Input Files ***')
    for input_file in input_files:
        tf.logging.info(' %s' % input_file)
    eval_input_files = []
    for eval_input_pattern in FLAGS.eval_input_file.split(','):
        eval_input_files.extend(tf.gfile.Glob(eval_input_pattern))
    tf.logging.info('*** Eval Files ***')
    for eval_input_file in eval_input_files:
        tf.logging.info(' %s' % eval_input_file)
    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host))
    model_fn = model_fn_builder(config=config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu)
    estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size)
    if FLAGS.do_train:
        tf.logging.info('***** Running training *****')
        tf.logging.info(' Batch size = %d', FLAGS.train_batch_size)
        train_input_fn = input_fn_builder(input_files=input_files, max_seq_length=FLAGS.max_seq_length, is_training=True)
        if FLAGS.do_eval:
            # Interleave training with periodic evaluation.
            train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
            eval_input_fn = input_fn_builder(input_files=eval_input_files, max_seq_length=FLAGS.max_seq_length, is_training=False)
            eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
            tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        else:
            estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
    if FLAGS.do_eval:
        tf.logging.info('***** Running evaluation *****')
        tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size)
        eval_input_fn = input_fn_builder(input_files=eval_input_files, max_seq_length=FLAGS.max_seq_length, is_training=False)
        result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
        output_eval_input_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
        with tf.gfile.GFile(output_eval_input_file, 'w') as writer:
            tf.logging.info('***** Eval results *****')
            for key in sorted(result.keys()):
                tf.logging.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
|
class TestingInstance(object):
    """A single test instance (sentence pair)."""

    def __init__(self, tokens):
        # Input and target are the same token sequence.
        self.tokens = tokens
        self.input_tokens = tokens
        self.target_tokens = tokens

    def __str__(self):
        printable = ' '.join(tokenization.printable_text(tok) for tok in self.tokens)
        return ('tokens: %s\n' % printable) + '\n'

    def __repr__(self):
        return self.__str__()
|
def create_testing_instances(sentence, tokenizer, max_seq_length=128):
    """Create `TestInstance`s from raw text.

    Tokenizes `sentence`, truncates to max_seq_length - 2 tokens, wraps the
    sequence in [SOS]/[EOS] markers, and returns a single-element list.
    """
    max_token_num = max_seq_length - 2
    tokens = tokenizer.tokenize(sentence)
    if len(tokens) > max_token_num:
        tokens = tokens[:max_token_num]
    # BUGFIX: compare strings with != rather than `is not`; identity of
    # string literals is implementation-dependent (and a SyntaxWarning).
    if tokens[0] != '[SOS]':
        tokens.insert(0, '[SOS]')
    if tokens[-1] != '[EOS]':
        tokens.append('[EOS]')
    instances = []
    instances.append(create_instances_from_tokens(tokens))
    return instances
|
def create_instances_from_tokens(tokens):
    """Wrap a token sequence in a `TestingInstance`."""
    return TestingInstance(tokens)
|
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
    """Raise ValueError if `do_lower_case` contradicts the checkpoint's casing.

    The casing is inferred from the well-known Google BERT checkpoint
    directory names; unrecognized names pass silently.
    """
    if not init_checkpoint:
        return
    found = re.match('^.*?([A-Za-z0-9_-]+)/bert_model.ckpt', init_checkpoint)
    if found is None:
        return
    model_name = found.group(1)
    lower_models = ['uncased_L-24_H-1024_A-16', 'uncased_L-12_H-768_A-12', 'multilingual_L-12_H-768_A-12', 'chinese_L-12_H-768_A-12']
    cased_models = ['cased_L-12_H-768_A-12', 'cased_L-24_H-1024_A-16', 'multi_cased_L-12_H-768_A-12']
    # (actual_flag, case_name, opposite_flag) when the flag mismatches.
    if (model_name in lower_models) and (not do_lower_case):
        mismatch = ('False', 'lowercased', 'True')
    elif (model_name in cased_models) and do_lower_case:
        mismatch = ('True', 'cased', 'False')
    else:
        mismatch = None
    if mismatch:
        (actual_flag, case_name, opposite_flag) = mismatch
        raise ValueError(('You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. However, `%s` seems to be a %s model, so you should pass in `--do_lower_case=%s` so that the fine-tuning matches how the model was pre-training. If this error is wrong, please just comment out this check.' % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)))
|
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError('Unsupported string type: %s' % type(text))
    if six.PY2:
        # On Python 2, `str` is bytes and must be decoded; `unicode` passes through.
        if isinstance(text, str):
            return text.decode('utf-8', 'ignore')
        if isinstance(text, unicode):
            return text
        raise ValueError('Unsupported string type: %s' % type(text))
    raise ValueError('Not running on Python2 or Python 3?')
|
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    Unlike convert_to_unicode, on Python 2 this returns a utf-8 encoded
    byte string (what `print` expects there); on Python 3 it returns `str`.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError(('Unsupported string type: %s' % type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode('utf-8')
        raise ValueError(('Unsupported string type: %s' % type(text)))
    raise ValueError('Not running on Python2 or Python 3?')
|
def load_vocab(vocab_file):
    """Loads a vocabulary file into an OrderedDict of token -> index.

    One token per line; the line number (0-based) becomes the id.
    """
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, 'r') as reader:
        index = 0
        while True:
            line = convert_to_unicode(reader.readline())
            if not line:
                # readline() returns '' only at EOF.
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
|
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab mapping."""
    return [vocab[item] for item in items]
|
def convert_tokens_to_ids(vocab, tokens):
    """Converts a sequence of tokens to ids via the token->id vocab."""
    return convert_by_vocab(vocab, tokens)
|
def convert_ids_to_tokens(inv_vocab, ids):
    """Converts a sequence of ids to tokens via the id->token vocab."""
    return convert_by_vocab(inv_vocab, ids)
|
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # split() with no argument collapses runs of whitespace, so an
    # all-whitespace input yields [] either way; the explicit branch keeps
    # the empty-input behavior obvious.
    return stripped.split() if stripped else []
|
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization, then WordPiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {idx: tok for (tok, idx) in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        # Basic tokenization first, then WordPiece on each resulting token.
        return [piece
                for word in self.basic_tokenizer.tokenize(text)
                for piece in self.wordpiece_tokenizer.tokenize(word)]

    def convert_tokens_to_ids(self, tokens):
        return convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
|
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = self._tokenize_chinese_chars(self._clean_text(convert_to_unicode(text)))
        pieces = []
        for token in whitespace_tokenize(text):
            if self.do_lower_case:
                token = self._run_strip_accents(token.lower())
            pieces.extend(self._run_split_on_punc(token))
        # Re-join and re-split so punctuation pieces become separate tokens.
        return whitespace_tokenize(' '.join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD-decompose, then drop combining marks (category 'Mn').
        decomposed = unicodedata.normalize('NFD', text)
        return ''.join(ch for ch in decomposed
                       if unicodedata.category(ch) != 'Mn')

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Each punctuation character becomes its own piece.
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return [''.join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((' ', ch, ' '))
            else:
                out.append(ch)
        return ''.join(out)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # CJK Unified Ideographs blocks (base, extensions A-E) plus the two
        # compatibility-ideograph blocks.
        cjk_ranges = ((0x4E00, 0x9FFF), (0x3400, 0x4DBF),
                      (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
                      (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF),
                      (0xF900, 0xFAFF), (0x2F800, 0x2FA1F))
        return any(lo <= cp <= hi for (lo, hi) in cjk_ranges)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            if (cp == 0) or (cp == 0xFFFD) or _is_control(ch):
                # Drop NUL, the Unicode replacement char, and controls.
                continue
            cleaned.append(' ' if _is_whitespace(ch) else ch)
        return ''.join(cleaned)
|
class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Uses a greedy longest-match-first algorithm against the vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. Should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Overlong tokens are mapped straight to [UNK].
                output_tokens.append(self.unk_token)
                continue
            pieces = self._greedy_split(chars)
            if pieces is None:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens

    def _greedy_split(self, chars):
        """Longest-match-first split; None if some span has no vocab match."""
        pieces = []
        start = 0
        while start < len(chars):
            end = len(chars)
            match = None
            # Shrink the candidate from the right until it is in the vocab.
            while start < end:
                candidate = ''.join(chars[start:end])
                if start > 0:
                    candidate = ('##' + candidate)
                if candidate in self.vocab:
                    match = candidate
                    break
                end -= 1
            if match is None:
                return None
            pieces.append(match)
            start = end
        return pieces
|
def _is_whitespace(char):
'Checks whether `chars` is a whitespace character.'
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False
|
def _is_control(char):
'Checks whether `chars` is a control character.'
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False
|
def _is_punctuation(char):
'Checks whether `chars` is a punctuation character.'
cp = ord(char)
if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False
|
def path2gt(file_path, dataset):
    """Dispatches ground-truth extraction to the dataset-specific helper.

    Args:
        file_path: path of the audio file.
        dataset: one of 'GTZAN', 'Ballroom', 'ExtendedBallroom',
            'UrbanSound8K'.

    Returns:
        The integer class label for the file.

    Raises:
        ValueError: if `dataset` is not a supported name.
    """
    if dataset == 'GTZAN':
        return gtzan_path2gt(file_path)
    elif dataset == 'Ballroom':
        return ballroom_path2gt(file_path)
    elif dataset == 'ExtendedBallroom':
        return extended_ballroom_path2gt(file_path)
    elif dataset == 'UrbanSound8K':
        return urban_sound_path2gt(file_path)
    # Fail loudly instead of dropping into an interactive debugger
    # (the original called ipdb.set_trace() here).
    raise ValueError('Unknown dataset: %s' % dataset)
|
def gtzan_path2gt(file_path):
    """Returns the integer GTZAN genre label encoded in `file_path`.

    GTZAN file names look like `<genre>.<id>.au`; the genre token is the
    base-name part before the dot that precedes the id.

    Raises:
        ValueError: if the extracted tag is not a known GTZAN genre.
    """
    # Slice between the last '/' and the dot preceding the id/extension
    # (the upper bound -4 skips the final '.au' extension dot).
    tag = file_path[(file_path.rfind('/') + 1):file_path.rfind('.', 0, (- 4))]
    print(tag)
    genres = {'blues': 0, 'classical': 1, 'country': 2, 'disco': 3,
              'hiphop': 4, 'jazz': 5, 'metal': 6, 'pop': 7,
              'reggae': 8, 'rock': 9}
    try:
        return genres[tag]
    except KeyError:
        # Fail loudly instead of dropping into a debugger (old behavior
        # printed a warning and called ipdb.set_trace()).
        raise ValueError('Did not find the corresponding ground truth (%s).' % tag)
|
def ballroom_path2gt(file_path):
    """Returns the integer Ballroom label from the parent directory name.

    Ballroom audio files are laid out as `.../<Genre>/<file>`, so the
    label is derived from the immediate parent directory.

    Raises:
        ValueError: if the directory name is not a known Ballroom genre.
    """
    parent_dir = file_path[:file_path.rfind('/')]
    tag = parent_dir[(parent_dir.rfind('/') + 1):]
    print(tag)
    labels = {'ChaChaCha': 0, 'Jive': 1, 'Quickstep': 2, 'Rumba': 3,
              'Samba': 4, 'Tango': 5, 'VienneseWaltz': 6, 'Waltz': 7}
    try:
        return labels[tag]
    except KeyError:
        # Fail loudly instead of dropping into a debugger (old behavior
        # printed a warning and called ipdb.set_trace()).
        raise ValueError('Did not find the corresponding ground truth (%s).' % tag)
|
def extended_ballroom_path2gt(file_path):
    """Returns the integer Extended Ballroom label from the parent directory.

    Same layout as Ballroom (`.../<Genre>/<file>`), with 13 genres.

    Raises:
        ValueError: if the directory name is not a known genre.
    """
    parent_dir = file_path[:file_path.rfind('/')]
    tag = parent_dir[(parent_dir.rfind('/') + 1):]
    print(tag)
    labels = {'Chacha': 0, 'Foxtrot': 1, 'Jive': 2, 'Pasodoble': 3,
              'Quickstep': 4, 'Rumba': 5, 'Salsa': 6, 'Samba': 7,
              'Slowwaltz': 8, 'Tango': 9, 'Viennesewaltz': 10,
              'Waltz': 11, 'Wcswing': 12}
    try:
        return labels[tag]
    except KeyError:
        # Fail loudly instead of dropping into a debugger (old behavior
        # printed a warning and called ipdb.set_trace()).
        raise ValueError('Did not find the corresponding ground truth (%s).' % tag)
|
def urban_sound_path2gt(file_path):
    """Maps an UrbanSound8K audio file path to its integer class ID by
    looking the file's base name up in the dataset's metadata CSV.
    """
    # Base name of the audio file (everything after the last '/').
    tag = file_path[(file_path.rfind('/') + 1):]
    print(tag)
    # NOTE(review): hard-coded, machine-specific metadata path — only valid
    # on the original author's storage layout; consider making it
    # configurable.
    df = pd.read_csv('/datasets/MTG/users/jpons/urban_sounds/UrbanSound8K/metadata/UrbanSound8K.csv')
    # 'slice_file_name'/'classID' are UrbanSound8K.csv columns; int(...)
    # assumes exactly one row matches the file name — TODO confirm.
    return int(df[(df.slice_file_name == tag)].classID)
|
def build(config, x_in):
    """Builds the front-end selected by config['CNN']['architecture'].

    Returns whatever the selected architecture function returns (a list of
    tensors), or None for an unrecognized architecture name.
    """
    dispatch = {
        'cnn_small_filters': cnn_small_filters,
        'cnn_single': cnn_single,
        'cnn_music': cnn_music,
        'sample_level': sample_level,
        'frame_level': frame_level,
        'frame_level_many': frame_level_many,
        'cnn_audio': cnn_audio,
    }
    builder = dispatch.get(config['CNN']['architecture'])
    if builder is not None:
        return builder(config, x_in)
    # Unknown architecture: fall through returning None, as the original
    # if/elif chain did.
|
def cnn_small_filters(config, x_in):
    """Front-end made of five 3x3 conv + max-pool stages on a spectrogram.

    Returns the list of the five pooled feature maps.
    """
    with tf.name_scope('cnn_small_filters'):
        print(('[SMALL FILTERS] Input: ' + str(x_in.get_shape)))
        net = tf.reshape(x_in, [(- 1), config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        # Per-stage pooling shape (used for both pool_size and strides).
        pool_shapes = ([4, 2], [4, 3], [5, 2], [4, 2], [4, 4])
        pools = []
        for stage, pool_shape in enumerate(pool_shapes, start=1):
            conv = tf.layers.conv2d(inputs=net, filters=config['CNN']['num_filters'], kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name=('%dCNN' % stage), kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
            net = tf.layers.max_pooling2d(inputs=conv, pool_size=pool_shape, strides=pool_shape)
            pools.append(net)
        for pool in pools:
            print(pool.get_shape)
        return pools
|
def cnn_single(config, x_in):
    """Front-end with a single configurable conv + max-pool pair.

    Filter and pool shapes come from config['CNN']['filter_shape'] and
    config['CNN']['pool_shape'].
    """
    with tf.name_scope('cnn_single'):
        print(('[CNN SINGLE] Input: ' + str(x_in.get_shape)))
        spectrogram = tf.reshape(x_in, [(- 1), config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        conv1 = tf.layers.conv2d(inputs=spectrogram, filters=config['CNN']['num_filters'], kernel_size=config['CNN']['filter_shape'], padding='valid', activation=tf.nn.relu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=config['CNN']['pool_shape'], strides=config['CNN']['pool_shape'])
        print(conv1.get_shape)
        print(pool1.get_shape)
        return [conv1, pool1]
|
def cnn_music(config, x_in):
    """Musically-motivated front-end: six timbral 2-D conv branches plus
    four temporal 1-D conv branches over mel-averaged envelopes.

    Args:
        config: dict; config['CNN'] must provide 'num_filters', 'n_frames'
            and 'n_mels'.
        x_in: input tensor, reshaped internally to
            [batch, n_frames, n_mels, 1].

    Returns:
        [timbral, temporal]: concatenated timbral and temporal feature maps.
    """
    # The temporal branches drop `remove` filters so total feature counts
    # stay comparable. The original if/elif chain enumerated num_filters in
    # {4, 8, 16, 32, 64, 128, 256} (and raised NameError for anything
    # else); the ratio is always num_filters / 4, so compute it directly —
    # identical for all previously supported values and defined for any
    # filter count.
    remove = config['CNN']['num_filters'] // 4
    with tf.name_scope('cnn_music'):
        print(('[MUSIC] Input: ' + str(x_in.get_shape)))
        input_layer = tf.reshape(x_in, [(- 1), config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        # Time-axis padding so the 7- and 3-tall kernels preserve length.
        input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
        input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
        # Timbral branches: vertical kernels covering 90% / 40% of the mels.
        conv1 = tf.layers.conv2d(inputs=input_pad_7, filters=config['CNN']['num_filters'], kernel_size=[7, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[1, conv1.shape[2]], strides=[1, conv1.shape[2]])
        p1 = tf.squeeze(pool1, [2])
        conv2 = tf.layers.conv2d(inputs=input_pad_3, filters=(config['CNN']['num_filters'] * 2), kernel_size=[3, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, conv2.shape[2]], strides=[1, conv2.shape[2]])
        p2 = tf.squeeze(pool2, [2])
        conv3 = tf.layers.conv2d(inputs=input_layer, filters=(config['CNN']['num_filters'] * 4), kernel_size=[1, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[1, conv3.shape[2]], strides=[1, conv3.shape[2]])
        p3 = tf.squeeze(pool3, [2])
        conv4 = tf.layers.conv2d(inputs=input_pad_7, filters=config['CNN']['num_filters'], kernel_size=[7, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[1, conv4.shape[2]], strides=[1, conv4.shape[2]])
        p4 = tf.squeeze(pool4, [2])
        conv5 = tf.layers.conv2d(inputs=input_pad_3, filters=(config['CNN']['num_filters'] * 2), kernel_size=[3, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[1, conv5.shape[2]], strides=[1, conv5.shape[2]])
        p5 = tf.squeeze(pool5, [2])
        conv6 = tf.layers.conv2d(inputs=input_layer, filters=(config['CNN']['num_filters'] * 4), kernel_size=[1, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool6 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[1, conv6.shape[2]], strides=[1, conv6.shape[2]])
        p6 = tf.squeeze(pool6, [2])
        # Temporal branches: average across mels, then 1-D convs with
        # decreasing kernel sizes.
        pool7 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool7_rs = tf.squeeze(pool7, [3])
        conv7 = tf.layers.conv1d(inputs=pool7_rs, filters=(config['CNN']['num_filters'] - remove), kernel_size=165, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool8 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool8_rs = tf.squeeze(pool8, [3])
        conv8 = tf.layers.conv1d(inputs=pool8_rs, filters=((config['CNN']['num_filters'] * 2) - remove), kernel_size=128, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool9 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool9_rs = tf.squeeze(pool9, [3])
        conv9 = tf.layers.conv1d(inputs=pool9_rs, filters=((config['CNN']['num_filters'] * 4) - remove), kernel_size=64, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool10 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool10_rs = tf.squeeze(pool10, [3])
        conv10 = tf.layers.conv1d(inputs=pool10_rs, filters=((config['CNN']['num_filters'] * 8) - remove), kernel_size=32, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        timbral = tf.concat([p1, p2, p3, p4, p5, p6], 2)
        temporal = tf.concat([conv7, conv8, conv9, conv10], 2)
        print(timbral.get_shape)
        print(temporal.get_shape)
        return [timbral, temporal]
|
def backend(route_out, config):
    """Shared back-end: three temporal convs with two residual connections.

    Args:
        route_out: front-end output tensor; axis 2 is the feature axis that
            each conv spans fully.
        config: dict; config['CNN']['num_filters'] sets each conv's width.

    Returns:
        [conv1_t, res_conv2, res_conv5]: the first conv output (transposed)
        and the two residual-sum feature maps.
    """
    # Conv over the full feature axis, then transpose so the filter axis
    # takes its place — this makes the residual adds below shape-compatible.
    conv1 = tf.layers.conv2d(inputs=route_out, filters=config['CNN']['num_filters'], kernel_size=[7, route_out.shape[2]], padding='valid', activation=tf.nn.relu, name='1cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    conv1_t = tf.transpose(conv1, [0, 1, 3, 2])
    # Pad the time axis by 3 on each side so the 7-wide conv keeps length.
    bn_conv1_pad = tf.pad(conv1_t, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv2 = tf.layers.conv2d(inputs=bn_conv1_pad, filters=config['CNN']['num_filters'], kernel_size=[7, bn_conv1_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='2cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    conv2_t = tf.transpose(conv2, [0, 1, 3, 2])
    # First residual connection.
    res_conv2 = tf.add(conv2_t, conv1_t)
    # Halve the time axis before the last conv.
    pool1 = tf.layers.max_pooling2d(inputs=res_conv2, pool_size=[2, 1], strides=[2, 1], name='poolOut')
    bn_conv4_pad = tf.pad(pool1, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv5 = tf.layers.conv2d(inputs=bn_conv4_pad, filters=config['CNN']['num_filters'], kernel_size=[7, bn_conv4_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='3cnnOut', kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    conv5_t = tf.transpose(conv5, [0, 1, 3, 2])
    # Second residual connection.
    res_conv5 = tf.add(conv5_t, pool1)
    return [conv1_t, res_conv2, res_conv5]
|
def sample_level(config, x_in):
    """Sample-level front-end on raw waveforms (Lee et al., 2017,
    "Sample-level Deep Convolutional Neural Networks for Music Auto-tagging
    Using Raw Waveforms", arXiv:1703.01789).

    One strided conv followed by six conv + max-pool stages, each shrinking
    the time axis by 3x. Returns [conv0, pool_1, ..., pool_6].
    """
    num_filters = config['CNN']['num_filters']
    init = tf.contrib.layers.variance_scaling_initializer
    # Strided "frame" conv: non-overlapping windows of 3 samples.
    conv0 = tf.layers.conv1d(inputs=x_in, filters=num_filters, kernel_size=3, strides=3, padding='valid', activation=tf.nn.relu, kernel_initializer=init())
    outputs = [conv0]
    net = conv0
    for _ in range(6):
        conv = tf.layers.conv1d(inputs=net, filters=num_filters, kernel_size=3, strides=1, padding='same', activation=tf.nn.relu, kernel_initializer=init())
        net = tf.layers.max_pooling1d(conv, pool_size=3, strides=3)
        outputs.append(net)
    for pooled in outputs[1:]:
        print(pooled.get_shape)
    return outputs
|
def frame_level(config, x_in):
    """Frame-level front-end: one wide strided 1-D conv on the waveform,
    then the shared residual back-end.

    Returns [conv1, end_c1, end_cr2, end_cr3].
    """
    conv1 = tf.layers.conv1d(inputs=x_in, filters=config['CNN']['num_filters'], kernel_size=512, strides=32, padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
    (end_c1, end_cr2, end_cr3) = backend(tf.expand_dims(conv1, 3), config)
    for tensor in (conv1, end_c1, end_cr2, end_cr3):
        print(tensor.get_shape)
    return [conv1, end_c1, end_cr2, end_cr3]
|
def frame_level_many(config, x_in):
    """Multi-resolution frame-level front-end: five parallel strided 1-D
    convs with decreasing kernel sizes, concatenated and fed to the shared
    residual back-end.

    Returns the five conv outputs followed by the three back-end outputs.
    """
    init = tf.contrib.layers.variance_scaling_initializer
    convs = []
    for kernel in (512, 256, 128, 64, 32):
        convs.append(tf.layers.conv1d(inputs=x_in, filters=config['CNN']['num_filters'], kernel_size=kernel, strides=32, padding='same', activation=tf.nn.relu, kernel_initializer=init()))
    many = tf.concat(convs, 2)
    (end_c1, end_cr2, end_cr3) = backend(tf.expand_dims(many, 3), config)
    print(x_in.get_shape)
    for tensor in convs + [end_c1, end_cr2, end_cr3]:
        print(tensor.get_shape)
    return convs + [end_c1, end_cr2, end_cr3]
|
def cnn_audio(config, x_in):
    """Variant of cnn_music with shorter temporal kernels (64/32/16/8).

    Args:
        config: dict; config['CNN'] must provide 'num_filters', 'n_frames'
            and 'n_mels'.
        x_in: input tensor, reshaped internally to
            [batch, n_frames, n_mels, 1].

    Returns:
        [timbral, temporal]: concatenated timbral and temporal feature maps.
    """
    # Same fix as cnn_music: the original if/elif chain only handled
    # num_filters in {4, 8, ..., 256} and raised NameError otherwise; the
    # ratio is always num_filters / 4, so compute it directly — identical
    # for all previously supported values.
    remove = config['CNN']['num_filters'] // 4
    with tf.name_scope('cnn_audio'):
        print(('[AUDIO!] Input: ' + str(x_in.get_shape)))
        input_layer = tf.reshape(x_in, [(- 1), config['CNN']['n_frames'], config['CNN']['n_mels'], 1])
        # Time-axis padding so the 7- and 3-tall kernels preserve length.
        input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
        input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
        # Timbral branches: vertical kernels covering 90% / 40% of the mels.
        conv1 = tf.layers.conv2d(inputs=input_pad_7, filters=config['CNN']['num_filters'], kernel_size=[7, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[1, conv1.shape[2]], strides=[1, conv1.shape[2]])
        p1 = tf.squeeze(pool1, [2])
        conv2 = tf.layers.conv2d(inputs=input_pad_3, filters=(config['CNN']['num_filters'] * 2), kernel_size=[3, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[1, conv2.shape[2]], strides=[1, conv2.shape[2]])
        p2 = tf.squeeze(pool2, [2])
        conv3 = tf.layers.conv2d(inputs=input_layer, filters=(config['CNN']['num_filters'] * 4), kernel_size=[1, int((0.9 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[1, conv3.shape[2]], strides=[1, conv3.shape[2]])
        p3 = tf.squeeze(pool3, [2])
        conv4 = tf.layers.conv2d(inputs=input_pad_7, filters=config['CNN']['num_filters'], kernel_size=[7, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[1, conv4.shape[2]], strides=[1, conv4.shape[2]])
        p4 = tf.squeeze(pool4, [2])
        conv5 = tf.layers.conv2d(inputs=input_pad_3, filters=(config['CNN']['num_filters'] * 2), kernel_size=[3, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[1, conv5.shape[2]], strides=[1, conv5.shape[2]])
        p5 = tf.squeeze(pool5, [2])
        conv6 = tf.layers.conv2d(inputs=input_layer, filters=(config['CNN']['num_filters'] * 4), kernel_size=[1, int((0.4 * config['CNN']['n_mels']))], padding='valid', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool6 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[1, conv6.shape[2]], strides=[1, conv6.shape[2]])
        p6 = tf.squeeze(pool6, [2])
        # Temporal branches: average across mels, then 1-D convs with
        # decreasing kernel sizes.
        pool7 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool7_rs = tf.squeeze(pool7, [3])
        conv7 = tf.layers.conv1d(inputs=pool7_rs, filters=(config['CNN']['num_filters'] - remove), kernel_size=64, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool8 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool8_rs = tf.squeeze(pool8, [3])
        conv8 = tf.layers.conv1d(inputs=pool8_rs, filters=((config['CNN']['num_filters'] * 2) - remove), kernel_size=32, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool9 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool9_rs = tf.squeeze(pool9, [3])
        conv9 = tf.layers.conv1d(inputs=pool9_rs, filters=((config['CNN']['num_filters'] * 4) - remove), kernel_size=16, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        pool10 = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, config['CNN']['n_mels']], strides=[1, config['CNN']['n_mels']])
        pool10_rs = tf.squeeze(pool10, [3])
        conv10 = tf.layers.conv1d(inputs=pool10_rs, filters=((config['CNN']['num_filters'] * 8) - remove), kernel_size=8, padding='same', activation=tf.nn.relu, kernel_initializer=tf.contrib.layers.variance_scaling_initializer())
        timbral = tf.concat([p1, p2, p3, p4, p5, p6], 2)
        temporal = tf.concat([conv7, conv8, conv9, conv10], 2)
        print(timbral.get_shape)
        print(temporal.get_shape)
        return [timbral, temporal]
|
class BaseELM(BaseEstimator):
    """Abstract base class for Extreme Learning Machines.

    Warning: this class should not be used directly; use derived classes
    instead.
    """
    __metaclass__ = ABCMeta

    def __init__(self, hidden_layer, regressor):
        self.hidden_layer = hidden_layer
        self.regressor = regressor

    @abstractmethod
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
        y : array-like of shape [n_samples, n_outputs]

        Returns
        -------
        self : object
        """

    @abstractmethod
    def predict(self, X):
        """Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
        """
|
class GenELMRegressor(BaseELM, RegressorMixin):
    """Regressor based on the Extreme Learning Machine.

    A single-hidden-layer feedforward network with random hidden units;
    hidden->output weights are fitted by ordinary linear least squares
    (pseudo-inverse) unless a `regressor` is supplied.

    Parameters
    ----------
    hidden_layer : random_layer instance, optional
        (default=MLPRandomLayer(random_state=0))
    regressor : regressor instance, optional (default=None)
        If provided, performs the regression from hidden-unit activations
        to the outputs; otherwise a least-squares fit is used.

    Attributes
    ----------
    coefs_ : fitted regression coefficients when no regressor is supplied.
    fitted_ : True once fit() has been called.
    hidden_activations_ : hidden-layer activations for the last input.

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, hidden_layer=MLPRandomLayer(random_state=0), regressor=None):
        super(GenELMRegressor, self).__init__(hidden_layer, regressor)
        self.fitted_ = False
        self.coefs_ = None
        self.hidden_activations_ = None

    def _fit_regression(self, y):
        """Solve hidden->output weights by pseudo-inverse, or delegate to
        the supplied regressor."""
        if self.regressor is not None:
            self.regressor.fit(self.hidden_activations_, y)
        else:
            self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
        self.fitted_ = True

    def fit(self, X, y):
        """Fit the model using X, y as training data; returns self."""
        self.hidden_activations_ = self.hidden_layer.fit_transform(X)
        self._fit_regression(as_float_array(y, copy=True))
        return self

    def _get_predictions(self):
        """Predict from stored activations via least squares or the
        supplied regressor."""
        if self.regressor is not None:
            return self.regressor.predict(self.hidden_activations_)
        return safe_sparse_dot(self.hidden_activations_, self.coefs_)

    def predict(self, X):
        """Predict values for X; raises ValueError if not fitted."""
        if not self.fitted_:
            raise ValueError('ELMRegressor not fitted')
        self.hidden_activations_ = self.hidden_layer.transform(X)
        return self._get_predictions()
|
class GenELMClassifier(BaseELM, ClassifierMixin):
    """Classifier based on the Extreme Learning Machine.

    Binarizes the labels, fits a GenELMRegressor to the binarized targets,
    and maps regression outputs back to class labels at predict time.

    Parameters
    ----------
    hidden_layer : random_layer instance, optional
        (default=MLPRandomLayer(random_state=0))
    binarizer : LabelBinarizer, optional (default=LabelBinarizer(-1, 1))
    regressor : regressor instance, optional (default=None)
        If provided, performs the hidden->output regression; otherwise an
        ordinary linear least squares fit is used.

    Attributes
    ----------
    classes_ : array of class labels seen during fit.
    genelm_regressor_ : GenELMRegressor doing the actual fitting.

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning
           Machine: Theory and Applications", Neurocomputing, vol. 70,
           pp. 489-501, 2006.
    """

    def __init__(self, hidden_layer=MLPRandomLayer(random_state=0), binarizer=LabelBinarizer((- 1), 1), regressor=None):
        super(GenELMClassifier, self).__init__(hidden_layer, regressor)
        self.binarizer = binarizer
        self.classes_ = None
        self.genelm_regressor_ = GenELMRegressor(hidden_layer, regressor)

    def decision_function(self, X):
        """Per-class decision values for each sample in X (shape
        [n_samples, n_classes], or [n_samples] in the two-class case)."""
        return self.genelm_regressor_.predict(X)

    def fit(self, X, y):
        """Fit on binarized labels; records classes_ and returns self."""
        self.classes_ = np.unique(y)
        self.genelm_regressor_.fit(X, self.binarizer.fit_transform(y))
        return self

    def predict(self, X):
        """Predict class labels for X."""
        return self.binarizer.inverse_transform(self.decision_function(X))
|
class ELMRegressor(BaseEstimator, RegressorMixin):
    """
    ELMRegressor is a regressor based on the Extreme Learning Machine.

    An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
    least squares fitting of the hidden->output weights by default.
    [1][2]

    ELMRegressor is a wrapper for a GenELMRegressor that uses a
    RandomLayer and passes the __init__ parameters through
    to the hidden layer generated by the fit() method.

    Parameters
    ----------
    n_hidden : int, optional (default=20)
        Number of units to generate in the SimpleRandomLayer

    alpha : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    rbf_width : float, optional (default=1.0)
        multiplier on rbf_activation

    activation_func : {callable, string} optional (default='tanh')
        Function used to transform input activation.
        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase', 'sigmoid',
        'hardlim', 'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric',
        'reclinear' or a callable. If none is given, 'tanh' will be used.
        If a callable is given, it will be used to compute the hidden unit
        activations.

    activation_args : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    user_components : dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
        randomly generated. Valid key/value pairs are as follows:
        'radii'   : array-like of shape [n_hidden]
        'centers' : array-like of shape [n_hidden, n_features]
        'biases'  : array-like of shape [n_hidden]
        'weights' : array-like of shape [n_hidden, n_features]

    regressor : regressor instance, optional (default=None)
        If provided, this object is used to perform the regression from
        hidden unit activations to the outputs and subsequent predictions.
        If not present, an ordinary linear least squares fit is performed.

    random_state : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    _genelm_regressor : GenELMRegressor object
        Wrapped object that actually performs the fit.

    See Also
    --------
    RandomLayer, RBFRandomLayer, MLPRandomLayer,
    GenELMRegressor, GenELMClassifier, ELMClassifier

    References
    ----------
    .. [1] http://www.extreme-learning-machines.org
    .. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
           Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
           2006.
    """

    def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
                 activation_func='tanh', activation_args=None,
                 user_components=None, regressor=None, random_state=None):
        self.n_hidden = n_hidden
        self.alpha = alpha
        self.random_state = random_state
        self.activation_func = activation_func
        self.activation_args = activation_args
        self.user_components = user_components
        self.rbf_width = rbf_width
        self.regressor = regressor
        # Created lazily by fit(); None marks an unfitted estimator.
        self._genelm_regressor = None

    def _create_random_layer(self):
        """Pass init params to RandomLayer"""
        return RandomLayer(n_hidden=self.n_hidden,
                           alpha=self.alpha,
                           random_state=self.random_state,
                           activation_func=self.activation_func,
                           activation_args=self.activation_args,
                           user_components=self.user_components,
                           rbf_width=self.rbf_width)

    def fit(self, X, y):
        """
        Fit the model using X, y as training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like of shape [n_samples, n_outputs]
            Target values (class labels in classification, real numbers in
            regression)

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # A fresh random layer is built on every fit so that random_state
        # and the other hyperparameters take effect at fit time.
        rhl = self._create_random_layer()
        self._genelm_regressor = GenELMRegressor(hidden_layer=rhl,
                                                 regressor=self.regressor)
        self._genelm_regressor.fit(X, y)
        return self

    def predict(self, X):
        """
        Predict values using the model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]

        Returns
        -------
        C : numpy array of shape [n_samples, n_outputs]
            Predicted values.

        Raises
        ------
        ValueError
            If the estimator has not been fitted yet.
        """
        if self._genelm_regressor is None:
            # BUGFIX: the message previously said 'SimpleELMRegressor not
            # fitted', naming the wrong class; corrected for consistency
            # with this class and its siblings.
            raise ValueError('ELMRegressor not fitted')
        return self._genelm_regressor.predict(X)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.