code stringlengths 17 6.64M |
|---|
def parse_args():
    """Parse command-line arguments for the sampling script.

    Returns:
        argparse.Namespace with model_prefix, context, output, changes,
        ignore_unk, beam_search, n_samples, n_turns and verbose.
    """
    parser = argparse.ArgumentParser('Sample (with beam-search) from the session model')
    # NOTE: action='store_false' means args.ignore_unk defaults to True
    # (<unk> tokens are suppressed); passing --ignore-unk turns suppression
    # OFF.  The previous help text ("Disables the generation ...") described
    # the default, not the flag's effect — fixed to match actual behavior.
    parser.add_argument('--ignore-unk', action='store_false',
                        help='Allow generation of unknown words (<unk> tokens); by default they are suppressed')
    parser.add_argument('model_prefix', help='Path to the model prefix (without _model.npz or _state.pkl)')
    parser.add_argument('context', help='File of input contexts')
    parser.add_argument('output', help='Output file')
    parser.add_argument('--beam_search', action='store_true', help='Use beam search instead of random search')
    # BUG FIX: default was the string '1'.  argparse applies type= only to
    # command-line strings, never to defaults, so args.n_samples was a str
    # whenever the flag was omitted.
    parser.add_argument('--n-samples', default=1, type=int, help='Number of samples')
    parser.add_argument('--n-turns', default=1, type=int, help='Number of dialog turns to generate')
    parser.add_argument('--verbose', action='store_true', default=False, help='Be verbose')
    parser.add_argument('changes', nargs='?', default='', help='Changes to state')
    return parser.parse_args()
|
def main():
    """Load a trained dialogue model and sample responses for each context.

    Reads `<model_prefix>_state.pkl` and `<model_prefix>_model.npz`, samples
    (randomly or with beam search) one response per context line in
    `args.context`, and writes tab-joined samples to `args.output`.

    Raises:
        Exception: if `<model_prefix>_model.npz` does not exist.
    """
    args = parse_args()
    state = prototype_state()

    state_path = args.model_prefix + '_state.pkl'
    model_path = args.model_prefix + '_model.npz'

    # BUG FIX: pickled data must be read in binary mode on Python 3, and the
    # handle is now closed deterministically via the context manager.
    with open(state_path, 'rb') as src:
        state.update(cPickle.load(src))

    logging.basicConfig(level=getattr(logging, state['level']),
                        format='%(asctime)s: %(name)s: %(levelname)s: %(message)s')

    state['compute_training_updates'] = False

    model = DialogEncoderDecoder(state)
    if args.beam_search:
        sampler = search.BeamSampler(model)
    else:
        sampler = search.RandomSampler(model)

    if os.path.isfile(model_path):
        logger.debug('Loading previous model')
        model.load(model_path)
    else:
        raise Exception('Must specify a valid model path')

    contexts = [[]]
    with open(args.context, 'r') as context_file:
        lines = context_file.readlines()
    if len(lines):
        contexts = [x.strip() for x in lines]

    print('Sampling started...')
    context_samples, context_costs = sampler.sample(contexts,
                                                    n_samples=args.n_samples,
                                                    n_turns=args.n_turns,
                                                    ignore_unk=args.ignore_unk,
                                                    verbose=args.verbose)
    print('Sampling finished.')

    print('Saving to file...')
    # BUG FIX: the original line was a broken 2to3 conversion of the Python 2
    # statement "print >> output_handle, ..." — it evaluated
    # (print >> output_handle) and raised TypeError.  Use the file= keyword.
    with open(args.output, 'w') as output_handle:
        for context_sample in context_samples:
            print('\t'.join(context_sample), file=output_handle)
    print('Saving to file finished.')
    print('All done!')
|
def prototype_state():
    """Return the default hyper-parameter dictionary shared by every prototype.

    All other `prototype_*` configurations start from this dict and override
    a subset of its entries.
    """
    defaults = {
        # Reproducibility and logging.
        'seed': 1234,
        'level': 'DEBUG',
        # Special vocabulary symbols and their token ids.
        'oov': '<unk>',
        'end_sym_utterance': '</s>',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': 2,
        'first_speaker_sym': 3,
        'second_speaker_sym': 4,
        'third_speaker_sym': 5,
        'minor_speaker_sym': 6,
        'voice_over_sym': 7,
        'off_screen_sym': 8,
        'pause_sym': 9,
        # Architecture toggles.
        'reset_hidden_states_between_subsequences': False,
        'maxout_out': False,
        'deep_utterance_decoder_out': True,
        'deep_dialogue_encoder_input': False,
        # Activations are stored as source strings and eval'ed by the model.
        'sent_rec_activation': 'lambda x: T.tanh(x)',
        'dialogue_rec_activation': 'lambda x: T.tanh(x)',
        'decoder_bias_type': 'all',
        'utterance_encoder_gating': 'GRU',
        'dialogue_encoder_gating': 'GRU',
        'utterance_decoder_gating': 'GRU',
        'bidirectional_utterance_encoder': False,
        'direct_connection_between_encoders_and_decoder': False,
        'deep_direct_connection': False,
        'disable_dialogue_encoder': False,
        'collaps_to_standard_rnn': False,
        'reset_utterance_decoder_at_end_of_utterance': True,
        'reset_utterance_encoder_at_end_of_utterance': False,
        # Layer sizes.
        'qdim_encoder': 512,
        'qdim_decoder': 512,
        'sdim': 1000,
        'rankdim': 256,
        # Gaussian latent-variable settings.
        'add_latent_gaussian_per_utterance': False,
        'condition_latent_variable_on_dialogue_encoder': False,
        'condition_posterior_latent_variable_on_dcgm_encoder': False,
        'latent_gaussian_per_utterance_dim': 10,
        'scale_latent_gaussian_variable_variances': 10,
        'min_latent_gaussian_variable_variances': 0.01,
        'max_latent_gaussian_variable_variances': 10.0,
        'deep_latent_gaussian_variable_conditioning': True,
        'condition_decoder_only_on_latent_variable': False,
        # Piecewise latent-variable settings.
        'add_latent_piecewise_per_utterance': False,
        'gate_latent_piecewise_per_utterance': True,
        'latent_piecewise_alpha_variables': 5,
        'scale_latent_piecewise_variable_alpha_use_softplus': True,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'latent_piecewise_per_utterance_dim': 10,
        'latent_piecewise_variable_alpha_parameter_tying': False,
        'latent_piecewise_variable_alpha_parameter_tying_beta': 1.0,
        'deep_latent_piecewise_variable_conditioning': True,
        'deep_utterance_decoder_input': True,
        # KL-divergence annealing and decoder word dropout.
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'kl_divergence_max_weight': 1.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'apply_meanfield_inference': False,
        # Pretrained word embeddings.
        'initialize_from_pretrained_word_embeddings': False,
        'pretrained_word_embeddings_file': '',
        'fix_pretrained_word_embeddings': False,
        'fix_encoder_parameters': False,
        'do_generate_first_utterance': True,
        'skip_utterance': False,
        'skip_utterance_predict_both': False,
        # Optimisation and training schedule.
        'updater': 'adam',
        'use_nce': False,
        'cutoff': 0.01,
        'lr': 0.0002,
        'patience': 20,
        'cost_threshold': 1.003,
        'bs': 80,
        'sort_k_batches': 20,
        'max_grad_steps': 80,
        'save_dir': './',
        'train_freq': 10,
        'valid_freq': 5000,
        'loop_iters': 3000000,
        'time_stop': 24 * 60 * 31,  # minutes: 31 days
        'minerr': -1,
        'max_len': -1,
        # Normalisation operator: 'LN' (layer norm) or 'BN' (batch norm).
        'normop_type': 'LN',
    }

    # Normalisation-operator hyper-parameters depend on the operator type.
    if defaults['normop_type'] == 'BN':
        defaults.update({
            'normop_gamma_init': 0.1,
            'normop_gamma_min': 0.05,
            'normop_gamma_max': 10.0,
            'normop_moving_average_const': 0.99,
            'normop_max_enc_seq': 50,
        })
    else:
        defaults.update({
            'normop_gamma_init': 1.0,
            'normop_gamma_min': 0.05,
            'normop_gamma_max': 10.0,
            'normop_moving_average_const': 0.99,
            'normop_max_enc_seq': 1,
        })

    defaults['train_iterator_offset'] = 0
    defaults['train_iterator_reshuffle_count'] = 1

    return defaults
|
def prototype_test():
    """Tiny HRED configuration used by the unit tests (GRU decoder)."""
    state = prototype_state()
    # Test-fixture data locations and schedule.
    state.update({
        'train_dialogues': './tests/data/ttrain.dialogues.pkl',
        'test_dialogues': './tests/data/ttest.dialogues.pkl',
        'valid_dialogues': './tests/data/tvalid.dialogues.pkl',
        'dictionary': './tests/data/ttrain.dict.pkl',
        'save_dir': './tests/models/',
        'max_grad_steps': 20,
        'valid_freq': 50,
        'prefix': 'testmodel_',
        'updater': 'adam',
        'initialize_from_pretrained_word_embeddings': False,
        'pretrained_word_embeddings_file': './tests/data/MT_WordEmb.pkl',
        'fix_pretrained_word_embeddings': False,
    })
    # Minimal architecture so tests run fast.
    state.update({
        'maxout_out': False,
        'deep_utterance_decoder_out': True,
        'deep_dialogue_encoder_input': True,
        'utterance_encoder_gating': 'GRU',
        'dialogue_encoder_gating': 'GRU',
        'utterance_decoder_gating': 'GRU',
        'bidirectional_utterance_encoder': True,
        'direct_connection_between_encoders_and_decoder': True,
        'bs': 5,
        'sort_k_batches': 1,
        'use_nce': False,
        'decoder_bias_type': 'all',
        'qdim_encoder': 15,
        'qdim_decoder': 5,
        'sdim': 10,
        'rankdim': 10,
    })
    return state
|
def prototype_test_variational():
    """Tiny variational (piecewise-latent) configuration for the unit tests."""
    state = prototype_state()
    # Test-fixture data locations and schedule.
    state.update({
        'train_dialogues': './tests/data/ttrain.dialogues.pkl',
        'test_dialogues': './tests/data/ttest.dialogues.pkl',
        'valid_dialogues': './tests/data/tvalid.dialogues.pkl',
        'dictionary': './tests/data/ttrain.dict.pkl',
        'save_dir': './tests/models/',
        'max_grad_steps': 20,
        'valid_freq': 5,
        'prefix': 'testmodel_',
        'updater': 'adam',
        'initialize_from_pretrained_word_embeddings': True,
        'pretrained_word_embeddings_file': './tests/data/MT_WordEmb.pkl',
    })
    # Minimal architecture (LSTM decoder).
    state.update({
        'maxout_out': False,
        'deep_utterance_decoder_out': True,
        'deep_dialogue_encoder_input': True,
        'direct_connection_between_encoders_and_decoder': False,
        'deep_direct_connection': False,
        'utterance_encoder_gating': 'GRU',
        'dialogue_encoder_gating': 'GRU',
        'utterance_decoder_gating': 'LSTM',
        'bidirectional_utterance_encoder': False,
        'bs': 5,
        'sort_k_batches': 1,
        'use_nce': False,
        'decoder_bias_type': 'all',
        'qdim_encoder': 15,
        'qdim_decoder': 5,
        'sdim': 10,
        'rankdim': 10,
    })
    # Latent-variable setup: piecewise variables only, trained with
    # KL-divergence annealing.  (The original listed some of these keys
    # twice with the same final values; they are set once here.)
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 5,
        'condition_latent_variable_on_dialogue_encoder': True,
        'condition_posterior_latent_variable_on_dcgm_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'kl_divergence_max_weight': 0.5,
        'add_latent_piecewise_per_utterance': True,
        'gate_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 10,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_HRED_NormOp_ClusterExp1():
    """Twitter-BPE HRED baseline, cluster grid point 1 (qdim 1000/1000, sdim 500)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 500,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variables disabled for the plain HRED baseline.
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_HRED_NormOp_ClusterExp2():
    """Twitter-BPE HRED baseline, cluster grid point 2 (qdim 1000/1000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variables disabled for the plain HRED baseline.
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_HRED_NormOp_ClusterExp3():
    """Twitter-BPE HRED baseline, cluster grid point 3 (qdim 1000/2000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variables disabled for the plain HRED baseline.
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_HRED_NormOp_ClusterExp4():
    """Twitter-BPE HRED baseline, cluster grid point 4 (qdim 1000/4000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variables disabled for the plain HRED baseline.
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_HRED_NormOp_ClusterExp5():
    """Twitter-BPE HRED baseline, cluster grid point 5 (qdim 2000/4000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 2000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Latent variables disabled for the plain HRED baseline.
    state.update({
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': False,
        'train_latent_variables_with_kl_divergence_annealing': False,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': False,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp1():
    """Twitter-BPE VHRED with Gaussian latents only, grid point 1 (qdim 1000/1000, sdim 500)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 500,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Gaussian latent variable only, with KL annealing and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp2():
    """Twitter-BPE VHRED with Gaussian latents only, grid point 2 (qdim 1000/1000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Gaussian latent variable only, with KL annealing and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp3():
    """Twitter-BPE VHRED with Gaussian latents only, grid point 3 (qdim 1000/2000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Gaussian latent variable only, with KL annealing and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp4():
    """Twitter-BPE VHRED with Gaussian latents only, grid point 4 (qdim 1000/4000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Gaussian latent variable only, with KL annealing and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussOnly_VHRED_NormOp_ClusterExp5():
    """Twitter-BPE VHRED with Gaussian latents only, grid point 5 (qdim 1000/4000, latent dim 300)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Gaussian latent variable only (larger 300-dim latent), with KL annealing
    # and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 300,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp1():
    """Twitter-BPE VHRED with Gaussian + piecewise latents, grid point 1 (qdim 1000/1000, sdim 500)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 500,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Both Gaussian and piecewise latent variables enabled, with KL annealing
    # and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp2():
    """Twitter-BPE VHRED with Gaussian + piecewise latents, grid point 2 (qdim 1000/1000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 1000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Both Gaussian and piecewise latent variables enabled, with KL annealing
    # and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp3():
    """Twitter-BPE VHRED with Gaussian + piecewise latents, grid point 3 (qdim 1000/2000, sdim 1000)."""
    state = prototype_state()
    # Data locations and training schedule.
    state.update({
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'patience': 20,
        'bs': 80,
    })
    # Architecture.
    state.update({
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
    })
    # Both Gaussian and piecewise latent variables enabled, with KL annealing
    # and decoder word dropout.
    state.update({
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp4():
    """Twitter BPE VHRED experiment: Gaussian + piecewise latents (dim 100 each),
    LSTM decoder with qdim_decoder=4000, KL annealing rate 1/60000."""
    state = prototype_state()
    state.update({
        # Data paths
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,  # larger decoder than ClusterExp3
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent variable configuration
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp5():
    """Twitter BPE VHRED experiment: Gaussian + piecewise latents (dim 300 each),
    LSTM decoder with qdim_decoder=4000, KL annealing rate 1/60000."""
    state = prototype_state()
    state.update({
        # Data paths
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent variable configuration (larger latent dims than ClusterExp4)
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 300,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 300,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp6():
    """Twitter BPE VHRED experiment: like ClusterExp3 (latent dim 100,
    qdim_decoder=2000) but with piecewise latent gating disabled."""
    state = prototype_state()
    state.update({
        # Data paths
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent variable configuration
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
        # Distinguishing flag for this experiment
        'gate_latent_piecewise_per_utterance': False,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp7():
    """Twitter BPE VHRED experiment: like ClusterExp4 (latent dim 100,
    qdim_decoder=4000) but with piecewise latent gating disabled."""
    state = prototype_state()
    state.update({
        # Data paths
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent variable configuration
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
        # Distinguishing flag for this experiment
        'gate_latent_piecewise_per_utterance': False,
    })
    return state
|
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp8():
    """Twitter BPE VHRED experiment: like ClusterExp5 (latent dim 300,
    qdim_decoder=4000) but with piecewise latent gating disabled."""
    state = prototype_state()
    state.update({
        # Data paths
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 4000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        # Latent variable configuration
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 300,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 300,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 60000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
        # Distinguishing flag for this experiment
        'gate_latent_piecewise_per_utterance': False,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Baseline_Exp1():
    """Ubuntu HRED baseline: both per-utterance latent variables disabled,
    shallow utterance-decoder input."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: both OFF for this baseline (dims kept for parity
        # with the latent-variable experiments)
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': False,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Baseline_Exp2():
    """Ubuntu HRED baseline: both per-utterance latent variables disabled,
    deep utterance-decoder input enabled (contrast with Baseline_Exp1)."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: both OFF for this baseline
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp1():
    """Ubuntu VHRED experiment: Gaussian latent only (piecewise disabled),
    shallow utterance-decoder input."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise OFF
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': False,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp2():
    """Ubuntu VHRED experiment: piecewise latent only (Gaussian disabled),
    shallow utterance-decoder input."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian OFF, piecewise ON
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': False,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp3():
    """Ubuntu VHRED experiment: Gaussian + piecewise latents both enabled,
    shallow utterance-decoder input."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise ON
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': False,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp4():
    """Ubuntu VHRED experiment: Gaussian latent only (piecewise disabled),
    deep utterance-decoder input enabled."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise OFF
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp5():
    """Ubuntu VHRED experiment: piecewise latent only (Gaussian disabled),
    deep utterance-decoder input enabled."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian OFF, piecewise ON
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp6():
    """Ubuntu VHRED experiment: Gaussian + piecewise latents both enabled,
    deep utterance-decoder input enabled."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise ON
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp7():
    """Ubuntu VHRED experiment: Gaussian latent only, deep decoder input,
    KL weight capped at 0.25."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise OFF
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        # Cap on the annealed KL term weight (distinguishes Exp7-9)
        'kl_divergence_max_weight': 0.25,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp8():
    """Ubuntu VHRED experiment: piecewise latent only, deep decoder input,
    KL weight capped at 0.25."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian OFF, piecewise ON
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        # Cap on the annealed KL term weight (distinguishes Exp7-9)
        'kl_divergence_max_weight': 0.25,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp9():
    """Ubuntu VHRED experiment: Gaussian + piecewise latents, deep decoder
    input, KL weight capped at 0.25."""
    state = prototype_state()
    state.update({
        # Special token symbols; -1 presumably marks symbols absent from the
        # Ubuntu corpus — confirm against the data pipeline.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Data paths
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent variables: Gaussian ON, piecewise ON
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        # Word-dropout regularization on decoder inputs
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        # Cap on the annealed KL term weight (distinguishes Exp7-9)
        'kl_divergence_max_weight': 0.25,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp10():
    """Ubuntu VHRED config: Gaussian latent on, piecewise latent off, KL max weight 0.5."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.5,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp11():
    """Ubuntu VHRED config: Gaussian latent off, piecewise latent on, KL max weight 0.5."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.5,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp12():
    """Ubuntu VHRED config: Gaussian and piecewise latents on, KL max weight 0.5."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.5,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp13():
    """Ubuntu VHRED config: Gaussian latent on, piecewise latent off, KL max weight 0.75."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.75,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp14():
    """Ubuntu VHRED config: Gaussian latent off, piecewise latent on, KL max weight 0.75."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': False,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.75,
    })
    return state
|
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp15():
    """Ubuntu VHRED config: Gaussian and piecewise latents on, KL max weight 0.75."""
    state = prototype_state()
    state.update({
        # Special-token symbols (unused speaker/meta symbols are -1).
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths and bookkeeping.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent-variable settings (the per-experiment knobs).
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.75,
    })
    return state
|
class AtariDreamerAgent(DreamerAgent):
    """Dreamer agent specialized for Atari: discrete (one-hot) actions."""

    def __init__(self, ModelCls=AtariDreamerModel, **kwargs):
        super().__init__(ModelCls=ModelCls, **kwargs)

    def make_env_to_model_kwargs(self, env_spaces):
        """Map the env's observation/action spaces to model constructor kwargs."""
        return {
            'image_shape': env_spaces.observation.shape,
            'action_shape': env_spaces.action.shape,
            'action_dist': 'one_hot',
        }
|
class DMCDreamerAgent(DreamerAgent):
    """Dreamer agent for DeepMind Control: continuous (tanh-normal) actions."""

    # NOTE(review): default ModelCls is AtariDreamerModel here as well --
    # presumably the same model class serves both domains; confirm upstream.
    def __init__(self, ModelCls=AtariDreamerModel, **kwargs):
        super().__init__(ModelCls=ModelCls, **kwargs)

    def make_env_to_model_kwargs(self, env_spaces):
        """Map the env's observation/action spaces to model constructor kwargs."""
        action_dim = env_spaces.action.shape[0]
        return {
            'image_shape': env_spaces.observation.shape,
            'output_size': action_dim,
            'action_shape': action_dim,
            'action_dist': 'tanh_normal',
        }
|
class DreamerAgent(RecurrentAgentMixin, BaseAgent):
    """Recurrent Dreamer agent: holds the model, carries the recurrent state
    across sampler steps, and perturbs sampled actions with exploration noise.

    Noise schedule (see ``exploration``): in 'train'/'sample' mode the amount
    starts at ``train_noise``; when ``expl_decay`` is truthy it decays linearly
    with ``self._itr``, and when ``expl_min`` is truthy it is floored there.
    In 'eval' mode the amount is ``eval_noise``.
    """
    def __init__(self, ModelCls=AgentModel, train_noise=0.4, eval_noise=0, expl_type='additive_gaussian', expl_min=0.1, expl_decay=7000, model_kwargs=None, initial_model_state_dict=None):
        self.train_noise = train_noise
        self.eval_noise = eval_noise
        self.expl_type = expl_type  # 'additive_gaussian' | 'completely_random' | 'epsilon_greedy'
        self.expl_min = expl_min
        self.expl_decay = expl_decay
        super().__init__(ModelCls, model_kwargs, initial_model_state_dict)
        self._mode = 'train'  # selects the noise amount in exploration()
        self._itr = 0  # iteration counter driving the linear noise decay
    def make_env_to_model_kwargs(self, env_spaces):
        'Generate any keyword args to the model which depend on environment interfaces.'
        return dict(action_size=env_spaces.action.shape[0])
    def __call__(self, observation, prev_action, init_rnn_state):
        # Training-time forward pass: the caller supplies the recurrent state.
        model_inputs = buffer_to((observation, prev_action, init_rnn_state), device=self.device)
        return self.model(*model_inputs)
    @torch.no_grad()
    def step(self, observation, prev_action, prev_reward):
        """Compute an action for one sampler step (no grad).

        Moves inputs to the model device, runs the model with the currently
        held recurrent state, applies exploration noise, advances the
        recurrent state, and returns the AgentStep on CPU.  ``prev_reward``
        is not passed to the model.
        """
        model_inputs = buffer_to((observation, prev_action), device=self.device)
        (action, state) = self.model(*model_inputs, self.prev_rnn_state)
        action = self.exploration(action)
        # First step after reset: prev_rnn_state is None, so substitute a
        # zero state shaped like the newly produced one.
        prev_state = (self.prev_rnn_state or buffer_func(state, torch.zeros_like))
        self.advance_rnn_state(state)
        agent_info = DreamerAgentInfo(prev_state=prev_state)
        agent_step = AgentStep(action=action, agent_info=agent_info)
        return buffer_to(agent_step, device='cpu')
    @torch.no_grad()
    def value(self, observation, prev_action, prev_reward):
        """Compute the value estimate for the environment state using the
        currently held recurrent state, without advancing the recurrent state,
        e.g. for the bootstrap value V(s_{T+1}), in the sampler. (no grad)
        """
        agent_inputs = buffer_to((observation, prev_action), device=self.device)
        # NOTE(review): the model call is unpacked as a 5-tuple here but as a
        # 2-tuple in step() above -- presumably the model's forward output is
        # mode-dependent; confirm against the model class used at runtime.
        (action, action_dist, value, reward, state) = self.model(*agent_inputs, self.prev_rnn_state)
        return value.to('cpu')
    def exploration(self, action: torch.Tensor) -> torch.Tensor:
        """
        :param action: action to take, shape (1,) (if categorical), or (action dim,) (if continuous)
        :return: action of the same shape passed in, augmented with some noise
        """
        if (self._mode in ['train', 'sample']):
            expl_amount = self.train_noise
            if self.expl_decay:  # linear decay over iterations
                expl_amount = (expl_amount - (self._itr / self.expl_decay))
            if self.expl_min:  # floor the decayed amount
                expl_amount = max(self.expl_min, expl_amount)
        elif (self._mode == 'eval'):
            expl_amount = self.eval_noise
        else:
            raise NotImplementedError
        if (self.expl_type == 'additive_gaussian'):
            # Continuous control: Gaussian noise, clamped to the [-1, 1] range.
            noise = (torch.randn(*action.shape, device=action.device) * expl_amount)
            return torch.clamp((action + noise), (- 1), 1)
        if (self.expl_type == 'completely_random'):
            if (expl_amount == 0):
                return action
            else:
                # Ignore the model's action entirely: uniform in [-1, 1).
                return ((torch.rand(*action.shape, device=action.device) * 2) - 1)
        if (self.expl_type == 'epsilon_greedy'):
            # Discrete (one-hot) actions: with probability expl_amount,
            # replace the action with a uniformly random one-hot vector.
            action_dim = self.env_model_kwargs['action_shape'][0]
            if (np.random.uniform(0, 1) < expl_amount):
                index = torch.randint(0, action_dim, action.shape[:(- 1)], device=action.device)
                action = torch.zeros_like(action)
                action[(..., index)] = 1
            return action
        raise NotImplementedError(self.expl_type)
|
def initialize_replay_buffer(self, examples, batch_spec, async_=False):
    'Initializes a sequence replay buffer with single frame observations'
    # `async_` is accepted for interface compatibility but not used here.
    buffer_example = SamplesToBuffer(
        observation=examples['observation'],
        action=examples['action'],
        reward=examples['reward'],
        done=examples['done'],
    )
    return UniformSequenceReplayBuffer(
        example=buffer_example,
        size=self.replay_size,
        B=batch_spec.B,
        rnn_state_interval=0,  # no rnn state stored in the buffer
        discount=self.discount,
        n_step_return=self.n_step_return,
    )
|
def samples_to_buffer(samples):
    """Repackage sampler output into the replay buffer's storage format.

    Called in optimize_agent() if samples are provided to that method.  In
    asynchronous mode, will be called in the memory_copier process.
    """
    env_samples = samples.env
    return SamplesToBuffer(
        observation=env_samples.observation,
        action=samples.agent.action,
        reward=env_samples.reward,
        done=env_samples.done,
    )
|
class ActionRepeat(EnvWrapper):
    """Repeats each agent action for up to `amount` env steps, summing rewards.

    Stops early when the episode ends; returns the last obs/done/info seen.
    """

    def __init__(self, env, amount=1):
        super().__init__(env)
        self.amount = amount

    def step(self, action):
        total_reward = 0
        done = False
        for _ in range(self.amount):
            obs, reward, done, info = self.env.step(action)
            total_reward += reward
            if done:
                break
        return obs, total_reward, done, info
|
class DeepMindControl(Env):
    """Env adapter for dm_control suite tasks, observing rendered pixels (CHW uint8)."""

    def __init__(self, name, size=(64, 64), camera=None):
        domain, task = name.split('_', 1)
        if domain == 'cup':
            domain = 'ball_in_cup'  # dm_control uses the longer domain name
        if isinstance(domain, str):
            self._env = suite.load(domain, task)
        else:
            # Caller supplied a constructor instead of a domain name.
            assert task is None
            self._env = domain()
        self._size = size
        if camera is None:
            camera = 2 if domain == 'quadruped' else 0  # quadruped needs a different view
        self._camera = camera

    @property
    def observation_space(self):
        return IntBox(low=0, high=255, shape=(3,) + self._size, dtype='uint8')

    @property
    def action_space(self):
        spec = self._env.action_spec()
        return FloatBox(low=spec.minimum, high=spec.maximum)

    def step(self, action):
        time_step = self._env.step(action)
        _ = dict(time_step.observation)  # state observation unused; pixels are rendered
        pixels = self.render()
        reward = time_step.reward or 0  # reward is None on the step after reset
        done = time_step.last()
        info = EnvInfo(np.array(time_step.discount, np.float32), None, done)
        return EnvStep(pixels, reward, done, info)

    def reset(self):
        time_step = self._env.reset()
        _ = dict(time_step.observation)
        return self.render()

    def render(self, *args, **kwargs):
        if kwargs.get('mode', 'rgb_array') != 'rgb_array':
            raise ValueError("Only render mode 'rgb_array' is supported.")
        frame = self._env.physics.render(*self._size, camera_id=self._camera)
        return frame.transpose(2, 0, 1).copy()  # HWC -> CHW

    @property
    def horizon(self):
        raise NotImplementedError
|
class AtariTrajInfo(TrajInfo):
    'TrajInfo class for use with Atari Env, to store raw game score separate\n    from clipped reward signal.'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.GameScore = 0  # cumulative raw (unclipped) score for the trajectory

    def step(self, observation, action, reward, done, agent_info, env_info):
        super().step(observation, action, reward, done, agent_info, env_info)
        # env_info may lack game_score (non-Atari env) -> default to 0.
        self.GameScore = self.GameScore + getattr(env_info, 'game_score', 0)
|
class AtariEnv(Env):
    """An efficient implementation of the classic Atari RL environment using the
    Arcade Learning Environment (ALE).

    Output `env_info` includes:
        * `game_score`: raw game score, separate from reward clipping.
        * `traj_done`: special signal which signals game-over or timeout, so
          that the sampler doesn't reset the environment when ``done==True``
          but ``traj_done==False``, which can happen when
          ``episodic_lives==True``.

    Always performs 2-frame max to avoid flickering (this is pretty fast).

    Screen size downsampling is done by cropping two rows and then
    downsampling by 2x using `cv2`: (210, 160) --> (80, 104).  Downsampling
    by 2x is much faster than the old scheme to (84, 84), and the (80, 104)
    shape is fairly convenient for convolution filter parameters which don't
    cut off edges.

    The action space is an `IntBox` for the number of actions.  The
    observation space is an `IntBox` with ``dtype=uint8`` to save memory;
    conversion to float should happen inside the agent's model's
    ``forward()`` method.

    Args:
        game (str): game name
        frame_shape (tuple): (width, height) target passed to ``cv2.resize``
        frame_skip (int): frames per step (>=1)
        num_img_obs (int): number of frames in observation (>=1)
        clip_reward (bool): if ``True``, clip reward to np.sign(reward)
        episodic_lives (bool): if ``True``, output ``done=True`` but
            ``env_info[traj_done]=False`` when a life is lost
        max_start_noops (int): upper limit for random number of noop actions after reset
        repeat_action_probability (0-1): probability for sticky actions
        horizon (int): max number of steps before timeout / ``traj_done=True``
        seed (int): seed for the reset-noop RNG
    """

    def __init__(self, game='pong', frame_shape=(80, 104), frame_skip=4, num_img_obs=4, clip_reward=True, episodic_lives=True, max_start_noops=30, repeat_action_probability=0.0, horizon=27000, seed=0):
        save__init__args(locals(), underscore=True)  # stores args as self._game, etc.
        game_path = atari_py.get_game_path(game)
        if not os.path.exists(game_path):
            raise IOError('You asked for game {} but path {} does not exist'.format(game, game_path))
        self.ale = atari_py.ALEInterface()
        self.ale.setFloat(b'repeat_action_probability', repeat_action_probability)
        self.ale.loadROM(game_path)
        self._action_set = self.ale.getMinimalActionSet()
        self._action_space = IntBox(low=0, high=len(self._action_set))
        self._frame_shape = frame_shape
        # cv2.resize takes (width, height); the produced array is (height, width).
        obs_shape = (num_img_obs, frame_shape[1], frame_shape[0])
        self._observation_space = IntBox(low=0, high=255, shape=obs_shape, dtype='uint8')
        self._max_frame = self.ale.getScreenGrayscale()
        self._raw_frame_1 = self._max_frame.copy()
        self._raw_frame_2 = self._max_frame.copy()
        self._obs = np.zeros(shape=obs_shape, dtype='uint8')
        self.random = np.random.RandomState(seed)
        self._has_fire = 'FIRE' in self.get_action_meanings()
        self._has_up = 'UP' in self.get_action_meanings()
        self._horizon = int(horizon)
        self.reset()

    def reset(self):
        'Performs hard reset of ALE game.'
        self.ale.reset_game()
        self._reset_obs()
        self._life_reset()
        # Random number of no-op actions to randomize the start state.
        for _ in range(self.random.randint(0, self._max_start_noops + 1)):
            self.ale.act(0)
        self._update_obs()
        self._step_counter = 0
        return self.get_obs()

    def step(self, action):
        """Advance `frame_skip` ALE frames, applying 2-frame max, reward
        clipping, and episodic-lives handling."""
        a = self._action_set[action]
        game_score = np.array(0.0, dtype='float32')
        for _ in range(self._frame_skip - 1):
            game_score += self.ale.act(a)
        self._get_screen(1)  # second-to-last frame, for the 2-frame max
        game_score += self.ale.act(a)
        lost_life = self._check_life()
        if lost_life and self._episodic_lives:
            self._reset_obs()  # the new obs will not contain pre-death frames
        self._update_obs()
        reward = np.sign(game_score) if self._clip_reward else game_score
        game_over = self.ale.game_over() or (self._step_counter >= self.horizon)
        done = game_over or (self._episodic_lives and lost_life)
        info = EnvInfo(game_score=game_score, traj_done=game_over)
        self._step_counter += 1
        return EnvStep(self.get_obs(), reward, done, info)

    def render(self, wait=10, show_full_obs=False):
        'Shows game screen via cv2, with option to show all frames in observation.'
        img = self.get_obs()
        if show_full_obs:
            shape = img.shape
            img = img.reshape((shape[0] * shape[1]), shape[2])  # stack frames vertically
        else:
            img = img[-1]  # most recent frame only
        cv2.imshow(self._game, img)
        cv2.waitKey(wait)

    def get_obs(self):
        return self._obs.copy()

    def seed(self, seed):
        # Only reseeds the reset-noop RNG, not ALE's internal randomness.
        self.random = np.random.RandomState(seed)

    def _get_screen(self, frame=1):
        # Fill one of the two raw-frame buffers in place.
        frame = self._raw_frame_1 if frame == 1 else self._raw_frame_2
        self.ale.getScreenGrayscale(frame)

    def _update_obs(self):
        'Max of last two frames; crop two rows; downsample by 2x.'
        self._get_screen(2)
        np.maximum(self._raw_frame_1, self._raw_frame_2, self._max_frame)
        # BUG FIX: cv2.resize's third positional parameter is `dst`, not
        # `interpolation`, so the original call silently fell back to the
        # default INTER_LINEAR.  Pass the flag by keyword to actually get
        # nearest-neighbor downsampling.
        img = cv2.resize(self._max_frame[1:-1], self._frame_shape, interpolation=cv2.INTER_NEAREST)
        self._obs = np.concatenate([self._obs[1:], img[np.newaxis]])

    def _reset_obs(self):
        self._obs[:] = 0
        self._max_frame[:] = 0
        self._raw_frame_1[:] = 0
        self._raw_frame_2[:] = 0

    def _check_life(self):
        lives = self.ale.lives()
        # lives > 0 excludes game over, which is handled separately in step().
        lost_life = (lives < self._lives) and (lives > 0)
        if lost_life:
            self._life_reset()
        return lost_life

    def _life_reset(self):
        self.ale.act(0)
        # ALE action codes: 1 is FIRE, 2 is UP -- presumably needed to resume
        # play after losing a life in some games (e.g. Breakout); confirm.
        if self._has_fire:
            self.ale.act(1)
        if self._has_up:
            self.ale.act(2)
        self._lives = self.ale.lives()

    @property
    def game(self):
        return self._game

    @property
    def frame_skip(self):
        return self._frame_skip

    @property
    def num_img_obs(self):
        return self._num_img_obs

    @property
    def clip_reward(self):
        return self._clip_reward

    @property
    def max_start_noops(self):
        return self._max_start_noops

    @property
    def episodic_lives(self):
        return self._episodic_lives

    @property
    def repeat_action_probability(self):
        return self._repeat_action_probability

    @property
    def horizon(self):
        return self._horizon

    def get_action_meanings(self):
        return [ACTION_MEANING[i] for i in self._action_set]
|
class NormalizeActions(EnvWrapper):
    """Rescales finite action bounds to [-1, 1]; unbounded dims pass through."""

    def __init__(self, env):
        super().__init__(env)
        low, high = env.action_space.low, env.action_space.high
        self._mask = np.logical_and(np.isfinite(low), np.isfinite(high))
        # Store the true bounds where finite; placeholder -1/1 elsewhere.
        self._low = np.where(self._mask, low, -1)
        self._high = np.where(self._mask, high, 1)

    @property
    def action_space(self):
        low = np.where(self._mask, -np.ones_like(self._low), self._low)
        high = np.where(self._mask, np.ones_like(self._low), self._high)
        return FloatBox(low, high, dtype=np.float32)

    def step(self, action):
        # Map [-1, 1] back to the env's original bounds on masked dims.
        rescaled = (action + 1) / 2 * (self._high - self._low) + self._low
        return self.env.step(np.where(self._mask, rescaled, action))
|
class OneHotAction(EnvWrapper):
    """Exposes a discrete action space as one-hot float vectors.

    ``step`` validates that the incoming vector is numerically one-hot and
    forwards its argmax index to the wrapped env.
    """

    def __init__(self, env):
        # Accept either a gym Discrete space or the project's IntBox.
        assert isinstance(env.action_space, gym.spaces.Discrete) or isinstance(env.action_space, IntBox)
        super().__init__(env)
        self._dtype = np.float32

    @property
    def action_space(self):
        shape = (self.env.action_space.n,)
        space = FloatBox(low=0, high=1, shape=shape, dtype=self._dtype)
        # Patch sample() so random draws from the space are valid one-hots.
        space.sample = self._sample_action
        return space

    def step(self, action):
        index = np.argmax(action).astype(int)
        reference = np.zeros_like(action)
        reference[index] = 1
        # BUG FIX: atol was 1000000.0, which made np.allclose accept *any*
        # vector and disabled this validity check entirely; use a tight
        # numerical tolerance instead.
        if not np.allclose(reference, action, atol=1e-06):
            raise ValueError(f'''Invalid one-hot action:
{action}''')
        return self.env.step(index)

    def reset(self):
        return self.env.reset()

    def _sample_action(self):
        """Return a uniformly random one-hot action vector."""
        actions = self.env.action_space.n
        index = self.random.randint(0, actions)
        reference = np.zeros(actions, dtype=self._dtype)
        reference[index] = 1.0
        return reference
|
class TimeLimit(EnvWrapper):
    """Flags the trajectory as done (via EnvInfo) after a fixed number of steps."""

    def __init__(self, env, duration):
        super().__init__(env)
        self._duration = duration
        self._step = None  # None means "reset required"

    def step(self, action):
        assert self._step is not None, 'Must reset environment.'
        obs, reward, done, info = self.env.step(action)
        self._step += 1
        timed_out = self._step >= self._duration
        if timed_out:
            if isinstance(info, EnvInfo):
                # Rebuild the namedtuple with traj_done forced True.
                info = EnvInfo(info.discount, info.game_score, True)
            self._step = None
        return EnvStep(obs, reward, done, info)

    def reset(self):
        self._step = 0
        return self.env.reset()
|
class EnvWrapper(Env):
    """Transparent base wrapper: forwards the Env interface and unknown
    public attributes to the wrapped environment."""

    def __init__(self, env: Env):
        self.env = env

    def __getattr__(self, name):
        # Never proxy private names -- doing so can mask real AttributeErrors.
        if name.startswith('_'):
            raise AttributeError("attempted to get missing private attribute '{}'".format(name))
        return getattr(self.env, name)

    def step(self, action):
        return self.env.step(action)

    def reset(self):
        return self.env.reset()

    def close(self):
        self.env.close()

    @property
    def action_space(self):
        return self.env.action_space

    @property
    def observation_space(self):
        return self.env.observation_space

    @property
    def horizon(self):
        return self.env.horizon
|
def make_wapper(base_class, wrapper_classes: Sequence = None, wrapper_kwargs: Sequence[Dict] = None):
    """
    Creates the correct factory method with wrapper support.
    This would get passed as the EnvCls argument in the sampler.

    Examples:
        The following code would make a factory method for atari with action repeat 2
        ``factory_method = make(AtariEnv, (ActionRepeat, ), (dict(amount=2),))``

    :param base_class: the base environment class (eg. AtariEnv)
    :param wrapper_classes: list of wrapper classes in order inner-first, outer-last
    :param wrapper_kwargs: list of kwargs dictionaries passed to the wrapper classes;
        entries may be None for defaults, and the whole argument may be omitted
        (previously that crashed with ``len(None)``) to use defaults everywhere
    :return: factory method
    """
    if wrapper_classes is None:
        def make_env(**env_kwargs):
            ':return only the base environment instance'
            return base_class(**env_kwargs)
        return make_env
    if wrapper_kwargs is None:
        # Generalization/fix: allow omitting kwargs entirely.
        wrapper_kwargs = [None] * len(wrapper_classes)
    assert len(wrapper_classes) == len(wrapper_kwargs)

    def make_env(**env_kwargs):
        ':return the wrapped environment instance'
        env = base_class(**env_kwargs)
        for wrapper_cls, w_kwargs in zip(wrapper_classes, wrapper_kwargs):
            env = wrapper_cls(env, **(w_kwargs or {}))
        return env
    return make_env
|
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Launcher entry point: CPU-sampler Dreamer run on Atari."""
    affinity = affinity_from_code(slot_affinity_code)
    # Overlay the variant stored in log_dir onto the named base config.
    config = update_config(configs[config_key], load_variant(log_dir))
    sampler = CpuSampler(
        EnvCls=AtariEnv,
        env_kwargs=config['env'],
        CollectorCls=CpuWaitResetCollector,
        TrajInfoCls=AtariTrajInfo,
        **config['sampler'],
    )
    algo = Dreamer(optim_kwargs=config['optim'], **config['algo'])
    agent = AtariDreamerAgent(model_kwargs=config['model'], **config['agent'])
    runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    name = config['env']['game'] + str(config['sampler']['batch_T'])
    with logger_context(log_dir, run_ID, name, config):
        runner.train()
|
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Launcher entry point: GPU-sampler Dreamer run on Atari."""
    affinity = affinity_from_code(slot_affinity_code)
    # Overlay the variant stored in log_dir onto the named base config.
    config = update_config(configs[config_key], load_variant(log_dir))
    sampler = GpuSampler(
        EnvCls=AtariEnv,
        env_kwargs=config['env'],
        CollectorCls=GpuWaitResetCollector,
        TrajInfoCls=AtariTrajInfo,
        **config['sampler'],
    )
    algo = Dreamer(optim_kwargs=config['optim'], **config['algo'])
    agent = AtariDreamerAgent(model_kwargs=config['model'], **config['agent'])
    runner = MinibatchRl(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    name = config['env']['game']
    with logger_context(log_dir, run_ID, name, config):
        runner.train()
|
class ActionDecoder(nn.Module):
    """MLP policy head mapping state features to an action distribution.

    Supported dists: 'tanh_normal' (continuous), 'one_hot' and
    'relaxed_one_hot' (discrete).
    """

    def __init__(self, action_size, feature_size, hidden_size, layers, dist='tanh_normal', activation=nn.ELU, min_std=0.0001, init_std=5, mean_scale=5):
        super().__init__()
        self.action_size = action_size
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.layers = layers
        self.dist = dist
        self.activation = activation
        self.min_std = min_std
        self.init_std = init_std
        self.mean_scale = mean_scale
        self.feedforward_model = self.build_model()
        # Inverse-softplus of init_std, so softplus(raw + pre-activation)
        # starts out near init_std.
        self.raw_init_std = np.log(np.exp(self.init_std) - 1)

    def build_model(self):
        """Build the MLP trunk plus a distribution-specific output layer."""
        modules = [nn.Linear(self.feature_size, self.hidden_size), self.activation()]
        for _ in range(self.layers - 1):
            modules.append(nn.Linear(self.hidden_size, self.hidden_size))
            modules.append(self.activation())
        if self.dist == 'tanh_normal':
            out_features = self.action_size * 2  # mean and std halves
        elif self.dist in ('one_hot', 'relaxed_one_hot'):
            out_features = self.action_size  # logits
        else:
            raise NotImplementedError(f'{self.dist} not implemented')
        modules.append(nn.Linear(self.hidden_size, out_features))
        return nn.Sequential(*modules)

    def forward(self, state_features):
        """Return the action distribution for the given state features."""
        x = self.feedforward_model(state_features)
        if self.dist == 'tanh_normal':
            mean, std = torch.chunk(x, 2, -1)
            # Squash the mean into [-mean_scale, mean_scale].
            mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
            std = F.softplus(std + self.raw_init_std) + self.min_std
            base = torch.distributions.Normal(mean, std)
            squashed = torch.distributions.TransformedDistribution(base, TanhBijector())
            return SampleDist(torch.distributions.Independent(squashed, 1))
        if self.dist == 'one_hot':
            return torch.distributions.OneHotCategorical(logits=x)
        if self.dist == 'relaxed_one_hot':
            return torch.distributions.RelaxedOneHotCategorical(0.1, logits=x)
        return None  # unrecognized dist (original behavior: fell through to None)
|
class AgentModel(nn.Module):
    """World model plus policy/value heads for Dreamer.

    Bundles the observation encoder/decoder, the RSSM transition and
    representation models, and the action/reward/value (and optional
    discount) heads. ``forward`` is intentionally unimplemented here;
    concrete subclasses (e.g. ``AtariDreamerModel``) define how a batch
    of observations is processed.
    """

    def __init__(self, action_shape, stochastic_size=30, deterministic_size=200, hidden_size=200, image_shape=(3, 64, 64), action_hidden_size=200, action_layers=3, action_dist='one_hot', reward_shape=(1,), reward_layers=3, reward_hidden=300, value_shape=(1,), value_layers=3, value_hidden=200, dtype=torch.float, use_pcont=False, pcont_layers=3, pcont_hidden=200, **kwargs):
        super().__init__()
        self.observation_encoder = ObservationEncoder(shape=image_shape)
        encoder_embed_size = self.observation_encoder.embed_size
        decoder_embed_size = (stochastic_size + deterministic_size)
        self.observation_decoder = ObservationDecoder(embed_size=decoder_embed_size, shape=image_shape)
        self.action_shape = action_shape
        output_size = np.prod(action_shape)
        self.transition = RSSMTransition(output_size, stochastic_size, deterministic_size, hidden_size)
        self.representation = RSSMRepresentation(self.transition, encoder_embed_size, output_size, stochastic_size, deterministic_size, hidden_size)
        self.rollout = RSSMRollout(self.representation, self.transition)
        # Features fed to the heads are the concatenated stochastic and
        # deterministic parts of the RSSM state (see get_feat).
        feature_size = (stochastic_size + deterministic_size)
        self.action_size = output_size
        self.action_dist = action_dist
        self.action_decoder = ActionDecoder(output_size, feature_size, action_hidden_size, action_layers, action_dist)
        self.reward_model = DenseModel(feature_size, reward_shape, reward_layers, reward_hidden)
        self.value_model = DenseModel(feature_size, value_shape, value_layers, value_hidden)
        self.dtype = dtype
        self.stochastic_size = stochastic_size
        self.deterministic_size = deterministic_size
        if use_pcont:
            # Discount ("pcont") head predicting episode continuation probability.
            self.pcont = DenseModel(feature_size, (1,), pcont_layers, pcont_hidden, dist='binary')

    def policy(self, state: RSSMState):
        """Compute an action and its distribution from an RSSM state.

        Sampling depends on ``self.action_dist``; 'one_hot' uses a
        straight-through estimator so the sample stays differentiable.
        """
        feat = get_feat(state)
        action_dist = self.action_decoder(feat)
        if (self.action_dist == 'tanh_normal'):
            if self.training:
                action = action_dist.rsample()
            else:
                # Deterministic (approximate mode) action at evaluation time.
                action = action_dist.mode()
        elif (self.action_dist == 'one_hot'):
            action = action_dist.sample()
            # Straight-through gradient: forward uses the hard sample,
            # backward flows through the probabilities.
            action = ((action + action_dist.probs) - action_dist.probs.detach())
        elif (self.action_dist == 'relaxed_one_hot'):
            action = action_dist.rsample()
        else:
            action = action_dist.sample()
        return (action, action_dist)

    def get_state_representation(self, observation: torch.Tensor, prev_action: torch.Tensor=None, prev_state: RSSMState=None):
        """Encode an observation and update the posterior RSSM state.

        :param observation: size(batch, channels, width, height)
        :param prev_action: size(batch, action_size); zeros if None
        :param prev_state: RSSMState: size(batch, state_size); zero initial state if None
        :return: RSSMState
        """
        obs_embed = self.observation_encoder(observation)
        if (prev_action is None):
            prev_action = torch.zeros(observation.size(0), self.action_size, device=observation.device, dtype=observation.dtype)
        if (prev_state is None):
            prev_state = self.representation.initial_state(prev_action.size(0), device=prev_action.device, dtype=prev_action.dtype)
        (_, state) = self.representation(obs_embed, prev_action, prev_state)
        return state

    def get_state_transition(self, prev_action: torch.Tensor, prev_state: RSSMState):
        """Advance the prior RSSM state one step without an observation.

        :param prev_action: size(batch, action_size)
        :param prev_state: RSSMState: size(batch, state_size)
        :return: RSSMState
        """
        state = self.transition(prev_action, prev_state)
        return state

    def forward(self, observation: torch.Tensor, prev_action: torch.Tensor=None, prev_state: RSSMState=None):
        # NOTE(review): the original class defined ``forward`` twice; this
        # second definition shadowed an earlier one computing
        # action/value/reward, which was therefore dead code and has been
        # removed. Subclasses must override ``forward``.
        raise NotImplementedError()
|
class AtariDreamerModel(AgentModel):
    """Agent model for Atari: normalizes pixel observations and emits actions."""

    def forward(self, observation: torch.Tensor, prev_action: torch.Tensor=None, prev_state: RSSMState=None):
        lead_dim, T, B, img_shape = infer_leading_dims(observation, 3)
        # Scale uint8 pixels into [-0.5, 0.5] and flatten the leading dims.
        observation = observation.reshape(T * B, *img_shape).type(self.dtype) / 255.0 - 0.5
        prev_action = prev_action.reshape(T * B, -1).to(self.dtype)
        if prev_state is None:
            prev_state = self.representation.initial_state(prev_action.size(0), device=prev_action.device, dtype=self.dtype)
        state = self.get_state_representation(observation, prev_action, prev_state)
        action, _ = self.policy(state)
        return_spec = ModelReturnSpec(action, state)
        # Restore the (time, batch) leading dims expected by the sampler.
        return buffer_func(return_spec, restore_leading_dims, lead_dim, T, B)
|
class DenseModel(nn.Module):
    """Fully-connected network producing a Normal or Bernoulli distribution."""

    def __init__(self, feature_size: int, output_shape: tuple, layers: int, hidden_size: int, dist='normal', activation=nn.ELU):
        super().__init__()
        self._output_shape = output_shape
        self._layers = layers
        self._hidden_size = hidden_size
        self._dist = dist
        self.activation = activation
        self._feature_size = feature_size
        self.model = self.build_model()

    def build_model(self):
        """Build ``layers`` hidden layers plus a flat output layer."""
        widths = [self._feature_size] + [self._hidden_size] * self._layers
        modules = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            modules += [nn.Linear(n_in, n_out), self.activation()]
        modules.append(nn.Linear(self._hidden_size, int(np.prod(self._output_shape))))
        return nn.Sequential(*modules)

    def forward(self, features):
        """Return a distribution whose event shape is ``output_shape``."""
        flat = self.model(features)
        out = torch.reshape(flat, features.shape[:-1] + self._output_shape)
        if self._dist == 'normal':
            # Unit-variance Gaussian centered on the network output.
            return td.independent.Independent(td.Normal(out, 1), len(self._output_shape))
        if self._dist == 'binary':
            return td.independent.Independent(td.Bernoulli(logits=out), len(self._output_shape))
        raise NotImplementedError(self._dist)
|
class SampleDist():
    """Monte-Carlo wrapper estimating statistics of a distribution by sampling.

    Used for distributions (e.g. tanh-transformed Gaussians) whose mean,
    mode and entropy have no closed form. Unknown attributes are delegated
    to the wrapped distribution.
    """

    def __init__(self, dist: torch.distributions.Distribution, samples=100):
        self._dist = dist
        self._samples = samples

    @property
    def name(self):
        return 'SampleDist'

    def __getattr__(self, name):
        # Fall through to the wrapped distribution for anything not defined here.
        return getattr(self._dist, name)

    def _expanded(self):
        # Broadcast so a single rsample() produces `samples` draws per batch entry.
        return self._dist.expand((self._samples, *self._dist.batch_shape))

    def mean(self):
        """Empirical mean over reparameterized draws."""
        return self._expanded().rsample().mean(0)

    def mode(self):
        """Approximate mode: the drawn sample with the highest log-probability."""
        dist = self._expanded()
        draws = dist.rsample()
        logprob = dist.log_prob(draws)
        n_batch = draws.size(1)
        n_feat = draws.size(2)
        best = torch.argmax(logprob, dim=0).reshape(1, n_batch, 1).expand(1, n_batch, n_feat)
        return torch.gather(draws, 0, best).squeeze(0)

    def entropy(self):
        """Monte-Carlo entropy estimate: -E[log p(x)]."""
        dist = self._expanded()
        return -dist.log_prob(dist.rsample()).mean(0)

    def sample(self):
        return self._dist.sample()
|
class ObservationEncoder(nn.Module):
    """Four-layer CNN encoding image observations into a flat embedding."""

    def __init__(self, depth=32, stride=2, shape=(3, 64, 64), activation=nn.ReLU):
        super().__init__()
        # Channel widths double at each of the four conv layers.
        channels = [shape[0], 1 * depth, 2 * depth, 4 * depth, 8 * depth]
        layers = []
        for n_in, n_out in zip(channels[:-1], channels[1:]):
            layers += [nn.Conv2d(n_in, n_out, 4, stride), activation()]
        self.convolutions = nn.Sequential(*layers)
        self.shape = shape
        self.stride = stride
        self.depth = depth

    def forward(self, obs):
        """Encode ``obs`` of size(*batch, c, h, w) into size(*batch, embed_size)."""
        batch_shape = obs.shape[:-3]
        img_shape = obs.shape[-3:]
        embed = self.convolutions(obs.reshape(-1, *img_shape))
        return torch.reshape(embed, (*batch_shape, -1))

    @property
    def embed_size(self):
        """Flattened size of the final conv output for the configured input shape."""
        out_shape = self.shape[1:]
        for _ in range(4):
            out_shape = conv_out_shape(out_shape, 0, 4, self.stride)
        return 8 * self.depth * np.prod(out_shape).item()
|
class ObservationDecoder(nn.Module):
    """Transposed-CNN decoder mapping a flat embedding back to an image Normal."""

    def __init__(self, depth=32, stride=2, activation=nn.ReLU, embed_size=1024, shape=(3, 64, 64)):
        super().__init__()
        self.depth = depth
        self.shape = shape
        padding = 0
        # Kernel sizes of the (virtual) forward conv stack, first to last.
        kernels = (6, 6, 5, 5)
        # Track the feature-map size after each forward conv so each transposed
        # conv can invert it exactly via output_padding.
        sizes = [shape[1:]]
        pads = []
        for k in kernels:
            nxt = conv_out_shape(sizes[-1], padding, k, stride)
            pads.append(output_padding_shape(sizes[-1], nxt, padding, k, stride))
            sizes.append(nxt)
        final_shape = sizes[-1]
        self.conv_shape = (32 * depth, *final_shape)
        self.linear = nn.Linear(embed_size, 32 * depth * np.prod(final_shape).item())
        # The decoder mirrors the conv stack in reverse order.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(32 * depth, 4 * depth, kernels[3], stride, output_padding=pads[3]),
            activation(),
            nn.ConvTranspose2d(4 * depth, 2 * depth, kernels[2], stride, output_padding=pads[2]),
            activation(),
            nn.ConvTranspose2d(2 * depth, 1 * depth, kernels[1], stride, output_padding=pads[1]),
            activation(),
            nn.ConvTranspose2d(1 * depth, shape[0], kernels[0], stride, output_padding=pads[0]),
        )

    def forward(self, x):
        """Decode embeddings into a unit-variance Normal over images.

        :param x: size(*batch_shape, embed_size)
        :return: obs_dist with event shape ``self.shape``
        """
        batch_shape = x.shape[:-1]
        squeezed = np.prod(batch_shape).item()
        hidden = self.linear(x.reshape(squeezed, x.shape[-1]))
        decoded = self.decoder(torch.reshape(hidden, (squeezed, *self.conv_shape)))
        mean = torch.reshape(decoded, (*batch_shape, *self.shape))
        return td.Independent(td.Normal(mean, 1), len(self.shape))
|
def conv_out(h_in, padding, kernel_size, stride):
    """Output length along one spatial dimension of a convolution."""
    span = h_in + 2.0 * padding - (kernel_size - 1.0) - 1.0
    return int(span / stride + 1.0)
|
def output_padding(h_in, conv_out, padding, kernel_size, stride):
    """output_padding a ConvTranspose needs to map ``conv_out`` back to ``h_in``."""
    reconstructed = (conv_out - 1) * stride - 2 * padding + (kernel_size - 1) + 1
    return h_in - reconstructed
|
def conv_out_shape(h_in, padding, kernel_size, stride):
    """Apply conv_out to every spatial dimension of ``h_in``."""
    return tuple(conv_out(dim, padding, kernel_size, stride) for dim in h_in)
|
def output_padding_shape(h_in, conv_out, padding, kernel_size, stride):
    """Per-dimension output_padding for inverting a convolution (h_in and conv_out must align)."""
    return tuple(output_padding(h, c, padding, kernel_size, stride) for h, c in zip(h_in, conv_out))
|
def stack_states(rssm_states: list, dim):
    """Stack a sequence of RSSMStates field-wise along ``dim``."""
    # Field order matches the RSSMState constructor: mean, std, stoch, deter.
    fields = ('mean', 'std', 'stoch', 'deter')
    stacked = (torch.stack([getattr(s, f) for s in rssm_states], dim=dim) for f in fields)
    return RSSMState(*stacked)
|
def get_feat(rssm_state: RSSMState):
    """Concatenate stochastic and deterministic parts into a feature vector."""
    return torch.cat([rssm_state.stoch, rssm_state.deter], dim=-1)
|
def get_dist(rssm_state: RSSMState):
    """Diagonal Gaussian over the stochastic state, independent per dimension."""
    base = td.Normal(rssm_state.mean, rssm_state.std)
    return td.independent.Independent(base, 1)
|
class TransitionBase(nn.Module):
    """Interface for prior transition models: (action, state) -> next state."""

    def forward(self, prev_action, prev_state):
        """:return: next state"""
        raise NotImplementedError
|
class RepresentationBase(nn.Module):
    """Interface for posterior models: (obs_embed, action, state) -> next state."""

    def forward(self, obs_embed, prev_action, prev_state):
        """:return: next state"""
        raise NotImplementedError
|
class RollOutModule(nn.Module):
    """Interface for modules that unroll a state model over multiple steps."""

    def forward(self, steps, obs_embed, prev_action, prev_state):
        raise NotImplementedError
|
class RSSMTransition(TransitionBase):
    """RSSM prior: predicts the next latent state from action and previous state."""

    def __init__(self, action_size, stochastic_size=30, deterministic_size=200, hidden_size=200, activation=nn.ELU, distribution=td.Normal):
        super().__init__()
        self._action_size = action_size
        self._stoch_size = stochastic_size
        self._deter_size = deterministic_size
        self._hidden_size = hidden_size
        self._activation = activation
        self._cell = nn.GRUCell(hidden_size, deterministic_size)
        self._rnn_input_model = self._build_rnn_input_model()
        self._stochastic_prior_model = self._build_stochastic_model()
        self._dist = distribution

    def _build_rnn_input_model(self):
        # Projects [action, stochastic state] into the GRU input space.
        rnn_input_model = [nn.Linear((self._action_size + self._stoch_size), self._hidden_size)]
        rnn_input_model += [self._activation()]
        return nn.Sequential(*rnn_input_model)

    def _build_stochastic_model(self):
        # Maps the GRU (deterministic) state to the prior's mean and std.
        # Fix: the first layer's input is the GRU output of size _deter_size;
        # the original used _hidden_size, which only worked because the
        # defaults make hidden_size == deterministic_size (200 == 200) and
        # crashed for any other configuration. Identical at the defaults.
        stochastic_model = [nn.Linear(self._deter_size, self._hidden_size)]
        stochastic_model += [self._activation()]
        stochastic_model += [nn.Linear(self._hidden_size, (2 * self._stoch_size))]
        return nn.Sequential(*stochastic_model)

    def initial_state(self, batch_size, **kwargs):
        """All-zero RSSMState for the start of a sequence."""
        return RSSMState(torch.zeros(batch_size, self._stoch_size, **kwargs), torch.zeros(batch_size, self._stoch_size, **kwargs), torch.zeros(batch_size, self._stoch_size, **kwargs), torch.zeros(batch_size, self._deter_size, **kwargs))

    def forward(self, prev_action: torch.Tensor, prev_state: RSSMState):
        """One prior step: GRU update, then sample the stochastic state.

        :param prev_action: size(batch, action_size)
        :param prev_state: RSSMState, size(batch, ...)
        :return: RSSMState prior for the next step
        """
        rnn_input = self._rnn_input_model(torch.cat([prev_action, prev_state.stoch], dim=-1))
        deter_state = self._cell(rnn_input, prev_state.deter)
        (mean, std) = torch.chunk(self._stochastic_prior_model(deter_state), 2, dim=-1)
        # Softplus keeps std positive; +0.1 floors it for numerical stability.
        # NOTE(review): ``tf`` is presumably torch.nn.functional aliased at
        # import (elsewhere in this file F.softplus is used) — confirm against
        # the file header.
        std = tf.softplus(std) + 0.1
        dist = self._dist(mean, std)
        stoch_state = dist.rsample()
        return RSSMState(mean, std, stoch_state, deter_state)
|
class RSSMRepresentation(RepresentationBase):
    """RSSM posterior: fuses the prior's deterministic state with an observation embedding."""

    def __init__(self, transition_model: RSSMTransition, obs_embed_size, action_size, stochastic_size=30, deterministic_size=200, hidden_size=200, activation=nn.ELU, distribution=td.Normal):
        super().__init__()
        self._transition_model = transition_model
        self._obs_embed_size = obs_embed_size
        self._action_size = action_size
        self._stoch_size = stochastic_size
        self._deter_size = deterministic_size
        self._hidden_size = hidden_size
        self._activation = activation
        self._dist = distribution
        self._stochastic_posterior_model = self._build_stochastic_model()

    def _build_stochastic_model(self):
        # Maps [deterministic state, observation embedding] to posterior mean/std.
        return nn.Sequential(
            nn.Linear(self._deter_size + self._obs_embed_size, self._hidden_size),
            self._activation(),
            nn.Linear(self._hidden_size, 2 * self._stoch_size),
        )

    def initial_state(self, batch_size, **kwargs):
        """All-zero RSSMState for the start of a sequence."""
        def zeros(size):
            return torch.zeros(batch_size, size, **kwargs)
        return RSSMState(zeros(self._stoch_size), zeros(self._stoch_size), zeros(self._stoch_size), zeros(self._deter_size))

    def forward(self, obs_embed: torch.Tensor, prev_action: torch.Tensor, prev_state: RSSMState):
        """One posterior step.

        :return: (prior_state, posterior_state)
        """
        prior_state = self._transition_model(prev_action, prev_state)
        posterior_input = torch.cat([prior_state.deter, obs_embed], -1)
        mean, std = torch.chunk(self._stochastic_posterior_model(posterior_input), 2, dim=-1)
        # Softplus keeps std positive; +0.1 floors it for numerical stability.
        std = tf.softplus(std) + 0.1
        stoch_state = self._dist(mean, std).rsample()
        posterior_state = RSSMState(mean, std, stoch_state, prior_state.deter)
        return prior_state, posterior_state
|
class RSSMRollout(RollOutModule):
    """Unrolls the RSSM over time, with or without observations."""

    def __init__(self, representation_model: RSSMRepresentation, transition_model: RSSMTransition):
        super().__init__()
        self.representation_model = representation_model
        self.transition_model = transition_model

    def forward(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor, prev_state: RSSMState):
        return self.rollout_representation(steps, obs_embed, action, prev_state)

    def rollout_representation(self, steps: int, obs_embed: torch.Tensor, action: torch.Tensor, prev_state: RSSMState):
        """Roll out the model with actions and observations from data.

        :param steps: number of steps to roll out
        :param obs_embed: size(time_steps, batch_size, embedding_size)
        :param action: size(time_steps, batch_size, action_size)
        :param prev_state: RSSM state, size(batch_size, state_size)
        :return: prior, posterior states. size(time_steps, batch_size, state_size)
        """
        priors = []
        posteriors = []
        state = prev_state
        for t in range(steps):
            prior_state, posterior_state = self.representation_model(obs_embed[t], action[t], state)
            priors.append(prior_state)
            posteriors.append(posterior_state)
            # The posterior conditions the next step.
            state = posterior_state
        return stack_states(priors, dim=0), stack_states(posteriors, dim=0)

    def rollout_transition(self, steps: int, action: torch.Tensor, prev_state: RSSMState):
        """Roll out the prior only, with actions from data.

        :param steps: number of steps to roll out
        :param action: size(time_steps, batch_size, action_size)
        :param prev_state: RSSM state, size(batch_size, state_size)
        :return: prior states. size(time_steps, batch_size, state_size)
        """
        states = []
        state = prev_state
        for t in range(steps):
            state = self.transition_model(action[t], state)
            states.append(state)
        return stack_states(states, dim=0)

    def rollout_policy(self, steps: int, policy, prev_state: RSSMState):
        """Roll out the prior with actions chosen by ``policy``.

        :param steps: number of steps to roll out
        :param policy: RSSMState -> action
        :param prev_state: RSSM state, size(batch_size, state_size)
        :return: (next states, actions): size(time_steps, batch_size, ...)
        """
        state = buffer_method(prev_state, 'detach')
        trajectory = []
        chosen_actions = []
        for t in range(steps):
            # Detach the state fed to the policy so policy gradients do not
            # flow back through the dynamics at this point.
            act, _ = policy(buffer_method(state, 'detach'))
            state = self.transition_model(act, state)
            trajectory.append(state)
            chosen_actions.append(act)
        return stack_states(trajectory, dim=0), torch.stack(chosen_actions, dim=0)
|
def get_log_dir(experiment_name):
    """Return LOG_DIR/local/<YYYYMMDD>/<experiment_name> for today's date."""
    today = datetime.datetime.today().strftime('%Y%m%d')
    return osp.join(LOG_DIR, 'local', today, experiment_name)
|
def log_exps_tree(exp_dir, log_dirs, runs_per_setting):
    """Write experiments_tree.txt summarizing the planned experiment runs.

    Records the manager PID, the number of settings (and total run count,
    runs_per_setting * len(log_dirs)), then one line per log directory.

    :param exp_dir: experiment root directory (created if missing)
    :param log_dirs: list of per-setting log directories
    :param runs_per_setting: number of repeated runs per setting
    """
    os.makedirs(exp_dir, exist_ok=True)
    total_runs = runs_per_setting * len(log_dirs)
    with open(osp.join(exp_dir, 'experiments_tree.txt'), 'w') as f:
        f.write(f'Experiment manager process ID: {os.getpid()}.\n')
        f.write(f'Number of settings (experiments) to run: {len(log_dirs)} ({total_runs}).\n')
        # Plain loop instead of the original side-effect list comprehension.
        for log_dir in log_dirs:
            f.write(log_dir + '\n')
|
def log_num_launched(exp_dir, n, total):
    """Overwrite num_launched.txt with the current launch progress (n of total)."""
    progress_path = osp.join(exp_dir, 'num_launched.txt')
    with open(progress_path, 'w') as f:
        f.write(f'Experiments launched so far: {n} out of {total}.\n')
|
def launch_experiment(script, run_slot, affinity_code, log_dir, variant, run_ID, args):
    """Launches one learning run using ``subprocess.Popen()`` to call the
    python script. Calls the script as:
    ``python {script} {slot_affinity_code} {log_dir} {run_ID} {*args}``
    If ``affinity_code["all_cpus"]`` is provided, then the call is prepended
    with ``taskset -c ..`` and the listed cpus (this is the most sure way to
    keep the run limited to these CPU cores). Also saves the `variant` file.
    Returns the process handle, which can be monitored.
    """
    slot_affinity_code = prepend_run_slot(run_slot, affinity_code)
    affinity = affinity_from_code(slot_affinity_code)
    # Collect the CPU list for taskset pinning, if the affinity provides one.
    if isinstance(affinity, dict) and affinity.get('all_cpus', False):
        cpus = ','.join(str(c) for c in affinity['all_cpus'])
    elif isinstance(affinity, list) and affinity[0].get('all_cpus', False):
        cpus = ','.join(str(c) for aff in affinity for c in aff['all_cpus'])
    else:
        cpus = ()
    call_list = ['taskset', '-c', cpus] if cpus else []
    call_list += ['python', script, slot_affinity_code, log_dir, str(run_ID)]
    call_list += [str(a) for a in args]
    save_variant(variant, log_dir)
    print('\ncall string:\n', ' '.join(call_list))
    return subprocess.Popen(call_list)
|
def run_experiments(script, affinity_code, experiment_title, runs_per_setting, variants, log_dirs, common_args=None, runs_args=None, data_dir=None):
    "Call in a script to run a set of experiments locally on a machine. Uses\n    the ``launch_experiment()`` function for each individual run, which is a\n    call to the ``script`` file. The number of experiments to run at the same\n    time is determined from the ``affinity_code``, which expresses the hardware\n    resources of the machine and how much resource each run gets (e.g. 4 GPU\n    machine, 2 GPUs per run). Experiments are queued and run in sequence, with\n    the intention to avoid hardware overlap. Inputs ``variants`` and ``log_dirs``\n    should be lists of the same length, containing each experiment configuration\n    and where to save its log files (which have the same name, so can't exist\n    in the same folder).\n\n    Hint:\n        To monitor progress, view the `num_launched.txt` file and `experiments_tree.txt`\n        file in the experiment root directory, and also check the length of each\n        `progress.csv` file, e.g. ``wc -l experiment-directory/.../run_*/progress.csv``.\n    "
    # NOTE(review): ``data_dir`` is accepted but unused in this body —
    # presumably kept for API compatibility; confirm with callers.
    n_run_slots = get_n_run_slots(affinity_code)
    exp_dir = get_log_dir(experiment_title)
    # One slot per concurrently-running experiment; procs[i] holds its Popen handle.
    procs = ([None] * n_run_slots)
    common_args = (() if (common_args is None) else common_args)
    assert (len(variants) == len(log_dirs))
    if (runs_args is None):
        runs_args = ([()] * len(variants))
    assert (len(runs_args) == len(variants))
    log_exps_tree(exp_dir, log_dirs, runs_per_setting)
    (num_launched, total) = (0, (runs_per_setting * len(variants)))
    for run_ID in range(runs_per_setting):
        for (variant, log_dir, run_args) in zip(variants, log_dirs, runs_args):
            launched = False
            log_dir = osp.join(exp_dir, log_dir)
            os.makedirs(log_dir, exist_ok=True)
            # Wait for a free slot: a slot is free if it never ran anything
            # (None) or its process has exited (poll() returns non-None).
            while (not launched):
                for (run_slot, p) in enumerate(procs):
                    if ((p is None) or (p.poll() is not None)):
                        procs[run_slot] = launch_experiment(script=script, run_slot=run_slot, affinity_code=affinity_code, log_dir=log_dir, variant=variant, run_ID=run_ID, args=(common_args + run_args))
                        launched = True
                        num_launched += 1
                        log_num_launched(exp_dir, num_launched, total)
                        break
                if (not launched):
                    # All slots busy; poll again in 10 seconds.
                    time.sleep(10)
    # Wait for the final batch of runs to finish before returning.
    for p in procs:
        if (p is not None):
            p.wait()
|
def video_summary(tag, video, step=None, fps=20):
    """Log a video tensor to the active TensorBoard SummaryWriter."""
    tb_writer: SummaryWriter = logger.get_tf_summary_writer()
    tb_writer.add_video(tag=tag, vid_tensor=video, global_step=step, fps=fps)
|
def get_parameters(modules: Iterable[Module]):
    """
    Given a list of torch modules, returns a list of their parameters.
    :param modules: iterable of modules
    :returns: a list of parameters
    """
    return [param for module in modules for param in module.parameters()]
|
class FreezeParameters():
    """
    Context manager to locally freeze gradients.
    In some cases with can speed up computation because gradients aren't calculated for these listed modules.
    example:
    ```
    with FreezeParameters([module]):
        output_tensor = module(input_tensor)
    ```
    :param modules: iterable of modules. used to call .parameters() to freeze gradients.
    """

    def __init__(self, modules: Iterable[Module]):
        self.modules = modules
        # Remember each parameter's original requires_grad so it can be restored.
        self.param_states = [p.requires_grad for p in self._params()]

    def _params(self):
        # Flatten the parameters of all tracked modules (inlined helper).
        return [p for m in self.modules for p in m.parameters()]

    def __enter__(self):
        for param in self._params():
            param.requires_grad = False

    def __exit__(self, exc_type, exc_val, exc_tb):
        for i, param in enumerate(self._params()):
            param.requires_grad = self.param_states[i]
|
def build_and_train(log_dir, game='pong', run_ID=0, cuda_idx=None, eval=False, save_model='last', load_model_path=None):
    """Configure and run Dreamer on an Atari game with a serial sampler.

    :param log_dir: root directory for logger output
    :param game: Atari game name
    :param run_ID: run index used by the logger context
    :param cuda_idx: GPU index, or None for CPU
    :param eval: if True use MinibatchRlEval (periodic evaluation) instead of
        MinibatchRl. NOTE(review): shadows the builtin ``eval``; kept for
        caller compatibility.
    :param save_model: snapshot mode passed to the logger context
    :param load_model_path: optional checkpoint to restore agent/optimizer state
    """
    # Restore saved state dicts when resuming from a checkpoint.
    params = (torch.load(load_model_path) if load_model_path else {})
    agent_state_dict = params.get('agent_state_dict')
    optimizer_state_dict = params.get('optimizer_state_dict')
    action_repeat = 2
    env_kwargs = dict(name=game, action_repeat=action_repeat, size=(64, 64), grayscale=False, life_done=True, sticky_actions=True)
    # TimeLimit duration is divided by action_repeat so the episode length in
    # environment frames stays constant.
    factory_method = make_wapper(AtariEnv, [OneHotAction, TimeLimit], [dict(), dict(duration=(1000 / action_repeat))])
    sampler = SerialSampler(EnvCls=factory_method, TrajInfoCls=AtariTrajInfo, env_kwargs=env_kwargs, eval_env_kwargs=env_kwargs, batch_T=1, batch_B=1, max_decorrelation_steps=0, eval_n_envs=10, eval_max_steps=int(10000.0), eval_max_trajectories=5)
    algo = Dreamer(horizon=10, kl_scale=0.1, use_pcont=True, initial_optim_state_dict=optimizer_state_dict)
    # Epsilon-greedy exploration with decaying noise, as in Dreamer for Atari.
    agent = AtariDreamerAgent(train_noise=0.4, eval_noise=0, expl_type='epsilon_greedy', expl_min=0.1, expl_decay=(2000 / 0.3), initial_model_state_dict=agent_state_dict, model_kwargs=dict(use_pcont=True))
    runner_cls = (MinibatchRlEval if eval else MinibatchRl)
    runner = runner_cls(algo=algo, agent=agent, sampler=sampler, n_steps=5000000.0, log_interval_steps=1000.0, affinity=dict(cuda_idx=cuda_idx))
    config = dict(game=game)
    name = ('dreamer_' + game)
    with logger_context(log_dir, run_ID, name, config, snapshot_mode=save_model, override_prefix=True, use_summary_writer=True):
        runner.train()
|
def build_and_train(log_dir, game='cartpole_balance', run_ID=0, cuda_idx=None, eval=False, save_model='last', load_model_path=None):
    """Configure and run Dreamer on a DeepMind Control Suite task.

    :param log_dir: root directory for logger output
    :param game: DMC task name (e.g. 'cartpole_balance')
    :param run_ID: run index used by the logger context
    :param cuda_idx: GPU index, or None for CPU
    :param eval: if True use MinibatchRlEval (periodic evaluation) instead of
        MinibatchRl. NOTE(review): shadows the builtin ``eval``; kept for
        caller compatibility.
    :param save_model: snapshot mode passed to the logger context
    :param load_model_path: optional checkpoint to restore agent/optimizer state
    """
    # Restore saved state dicts when resuming from a checkpoint.
    params = (torch.load(load_model_path) if load_model_path else {})
    agent_state_dict = params.get('agent_state_dict')
    optimizer_state_dict = params.get('optimizer_state_dict')
    action_repeat = 2
    # TimeLimit duration is divided by action_repeat so episode length in
    # environment frames stays constant.
    factory_method = make_wapper(DeepMindControl, [ActionRepeat, NormalizeActions, TimeLimit], [dict(amount=action_repeat), dict(), dict(duration=(1000 / action_repeat))])
    sampler = SerialSampler(EnvCls=factory_method, TrajInfoCls=TrajInfo, env_kwargs=dict(name=game), eval_env_kwargs=dict(name=game), batch_T=1, batch_B=1, max_decorrelation_steps=0, eval_n_envs=10, eval_max_steps=int(10000.0), eval_max_trajectories=5)
    algo = Dreamer(initial_optim_state_dict=optimizer_state_dict)
    # Additive Gaussian exploration noise, as in Dreamer for continuous control.
    agent = DMCDreamerAgent(train_noise=0.3, eval_noise=0, expl_type='additive_gaussian', expl_min=None, expl_decay=None, initial_model_state_dict=agent_state_dict)
    runner_cls = (MinibatchRlEval if eval else MinibatchRl)
    runner = runner_cls(algo=algo, agent=agent, sampler=sampler, n_steps=5000000.0, log_interval_steps=1000.0, affinity=dict(cuda_idx=cuda_idx))
    config = dict(game=game)
    name = ('dreamer_' + game)
    with logger_context(log_dir, run_ID, name, config, snapshot_mode=save_model, override_prefix=True, use_summary_writer=True):
        runner.train()
|
@pytest.mark.parametrize('dist', ['tanh_normal', 'one_hot', 'relaxed_one_hot', 'not_implemented_dist'])
def test_action_decoder(dist):
    """ActionDecoder statistics have the right shapes and support backprop."""
    batch_size, action_size = 4, 10
    feature_size, hidden_size, layers = 20, 40, 5
    try:
        action_decoder = ActionDecoder(action_size, feature_size, hidden_size, layers, dist)
    except NotImplementedError:
        # Unknown dist names must fail fast at construction time.
        return
    action_dist = action_decoder(torch.randn(batch_size, feature_size))
    if dist == 'tanh_normal':
        mean = action_dist.mean()
        mode = action_dist.mode()
        ent = action_dist.entropy()
        for stat in (mean, mode):
            assert isinstance(stat, torch.Tensor)
            assert stat.shape == (batch_size, action_size)
        assert isinstance(ent, torch.Tensor)
        assert ent.shape == (batch_size,)
        # Gradients must flow through the Monte-Carlo statistics.
        target = torch.randn(batch_size, action_size)
        loss = torch.sum((mean - target) ** 2)
        loss = loss + torch.sum((mode - target) ** 2)
        loss = loss - torch.sum(ent)
        loss.backward()
    elif dist == 'one_hot':
        mean = action_dist.mean
        ent = action_dist.entropy()
        assert isinstance(mean, torch.Tensor)
        assert mean.shape == (batch_size, action_size)
        assert isinstance(ent, torch.Tensor)
        assert ent.shape == (batch_size,)
|
@pytest.mark.parametrize('dist', ['tanh_normal', 'one_hot', 'relaxed_one_hot'])
def test_agent(dist):
    """AgentModel end-to-end: representation, policy, and transition shapes."""
    batch_size = 1
    action_shape = (2,)
    deter_size = 200
    obs_shape = (3, 64, 64)
    model = AgentModel(action_shape, deterministic_size=deter_size, image_shape=obs_shape, action_dist=dist)
    obs = torch.randn(batch_size, *obs_shape)
    state = model.get_state_representation(obs, None, None)
    assert state.deter.shape == (batch_size, deter_size)
    action, _ = model.policy(state)
    assert action.shape == (batch_size, *action_shape)
    model.eval()
    try:
        # The base-class forward is allowed to be unimplemented.
        _ = model(obs, None, None)
    except NotImplementedError:
        pass
    next_state = model.get_state_transition(action, state)
    assert next_state.deter.shape == (batch_size, deter_size)
|
@pytest.mark.parametrize('dist', ['normal', 'binary'])
def test_dense_model(dist):
    """DenseModel produces a sample of the configured output shape."""
    out_shape = (1,)
    units = 20
    feature_size = 20
    layers = 5
    batch = 2
    feats = torch.randn((batch, feature_size))
    try:
        model = DenseModel(feature_size, out_shape, layers, units, dist)
    except NotImplementedError:
        return
    sample = model(feats).sample()
    assert isinstance(sample, torch.Tensor)
    assert sample.size() == (batch, *out_shape)
|
def test_dist():
    """SampleDist preserves batch/event shapes of a tanh-transformed Gaussian."""
    batch_size, dist_size, samples = 4, 3, 10
    base = torch.distributions.Normal(torch.randn(batch_size, dist_size), torch.rand(batch_size, dist_size))
    transform = TanhBijector()
    _ = transform.sign
    squashed = torch.distributions.TransformedDistribution(base, transform)
    wrapped = torch.distributions.Independent(squashed, 1)
    dist = SampleDist(wrapped, samples)
    _ = dist.name
    assert dist.event_shape == (dist_size,)
    assert dist.batch_shape == (batch_size,)
|
def test_observation_encoder(shape=(3, 64, 64)):
    """Encoder maps a batch of images to 1024-dim embeddings."""
    encoder = ObservationEncoder()
    batch_size = 2
    channels, height, width = shape
    images = torch.randn(batch_size, channels, height, width)
    with torch.no_grad():
        embedding: torch.Tensor = encoder(images)
    assert embedding.size(0) == batch_size
    assert embedding.size(1) == 1024
|
def test_observation_decoder(shape=(3, 64, 64)):
    """Decoder reconstructs image-shaped samples from flat and time-major embeddings.

    Checks a (batch, embed) input and a (batch, horizon, embed) input: the
    decoder must handle arbitrary leading batch dimensions. The original
    test repeated the horizon case twice verbatim; the duplicate is removed.
    """
    decoder = ObservationDecoder()
    batch_size = 2
    (c, h, w) = shape
    embedding = torch.randn(batch_size, 1024)
    with torch.no_grad():
        obs_dist: torch.distributions.Normal = decoder(embedding)
        obs_sample: torch.Tensor = obs_dist.sample()
    assert obs_sample.shape == (batch_size, c, h, w)
    # Add a time dimension: the decoder should broadcast over extra leading dims.
    horizon = 4
    embedding = torch.randn(batch_size, horizon, 1024)
    with torch.no_grad():
        obs_dist = decoder(embedding)
        obs_sample = obs_dist.sample()
    assert obs_sample.shape == (batch_size, horizon, c, h, w)
|
@pytest.mark.parametrize('shape', [(3, 64, 64), (4, 104, 64)])
def test_observation(shape):
    """Encoder/decoder round trips preserve image and embedding shapes."""
    batch_size = 2
    channels, height, width = shape
    encoder = ObservationEncoder(shape=shape)
    decoder = ObservationDecoder(embed_size=encoder.embed_size, shape=shape)
    images = torch.randn(batch_size, channels, height, width)
    # Image -> embedding -> image distribution.
    with torch.no_grad():
        image_dist: torch.distributions.Normal = decoder(encoder(images))
        recon: torch.Tensor = image_dist.sample()
    assert recon.size(0) == batch_size
    assert recon.size(1) == channels
    assert recon.size(2) == height
    assert recon.size(3) == width
    # Embedding -> image -> embedding.
    embed = torch.randn(batch_size, encoder.embed_size)
    with torch.no_grad():
        embed = encoder(decoder(embed).sample())
    assert embed.size(0) == batch_size
    assert embed.size(1) == encoder.embed_size
|
def test_observation_reconstruction(shape=(4, 104, 64)):
    """Hand-built conv stack and mirrored ConvTranspose stack reconstruct the input shape.

    Mirrors the kernel/stride/padding arithmetic used by ObservationDecoder,
    so the literal layer sizes here are intentional.
    """
    batch_size = 2
    (c, h, w) = shape
    depth = 32
    stride = 2
    activation = torch.nn.ReLU
    # Forward conv stack; after each layer, compute the output spatial shape
    # and the output_padding the inverse ConvTranspose will need.
    conv1 = torch.nn.Conv2d(c, (1 * depth), 6, stride)
    conv1_shape = conv_out_shape((h, w), 0, 6, 2)
    conv1_pad = output_padding_shape((h, w), conv1_shape, 0, 6, 2)
    conv2 = torch.nn.Conv2d((1 * depth), (2 * depth), 6, stride)
    conv2_shape = conv_out_shape(conv1_shape, 0, 6, 2)
    conv2_pad = output_padding_shape(conv1_shape, conv2_shape, 0, 6, 2)
    conv3 = torch.nn.Conv2d((2 * depth), (4 * depth), 5, stride)
    conv3_shape = conv_out_shape(conv2_shape, 0, 5, 2)
    conv3_pad = output_padding_shape(conv2_shape, conv3_shape, 0, 5, 2)
    conv4 = torch.nn.Conv2d((4 * depth), (32 * depth), 5, stride)
    conv4_shape = conv_out_shape(conv3_shape, 0, 5, 2)
    conv4_pad = output_padding_shape(conv3_shape, conv4_shape, 0, 5, 2)
    # Decoder inverts the stack layer by layer, in reverse order.
    decoder = torch.nn.Sequential(torch.nn.ConvTranspose2d((32 * depth), (4 * depth), 5, stride, output_padding=conv4_pad), activation(), torch.nn.ConvTranspose2d((4 * depth), (2 * depth), 5, stride, output_padding=conv3_pad), activation(), torch.nn.ConvTranspose2d((2 * depth), (1 * depth), 6, stride, output_padding=conv2_pad), activation(), torch.nn.ConvTranspose2d((1 * depth), shape[0], 6, stride, output_padding=conv1_pad))
    image_obs = torch.randn(batch_size, c, h, w)
    x1 = conv1(image_obs)
    x2 = conv2(x1)
    x3 = conv3(x2)
    x4 = conv4(x3)
    assert (x4.shape == (batch_size, (32 * depth), *conv4_shape))
    # Round trip must restore the exact input shape.
    reconstructed = decoder(x4)
    assert (reconstructed.shape == image_obs.shape)
|
def test_rssm():
    """Single representation-model step: both the prior and the posterior
    state must carry the configured stochastic/deterministic widths."""
    action_size, obs_embed_size = 10, 100
    stochastic_size, deterministic_size = 30, 200
    batch_size = 4
    transition = RSSMTransition(action_size, stochastic_size, deterministic_size)
    representation = RSSMRepresentation(
        transition, obs_embed_size, action_size, stochastic_size, deterministic_size)
    obs_embed = torch.randn(batch_size, obs_embed_size)
    prev_action = torch.randn(batch_size, action_size)
    prior, posterior = representation(
        obs_embed, prev_action, representation.initial_state(batch_size))
    # Same width checks apply to both output states.
    for state in (prior, posterior):
        assert state.stoch.size(1) == stochastic_size
        assert state.deter.size(1) == deterministic_size
|
def test_rollouts():
    """Exercise the three RSSMRollout entry points (representation, pure
    transition, policy) and verify every stacked state tensor is shaped
    (time_steps, batch_size, feature_size)."""
    action_size = 10
    obs_embed_size = 100
    stochastic_size = 30
    deterministic_size = 200
    batch_size = 4
    time_steps = 10

    def check_stacked_state(state):
        # All four fields of a rolled-out RSSMState stack time first.
        assert isinstance(state, RSSMState)
        stoch_shape = (time_steps, batch_size, stochastic_size)
        assert state.mean.shape == stoch_shape
        assert state.std.shape == stoch_shape
        assert state.stoch.shape == stoch_shape
        assert state.deter.shape == (time_steps, batch_size, deterministic_size)

    transition_model = RSSMTransition(action_size, stochastic_size, deterministic_size)
    representation_model = RSSMRepresentation(
        transition_model, obs_embed_size, action_size, stochastic_size, deterministic_size)
    rollout_module = RSSMRollout(representation_model, transition_model)

    obs_embed = torch.randn(time_steps, batch_size, obs_embed_size)
    action = torch.randn(time_steps, batch_size, action_size)

    # 1) Full representation rollout: prior and posterior sequences.
    prior, post = rollout_module(
        time_steps, obs_embed, action, representation_model.initial_state(batch_size))
    check_stacked_state(prior)
    check_stacked_state(post)

    # 2) Transition-only rollout from a fresh initial state.
    prior = rollout_module.rollout_transition(
        time_steps, action, transition_model.initial_state(batch_size))
    check_stacked_state(prior)

    # 3) Policy rollout driven by a random Gaussian policy.
    def policy(state):
        n = state.stoch.size(0)
        sampled_action = torch.randn(n, action_size)
        action_dist = SampleDist(torch.distributions.Normal(
            torch.randn(n, action_size), torch.rand(n, action_size)))
        return sampled_action, action_dist

    prior, actions = rollout_module.rollout_policy(time_steps, policy, post[-1])
    check_stacked_state(prior)
    assert isinstance(actions, torch.Tensor)
    assert actions.shape == (time_steps, batch_size, action_size)
|
def test_freeze_parameters():
    """FreezeParameters must stop gradient tracking only for the wrapped
    modules, and restore each parameter's prior requires_grad flag on exit."""
    first = nn.Linear(4, 3)
    second = nn.Linear(3, 2)

    # Only `second` frozen: the chain still builds a graph through `first`.
    with FreezeParameters([second]):
        result = second(first(torch.randn(4)))
    assert result.grad_fn is not None

    # Input fed straight into the frozen module: no graph at all.
    with FreezeParameters([second]):
        result = second(torch.randn(3))
    assert result.grad_fn is None

    # Both modules frozen: whole chain detached.
    sample = torch.randn(4)
    with FreezeParameters([first, second]):
        result = second(first(sample))
    assert result.grad_fn is None

    # Mixed pre-existing flags must survive a freeze/unfreeze round trip.
    second.weight.requires_grad = False
    second.bias.requires_grad = True
    with FreezeParameters([second]):
        result = second(first(sample))
    assert result.grad_fn is not None  # `first` still tracks gradients
    assert not second.weight.requires_grad
    assert second.bias.requires_grad

    with FreezeParameters([first, second]):
        result = second(first(sample))
    assert result.grad_fn is None
    assert not second.weight.requires_grad
    assert second.bias.requires_grad
|
def test_get_parameters():
    """get_parameters flattens the parameter lists of all given modules;
    each nn.Linear contributes two entries (weight and bias)."""
    layer_a = nn.Linear(4, 3)
    layer_b = nn.Linear(3, 2)
    assert len(get_parameters([layer_a])) == 2
    assert len(get_parameters([layer_a, layer_b])) == 4
|
def restrict_gpu_memory(per_process_gpu_memory_fraction: float=0.9):
    """Cap the TF1/Keras session's GPU memory usage at the given per-process
    fraction, and bound intra-op parallelism by OMP_NUM_THREADS when set.

    Imports are kept local so the module remains importable without
    TensorFlow/Keras installed.

    :param per_process_gpu_memory_fraction: fraction of GPU memory this
        process may allocate (TF1 GPUOptions semantics).
    """
    import os
    import tensorflow as tf
    import keras
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=per_process_gpu_memory_fraction)
    config_kwargs = dict(gpu_options=gpu_options, allow_soft_placement=True)
    thread_count = os.environ.get('OMP_NUM_THREADS')
    if thread_count:
        # BUG FIX: environment variables are strings, but ConfigProto's
        # intra_op_parallelism_threads is a typed int32 protobuf field —
        # passing the raw string raises a TypeError. Convert explicitly.
        config_kwargs['intra_op_parallelism_threads'] = int(thread_count)
    keras.backend.tensorflow_backend.set_session(
        tf.Session(config=tf.ConfigProto(**config_kwargs)))
|
class DataDirectories():
    """Resolves the standard speechless-data subdirectory layout under a
    single data root (default: `<home>/speechless-data`)."""

    def __init__(self, data_directory: Path=(home_directory() / 'speechless-data')):
        self.data_directory = data_directory
        # Attribute name -> subdirectory name under the data root.
        layout = {
            'corpus_base_directory': 'corpus',
            'spectrogram_cache_base_directory': 'spectrogram-cache',
            'tensorboard_log_base_directory': 'logs',
            'nets_base_directory': 'nets',
            'kenlm_base_directory': 'kenlm',
            'recording_directory': 'recordings',
            'test_results_directory': 'test-results',
        }
        for attribute, subdirectory in layout.items():
            setattr(self, attribute, data_directory / subdirectory)
|
# NOTE(review): the following three lines are extraction residue from a
# dataset-viewer web UI ("Subsets and Splits / No community queries yet /
# The top public SQL queries from the community will appear here once
# available.") and are not part of this source file.