Sanchit Gandhi committed
Commit 5ac9d3e · 1 Parent(s): 05330ab
run_librispeech.sh CHANGED
@@ -8,15 +8,14 @@ CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_seq2seq.py \
  --output_dir="./" \
  --preprocessing_num_workers="1" \
  --length_column_name="input_length" \
- --overwrite_output_dir \
- --num_train_epochs="20" \
+ --num_train_epochs="10" \
  --per_device_train_batch_size="8" \
  --per_device_eval_batch_size="8" \
  --gradient_accumulation_steps="4" \
  --generation_max_length="40" \
  --generation_num_beams="1" \
- --learning_rate="1e-5" \
- --warmup_steps="1500" \
+ --learning_rate="5e-6" \
+ --warmup_steps="1" \
  --evaluation_strategy="steps" \
  --text_column_name="text" \
  --save_steps="1500" \
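Taken together, the script changes configure a continuation run rather than a fresh one: --overwrite_output_dir is dropped so training picks up from the checkpoint already in --output_dir, the epoch budget is halved from 20 to 10, the peak learning rate is lowered from 1e-5 to 5e-6, and the warmup ramp is collapsed to a single step. With the Trainer's default lr_scheduler_type of "linear" (visible in the debug.log config dump below), the effect on the schedule can be sketched as follows; the total step counts here are illustrative assumptions, not values from this run.

    # Sketch of the HF Trainer's default "linear" schedule: ramp from 0 to the
    # peak learning rate over warmup_steps, then decay linearly to 0.
    # The total_steps values below are illustrative assumptions, not run values.
    def linear_schedule_lr(step, peak_lr, warmup_steps, total_steps):
        if step < warmup_steps:
            return peak_lr * step / max(1, warmup_steps)
        return peak_lr * max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

    for step in (0, 100, 1500, 5000):
        old = linear_schedule_lr(step, peak_lr=1e-5, warmup_steps=1500, total_steps=20_000)
        new = linear_schedule_lr(step, peak_lr=5e-6, warmup_steps=1, total_steps=10_000)
        print(f"step {step:>5}: old lr {old:.2e}  new lr {new:.2e}")

Under the new flags the rate sits at its 5e-6 peak almost immediately and only decays, which is the usual choice when resuming from a model that has already been warmed up.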
wandb/run-20220320_205254-1enb65m6/files/config.yaml CHANGED
@@ -12485,7 +12485,14 @@ _wandb:
  - 1
  - 5
  - 11
+ 2:
+ - 1
+ - 5
+ - 11
+ - 12
  3:
+ - 1
+ - 7
  - 13
  4: 3.9.5
  5: 0.12.10
wandb/run-20220320_205254-1enb65m6/files/output.log CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ab7dd953f1ec711da0aa165a5dae6a13dfe4303b7fe23a35630709d5011ccbbe
- size 31575096
+ oid sha256:d0169d0daa9288c0eeb7bbd1d3c62348a7358d672a5f5b5def66932d7759bcec
+ size 31627506
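The wandb artifacts in this commit are tracked with Git LFS, so each of these diffs touches only the three-line pointer file (spec version, sha256 oid, byte size) while the binary payload lives in LFS storage. A minimal sketch of parsing such a pointer, assuming the file on disk is the pointer text rather than the fetched payload:

    # Minimal sketch: parse a Git LFS pointer file into a dict of its fields.
    # The path is one of this commit's pointer files; adjust as needed.
    def read_lfs_pointer(path):
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    pointer = read_lfs_pointer("wandb/run-20220320_205254-1enb65m6/files/output.log")
    print(pointer["oid"], pointer["size"])  # e.g. sha256:d0169d0d... 31627506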
wandb/run-20220320_205254-1enb65m6/files/wandb-summary.json CHANGED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220320_205254-1enb65m6/logs/debug-internal.log CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cefbd139808a342db993460b8a43d01c6eb8cd82b3650ede266e425fe6c7eb12
- size 26448246
+ oid sha256:839cd8feb7fb2ceb2534b1bb74b29c971b037965b85bfba51173ba9363b75a44
+ size 26491330
wandb/run-20220320_205254-1enb65m6/logs/debug.log CHANGED
@@ -25,3 +25,133 @@ config: {}
  2022-03-20 20:52:56,162 INFO MainThread:15624 [wandb_init.py:init():651] run started, returning control to user process
  2022-03-20 20:52:56,164 INFO MainThread:15624 [wandb_run.py:_config_callback():966] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'torch.float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': False, 'is_encoder_decoder': True, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 50, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['SpeechEncoderDecoderModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': None, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': 0, 'task_specific_params': None, 'problem_type': None, '_name_or_path': './', 'transformers_version': None, 'decoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': True, 'cross_attention_hidden_size': None, 'add_cross_attention': True, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['RobertaForMaskedLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'roberta-large', 'transformers_version': '4.17.0.dev0', 'vocab_size': 50265, 'hidden_size': 1024, 'num_hidden_layers': 24, 'num_attention_heads': 16, 'hidden_act': 'gelu', 'intermediate_size': 4096, 'hidden_dropout_prob': 0.1, 'attention_probs_dropout_prob': 0.1, 'max_position_embeddings': 514, 'type_vocab_size': 1, 'initializer_range': 0.02, 'layer_norm_eps': 1e-05, 'position_embedding_type': 'absolute', 'use_cache': False, 'classifier_dropout': None, 'model_type': 'roberta'}, 'encoder': {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': None, 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 
'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 0, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-large-lv60', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'gradient_checkpointing': False, 'hidden_dropout_prob': 0.1, 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.1, 'attention_dropout': 0.1, 'activation_dropout': 0.1, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 32, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.1, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'sum', 'ctc_zero_infinity': False, 'add_adapter': True, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'model_type': 'wav2vec2'}, 'model_type': 'speech-encoder-decoder', 'processor_class': 'Wav2Vec2Processor', 'use_cache': False, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 8, 'per_device_eval_batch_size': 8, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 4, 'eval_accumulation_steps': 'None', 'learning_rate': 1e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 20.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 1500, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Mar20_20-52-19_sanchit--v100', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 1500, 'save_total_limit': 1, 'save_on_each_node': False, 
'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 1500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['tensorboard', 'wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': 40, 'generation_num_beams': 1, 'train_batch_size': 8, 'eval_batch_size': 8}
  2022-03-20 20:52:56,167 INFO MainThread:15624 [wandb_watch.py:watch():43] Watching
+ 2022-03-22 05:39:56,507 INFO MainThread:15624 [wandb_run.py:_atexit_cleanup():1797] got exitcode: 1
+ 2022-03-22 05:39:56,512 INFO MainThread:15624 [wandb_run.py:_restore():1769] restore
+ 2022-03-22 05:39:59,228 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 1
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 2043
+ }
+
+ 2022-03-22 05:39:59,345 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 1
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 2043
+ }
+
+ 2022-03-22 05:39:59,527 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 1
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 2043
+ }
+
+ 2022-03-22 05:40:00,612 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 1
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 2043
+ }
+
+ 2022-03-22 05:40:01,560 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:01,665 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 2043
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:01,767 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 16893634
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:01,869 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 28772034
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:01,971 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:02,073 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:02,175 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:02,277 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:02,379 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:03,057 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+
+ 2022-03-22 05:40:03,519 INFO MainThread:15624 [wandb_run.py:_wait_for_finish():1929] got exit ret: done: true
+ exit_result {
+ }
+ file_counts {
+ wandb_count: 5
+ }
+ pusher_stats {
+ uploaded_bytes: 33718196
+ total_bytes: 33718196
+ }
+ local_info {
+ }
+
+ 2022-03-22 05:40:04,692 INFO MainThread:15624 [wandb_run.py:_append_history():2144] rendering history
+ 2022-03-22 05:40:04,693 INFO MainThread:15624 [wandb_run.py:_append_summary():2102] rendering summary
+ 2022-03-22 05:40:04,695 INFO MainThread:15624 [wandb_run.py:_append_files():2194] logging synced files
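The new debug.log entries show the run exiting with code 1, after which wandb polled the file pusher until all five files (33,718,196 bytes) were uploaded and then rendered the final history and summary. The large config_cb entry earlier in this log is the merged model config and TrainingArguments that the transformers wandb integration writes into the run config at startup; a minimal sketch of that merge, with placeholder dicts standing in for the real configs:

    # Minimal sketch (placeholder dicts, hypothetical project name) of how a
    # model config and training arguments get merged into a wandb run config.
    import wandb

    model_config = {"is_encoder_decoder": True, "decoder_start_token_id": 0}
    training_args = {"learning_rate": 1e-05, "num_train_epochs": 20.0, "warmup_steps": 1500}

    run = wandb.init(project="example-project", mode="offline")
    run.config.update({**model_config, **training_args})  # what config_cb records
    run.finish()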
wandb/run-20220320_205254-1enb65m6/run-1enb65m6.wandb CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f1d7d4a4e6c9b4221e5f96b4e4f6fe0fac59678a71ea4c94a99e1ffd5be856f2
- size 1309654315
+ oid sha256:6cad91af09babd9051be77dd9ffdbb1ec67164a18af9405959ff886c1d6c5d19
+ size 1311444200