Josh Cole committed
Commit · fea66a8
1 Parent(s): a363672
update model

Browse files:
- Generate.ipynb +33 -37
- pytorch_model.bin +1 -1
- training_args.bin +2 -2
- vocab.json +1 -1
Generate.ipynb CHANGED

@@ -9,7 +9,7 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "
+"model_id": "7afc85b57ea24d31a2fdcc2b1f5c9ace",
 "version_major": 2,
 "version_minor": 0
 },
@@ -28,7 +28,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 2,
 "id": "38bdf299-f60d-43ea-9230-df1be861e406",
 "metadata": {},
 "outputs": [
@@ -43,7 +43,7 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "
+"model_id": "18cae671f8fd4f9baac804c91fee03bf",
 "version_major": 2,
 "version_minor": 0
 },
@@ -62,14 +62,14 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 22,
 "id": "75b32151-eb53-4476-8c1f-7e6da72e173e",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "
+"model_id": "0611b2fa6cf740d6925d03cf3ba525a2",
 "version_major": 2,
 "version_minor": 0
 },
@@ -88,7 +88,7 @@
 "    return {\"vocab\": [vocab], \"all_text\": [all_text]}\n",
 "\n",
 "vocabs = ds.map(extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=ds.column_names[\"train\"])\n",
-"vocab_list = list(set(vocabs[\"train\"][\"vocab\"][0])
+"vocab_list = list(set(vocabs[\"train\"][\"vocab\"][0]))\n",
 "vocab_dict = {v: k for k, v in enumerate(vocab_list)}\n",
 "vocab_dict[\"|\"] = vocab_dict[\" \"]\n",
 "del vocab_dict[\" \"]\n",
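The repaired `vocab_list` line closes the parenthesis that was cut off on the old side. For context, this cell builds the character-level CTC vocabulary; a minimal standalone sketch of the same logic, with a hypothetical `texts` list standing in for the dataset's text column:

    import json

    texts = ["hello world"]  # hypothetical stand-in for ds["train"]["text"]
    all_text = " ".join(texts)
    vocab_list = list(set(all_text))                  # unique characters
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]                 # CTC convention: "|" marks word breaks
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
    with open("vocab.json", "w") as f:
        json.dump(vocab_dict, f)

This is what produces the index layout visible in the vocab.json diff at the bottom of this commit: "|" inherits whatever index the space character happened to get.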
@@ -102,7 +102,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 23,
 "id": "d214872e-d4b1-4aa7-be07-8a1591961968",
 "metadata": {},
 "outputs": [],
@@ -111,14 +111,14 @@
 "from transformers import Wav2Vec2FeatureExtractor\n",
 "from transformers import Wav2Vec2Processor\n",
 "\n",
-"tokenizer = Wav2Vec2CTCTokenizer(\"./vocab.json\", unk_token=\"[UNK]\", pad_token=\"[PAD]\", word_delimiter_token=\"
+"tokenizer = Wav2Vec2CTCTokenizer(\"./vocab.json\", unk_token=\"[UNK]\", pad_token=\"[PAD]\", word_delimiter_token=\" \")\n",
 "feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=False)\n",
 "processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)"
 ]
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 24,
 "id": "e906c45f-6971-43c3-ad0a-b13363100bdf",
 "metadata": {},
 "outputs": [],
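Unescaped from the notebook JSON, the tokenizer cell now reads as below. Note that the commit sets `word_delimiter_token` to a literal space even though the updated vocab.json contains "|" rather than " "; "|" is the tokenizer's default and the more natural pairing with the vocabulary built above.

    from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor

    tokenizer = Wav2Vec2CTCTokenizer(
        "./vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token=" ",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=True,
        return_attention_mask=False,
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)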
@@ -137,7 +137,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 25,
 "id": "8c083db6-eab5-4f25-9a08-eab50d2d30ac",
 "metadata": {},
 "outputs": [
@@ -151,7 +151,7 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "
+"model_id": "ae21f7b6a50241e4ab4dd2b5c7c5689c",
 "version_major": 2,
 "version_minor": 0
 },
@@ -169,7 +169,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 26,
 "id": "50c9a6ad-9e79-4a1c-a5ce-6e1f73a96e4d",
 "metadata": {},
 "outputs": [],
@@ -242,7 +242,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 27,
 "id": "1025ffdf-cb83-4895-89ab-a98bc3fab642",
 "metadata": {},
 "outputs": [],
@@ -253,7 +253,7 @@
 },
 {
 "cell_type": "code",
-"execution_count":
+"execution_count": 35,
 "id": "71351cf4-6d00-40ae-89cc-cedb87073625",
 "metadata": {},
 "outputs": [
@@ -353,10 +353,10 @@
 "}\n",
 "\n",
 "loading weights file https://huggingface.co/facebook/wav2vec2-base/resolve/main/pytorch_model.bin from cache at /home/sharpcoder/.cache/huggingface/transformers/ef45231897ce572a660ebc5a63d3702f1a6041c4c5fb78cbec330708531939b3.fcae05302a685f7904c551c8ea571e8bc2a2c4a1777ea81ad66e47f7883a650a\n",
-"Some weights of the model checkpoint at facebook/wav2vec2-base were not used when initializing Wav2Vec2ForCTC: ['
+"Some weights of the model checkpoint at facebook/wav2vec2-base were not used when initializing Wav2Vec2ForCTC: ['project_hid.bias', 'quantizer.weight_proj.bias', 'project_q.weight', 'project_hid.weight', 'quantizer.weight_proj.weight', 'quantizer.codevectors', 'project_q.bias']\n",
 "- This IS expected if you are initializing Wav2Vec2ForCTC from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
 "- This IS NOT expected if you are initializing Wav2Vec2ForCTC from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
-"Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base and are newly initialized: ['lm_head.
+"Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-base and are newly initialized: ['lm_head.weight', 'lm_head.bias']\n",
 "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
 ]
 }
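The completed log lines show the expected checkpoint surgery: the quantizer and projection weights exist only for wav2vec2 pretraining and are dropped, while the CTC head (`lm_head.weight`, `lm_head.bias`) is created fresh. The load that triggers this sits outside the diff context; a minimal sketch, assuming only the 20-entry vocabulary from this commit's vocab.json:

    from transformers import Wav2Vec2ForCTC

    # vocab_size=20 matches the updated vocab.json in this commit; the exact
    # arguments used in the notebook are not visible in the hunk above.
    model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base", vocab_size=20)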
@@ -391,11 +391,11 @@
 "from transformers import Trainer\n",
 "\n",
 "training_args = TrainingArguments(\n",
-"    output_dir=\"
+"    output_dir=\"./\",\n",
 "    group_by_length=True,\n",
 "    per_device_train_batch_size=8,\n",
 "    evaluation_strategy=\"steps\",\n",
-"    num_train_epochs=
+"    num_train_epochs=3,\n",
 "    fp16=False,\n",
 "    gradient_checkpointing=True,\n",
 "    save_steps=500,\n",
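The visible arguments, completed into a runnable cell (the diff context cuts the call off after `save_steps=500`; any remaining arguments are not shown):

    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir="./",
        group_by_length=True,
        per_device_train_batch_size=8,
        evaluation_strategy="steps",
        num_train_epochs=3,
        fp16=False,
        gradient_checkpointing=True,
        save_steps=500,
    )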
@@ -431,17 +431,11 @@
 "The following columns in the training set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.\n",
 "***** Running training *****\n",
 "  Num examples = 1\n",
-"  Num Epochs =
+"  Num Epochs = 3\n",
 "  Instantaneous batch size per device = 8\n",
 "  Total train batch size (w. parallel, distributed & accumulation) = 8\n",
 "  Gradient Accumulation steps = 1\n",
-"  Total optimization steps =
-"/home/sharpcoder/.local/lib/python3.10/site-packages/transformers/feature_extraction_utils.py:158: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at ../torch/csrc/utils/tensor_new.cpp:210.)\n",
-"  tensor = as_tensor(value)\n",
-"/home/sharpcoder/.local/lib/python3.10/site-packages/transformers/models/wav2vec2/modeling_wav2vec2.py:882: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').\n",
-"  return (input_length - kernel_size) // stride + 1\n",
-"/home/sharpcoder/.local/lib/python3.10/site-packages/torch/autocast_mode.py:162: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling\n",
-"  warnings.warn('User provided device_type of \\'cuda\\', but CUDA is not available. Disabling')\n"
+"  Total optimization steps = 3\n"
 ]
 },
 {
@@ -450,8 +444,8 @@
 "\n",
 "    <div>\n",
 "      \n",
-"      <progress value='
-"      [
+"      <progress value='3' max='3' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
+"      [3/3 00:02, Epoch 3/3]\n",
 "    </div>\n",
 "    <table border=\"1\" class=\"dataframe\">\n",
 "  <thead>\n",
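The filled-in numbers are self-consistent: with 1 training example and a per-device batch size of 8, each epoch is a single optimization step, so 3 epochs yield the 3 total steps reported in the log, the progress bar, and the TrainOutput below.

    import math
    steps_per_epoch = math.ceil(1 / 8)   # 1 example, batch size 8 -> 1 step per epoch
    total_steps = steps_per_epoch * 3    # 3 epochs -> 3 steps, as logged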
@@ -486,7 +480,7 @@
 {
 "data": {
 "text/plain": [
-"TrainOutput(global_step=
+"TrainOutput(global_step=3, training_loss=10.471563975016275, metrics={'train_runtime': 3.8966, 'train_samples_per_second': 0.77, 'train_steps_per_second': 0.77, 'total_flos': 94374986431680.0, 'train_loss': 10.471563975016275, 'epoch': 3.0})"
 ]
 },
 "execution_count": 46,
@@ -501,17 +495,17 @@
 {
 "cell_type": "code",
 "execution_count": 47,
-"id": "
+"id": "333d43cf-add3-4d78-bbca-b44c638519fe",
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"Saving model checkpoint to
-"Configuration saved in
-"Model weights saved in
-"Configuration saved in
+"Saving model checkpoint to ./\n",
+"Configuration saved in ./config.json\n",
+"Model weights saved in ./pytorch_model.bin\n",
+"Configuration saved in ./preprocessor_config.json\n"
 ]
 },
 {
@@ -521,18 +515,20 @@
 "traceback": [
 "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
 "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
-"Input \u001b[0;32mIn [47]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpush_to_hub\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
+"Input \u001b[0;32mIn [47]\u001b[0m, in \u001b[0;36m<cell line: 1>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpush_to_hub\u001b[49m\u001b[43m(\u001b[49m\u001b[43mhub_model_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msharpcoder/wav2vec2_bjorn\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
 "File \u001b[0;32m~/.local/lib/python3.10/site-packages/transformers/trainer.py:2677\u001b[0m, in \u001b[0;36mTrainer.push_to_hub\u001b[0;34m(self, commit_message, blocking, **kwargs)\u001b[0m\n\u001b[1;32m   2674\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mis_world_process_zero():\n\u001b[1;32m   2675\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[0;32m-> 2677\u001b[0m git_head_commit_url \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrepo\u001b[49m\u001b[38;5;241m.\u001b[39mpush_to_hub(commit_message\u001b[38;5;241m=\u001b[39mcommit_message, blocking\u001b[38;5;241m=\u001b[39mblocking)\n\u001b[1;32m   2678\u001b[0m \u001b[38;5;66;03m# push separately the model card to be independant from the rest of the model\u001b[39;00m\n\u001b[1;32m   2679\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39margs\u001b[38;5;241m.\u001b[39mshould_save:\n",
 "\u001b[0;31mAttributeError\u001b[0m: 'Trainer' object has no attribute 'repo'"
 ]
 }
 ],
-"source": [
+"source": [
+"trainer.push_to_hub(hub_model_id=\"sharpcoder/wav2vec2_bjorn\")"
+]
 },
 {
 "cell_type": "code",
 "execution_count": null,
-"id": "
+"id": "a5cb9a88-2443-4bd9-85ac-12bf80a9e325",
 "metadata": {},
 "outputs": [],
 "source": []
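The new source line passes `hub_model_id` to `trainer.push_to_hub()`, but the AttributeError persists either way: in this transformers version, `Trainer` only clones the Hub repo into `self.repo` at construction time, and it does so only when the `TrainingArguments` were built with `push_to_hub=True`. A plausible fix, not part of this commit, would be to set that up front:

    from transformers import TrainingArguments

    # Assumption: with push_to_hub=True the Trainer initializes its git repo
    # during __init__, so trainer.push_to_hub() has a self.repo to push from.
    training_args = TrainingArguments(
        output_dir="./",
        push_to_hub=True,
        hub_model_id="sharpcoder/wav2vec2_bjorn",
        # ... remaining arguments as in the training cell above
    )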
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ea220cc133930f98791c7b7a1d76d68b159241b625a40a783d4e05d2c93c11d7
 size 377667031
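pytorch_model.bin is stored through Git LFS, so the diff touches only the three-line pointer file: a spec version, the sha256 of the blob, and its size in bytes. The oid changes because the weights were retrained; the size stays 377667031 because the architecture did not change. A pointer can be checked against a downloaded file using only the standard library:

    import hashlib

    def lfs_oid(path: str, chunk: int = 1 << 20) -> str:
        """sha256 of a file's contents, as recorded in a Git LFS pointer."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk):
                h.update(block)
        return h.hexdigest()

    # should print ea220cc1... for the weights added by this commit
    print(lfs_oid("pytorch_model.bin"))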
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:19c7738f5655571cd7c062b8a732e09ad439c7c98c6a054da91449f8906026bf
+size 2735
vocab.json CHANGED

@@ -1 +1 @@
-{"w": 0, "
+{"w": 0, "a": 1, "o": 3, "e": 4, "j": 5, "n": 6, "p": 7, "l": 8, ".": 9, "i": 10, "b": 11, "d": 12, "h": 13, "r": 14, "y": 15, "m": 16, "s": 17, "|": 2, "[UNK]": 18, "[PAD]": 19}
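The completed vocab.json holds 20 entries: 17 characters, "|" (which inherited index 2 from the space it replaced, hence the gap in the otherwise sequential numbering), and the [UNK]/[PAD] specials. A quick sanity check of the mapping a CTC decoder would invert:

    import json

    with open("vocab.json") as f:
        vocab = json.load(f)

    id_to_char = {i: c for c, i in vocab.items()}
    assert len(vocab) == 20 and vocab["|"] == 2   # "|" took over the old index of " "
    print("".join(id_to_char[i] for i in sorted(id_to_char)))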