Commit 5830a30
Parent(s): a64a4fd
correction
app.py CHANGED
@@ -306,15 +306,16 @@ VOCAB_FILE = "vocabs_enhanced.pkl"
 CHECKPOINT_FILE = "checkpoint_enhanced.pt"

 # IMPORTANT: Update this with your actual Hugging Face repository ID
-REPO_ID =
-HF_TOKEN = os.environ.get("HF_TOKEN")  # Set this as a secret in your Space settings
+REPO_ID = "heerjtdev/LSTM_CRF"  # Replace with your repo ID
+# HF_TOKEN = os.environ.get("HF_TOKEN")  # Set this as a secret in your Space settings


 def download_existing_models():
     """Download existing model files from the Hugging Face Hub if available."""
     try:
         api = HfApi()
-        files = api.list_repo_files(REPO_ID, token=HF_TOKEN)
+        #files = api.list_repo_files(REPO_ID, token=HF_TOKEN)
+        files = api.list_repo_files(REPO_ID)

         os.makedirs(OUTPUT_DIR, exist_ok=True)

@@ -326,7 +327,7 @@ def download_existing_models():
             model_path = hf_hub_download(
                 repo_id=REPO_ID,
                 filename=MODEL_FILE,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 local_dir=OUTPUT_DIR,
                 force_download=True  # Always get latest version
             )
@@ -339,7 +340,7 @@ def download_existing_models():
             vocab_path = hf_hub_download(
                 repo_id=REPO_ID,
                 filename=VOCAB_FILE,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 local_dir=OUTPUT_DIR,
                 force_download=True  # Always get latest version
             )
@@ -352,7 +353,7 @@ def download_existing_models():
             checkpoint_path = hf_hub_download(
                 repo_id=REPO_ID,
                 filename=CHECKPOINT_FILE,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 local_dir=OUTPUT_DIR,
                 force_download=True
             )
@@ -454,7 +455,7 @@ def train_model(dataset_file, progress=gr.Progress()):
                 path_or_fileobj=model_path,
                 path_in_repo=MODEL_FILE,
                 repo_id=REPO_ID,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 commit_message="Update trained model"
             )
             upload_status.append(MODEL_FILE)
@@ -468,7 +469,7 @@ def train_model(dataset_file, progress=gr.Progress()):
                 path_or_fileobj=vocab_path,
                 path_in_repo=VOCAB_FILE,
                 repo_id=REPO_ID,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 commit_message="Update vocabulary"
             )
             upload_status.append(VOCAB_FILE)
@@ -483,7 +484,7 @@ def train_model(dataset_file, progress=gr.Progress()):
                 path_or_fileobj=checkpoint_path,
                 path_in_repo=CHECKPOINT_FILE,
                 repo_id=REPO_ID,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 commit_message="Update checkpoint"
             )
             upload_status.append(CHECKPOINT_FILE)
@@ -539,7 +540,8 @@ def download_models_from_hub():
         os.makedirs(OUTPUT_DIR, exist_ok=True)

         api = HfApi()
-        files = api.list_repo_files(REPO_ID, token=HF_TOKEN)
+        #files = api.list_repo_files(REPO_ID, token=HF_TOKEN)
+        files = api.list_repo_files(REPO_ID)

         downloaded_files = []

@@ -549,7 +551,7 @@ def download_models_from_hub():
             model_path = hf_hub_download(
                 repo_id=REPO_ID,
                 filename=MODEL_FILE,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 local_dir=OUTPUT_DIR,
                 force_download=True
             )
@@ -563,7 +565,7 @@ def download_models_from_hub():
             vocab_path = hf_hub_download(
                 repo_id=REPO_ID,
                 filename=VOCAB_FILE,
-                token=HF_TOKEN,
+                # token=HF_TOKEN,
                 local_dir=OUTPUT_DIR,
                 force_download=True
             )
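For reference, the corrected helpers rely on anonymous read access: a public repo such as heerjtdev/LSTM_CRF can be listed and downloaded from without any token. A minimal sketch of that flow, assuming illustrative values for OUTPUT_DIR and MODEL_FILE (app.py defines its own constants):

import os
from huggingface_hub import HfApi, hf_hub_download

REPO_ID = "heerjtdev/LSTM_CRF"    # public repo, so reads need no token
OUTPUT_DIR = "outputs"            # assumed value for this sketch
MODEL_FILE = "model_enhanced.pt"  # assumed filename for this sketch

os.makedirs(OUTPUT_DIR, exist_ok=True)

# List the repo contents anonymously, as the corrected helpers do.
files = HfApi().list_repo_files(REPO_ID)

# Only download files the repo actually contains.
if MODEL_FILE in files:
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=MODEL_FILE,
        local_dir=OUTPUT_DIR,
        force_download=True,  # always fetch the latest revision
    )
    print(f"Saved to {model_path}")

The upload side in train_model is different: api.upload_file still needs write access, and with token= commented out huggingface_hub falls back to whatever ambient credential exists (an HF_TOKEN environment variable or a cached login), so pushes from the Space will fail if neither is configured.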