Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files — load_model.py: +4 −4
load_model.py
CHANGED
|
@@ -2,8 +2,8 @@ import torch
|
|
| 2 |
from model import FuturesModel, CustomTokenizer, build_vocabulary
|
| 3 |
|
| 4 |
def load_model_and_tokenizer(
|
| 5 |
-
model_path='…'  [removed-line default value truncated in this extraction]
|
| 6 |
-
dataset_path='…'  [removed-line default value truncated in this extraction]
|
| 7 |
vocab_size=5000,
|
| 8 |
):
|
| 9 |
"""Loads the trained FuturesModel and CustomTokenizer."""
|
|
@@ -47,8 +47,8 @@ if __name__ == "__main__":
|
|
| 47 |
print("="*80)
|
| 48 |
|
| 49 |
# Correct paths for running from the root directory
|
| 50 |
-
model_path = '…'  [removed-line value truncated in this extraction]
|
| 51 |
-
dataset_path = '…'  [removed-line value truncated in this extraction]
|
| 52 |
|
| 53 |
try:
|
| 54 |
model, tokenizer = load_model_and_tokenizer(
|
|
|
|
| 2 |
from model import FuturesModel, CustomTokenizer, build_vocabulary
|
| 3 |
|
| 4 |
def load_model_and_tokenizer(
|
| 5 |
+
model_path='checkpoint_best.pt',
|
| 6 |
+
dataset_path='futures_dataset_v2.json',
|
| 7 |
vocab_size=5000,
|
| 8 |
):
|
| 9 |
"""Loads the trained FuturesModel and CustomTokenizer."""
|
|
|
|
| 47 |
print("="*80)
|
| 48 |
|
| 49 |
# Correct paths for running from the root directory
|
| 50 |
+
model_path = 'checkpoint_best.pt'
|
| 51 |
+
dataset_path = 'futures_dataset_v2.json'
|
| 52 |
|
| 53 |
try:
|
| 54 |
model, tokenizer = load_model_and_tokenizer(
|