Hugging Face Spaces — status: Sleeping

Commit: Deploy temporary working model: DialoGPT while awaiting Llama 3.1 access
(Browse files)

File changed: app.py
@@ -62,8 +62,9 @@ async def load_model():
 
     logger.info("Loading model with transformers...")
 
-    # Use
-
+    # Use a working model while waiting for Llama 3.1 access
+    # TODO: Change back to "meta-llama/Llama-3.1-8B-Instruct" once you have access
+    base_model_name = "microsoft/DialoGPT-medium"
 
     # Get HF token from environment
     hf_token = os.getenv("HF_TOKEN")