Hugging Face Spaces — commit view
Status before this commit: Runtime error
Commit: "Update app.py" (Browse files)
File: app.py — CHANGED
Before (old version of app.py):

@@ -12,7 +12,7 @@ class Query(BaseModel):
 12      # Initialize FastAPI app
 13      app = FastAPI(title="Financial Chatbot API")
 14
 15 -    # Load your fine-tuned model and tokenizer
 16      model_name = "Phoenix21/meta-llama-Llama-3.2-3B-2025-03-13-checkpoints"
 17      model = AutoModelForCausalLM.from_pretrained(
 18          model_name,
@@ -39,7 +39,7 @@ def generate(query: Query):
 39      response = chat_pipe(prompt)[0]["generated_text"]
 40      return {"response": response}
 41
 42 -    # Run the app using uvicorn
 43      if __name__ == "__main__":
 44 -        port = int(os.environ.get("PORT",    (line appears truncated/unclosed — presumably the cause of the Space's Runtime error)
 45      uvicorn.run(app, host="0.0.0.0", port=port)
After (new version of app.py):

 12      # Initialize FastAPI app
 13      app = FastAPI(title="Financial Chatbot API")
 14
 15 +    # Load your fine-tuned model and tokenizer using the updated model name
 16      model_name = "Phoenix21/meta-llama-Llama-3.2-3B-2025-03-13-checkpoints"
 17      model = AutoModelForCausalLM.from_pretrained(
 18          model_name,

 39      response = chat_pipe(prompt)[0]["generated_text"]
 40      return {"response": response}
 41
 42 +    # Run the app using uvicorn; default port is 7860 (as expected by Hugging Face Spaces)
 43      if __name__ == "__main__":
 44 +        port = int(os.environ.get("PORT", 7860))
 45      uvicorn.run(app, host="0.0.0.0", port=port)