Update app.py
Browse files
app.py
CHANGED
|
@@ -5,6 +5,7 @@ from huggingface_hub import login
|
|
| 5 |
from fastapi import FastAPI, Request
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
import uvicorn
|
|
|
|
| 8 |
|
| 9 |
# Configure logging
|
| 10 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
@@ -23,7 +24,7 @@ try:
|
|
| 23 |
qa_pipeline = pipeline(
|
| 24 |
"question-answering",
|
| 25 |
model="nguyenvulebinh/vi-mrc-base",
|
| 26 |
-
device=0 if torch.cuda.is_available() else -1
|
| 27 |
)
|
| 28 |
logging.info("Model loaded successfully")
|
| 29 |
except Exception as e:
|
|
@@ -52,4 +53,4 @@ async def api_answer(request: Request):
|
|
| 52 |
|
| 53 |
if __name__ == "__main__":
|
| 54 |
logging.info("Starting FastAPI...")
|
| 55 |
-
uvicorn.run(app, host="0.0.0.0", port=8000) # Sử dụng cổng khác để test
|
|
|
|
| 5 |
from fastapi import FastAPI, Request
|
| 6 |
from fastapi.responses import JSONResponse
|
| 7 |
import uvicorn
|
| 8 |
+
import torch # Thêm dòng import này
|
| 9 |
|
| 10 |
# Configure logging
|
| 11 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
|
|
| 24 |
qa_pipeline = pipeline(
|
| 25 |
"question-answering",
|
| 26 |
model="nguyenvulebinh/vi-mrc-base",
|
| 27 |
+
device=0 if torch.cuda.is_available() else -1 # Sử dụng torch đã import
|
| 28 |
)
|
| 29 |
logging.info("Model loaded successfully")
|
| 30 |
except Exception as e:
|
|
|
|
| 53 |
|
| 54 |
if __name__ == "__main__":
|
| 55 |
logging.info("Starting FastAPI...")
|
| 56 |
+
uvicorn.run(app, host="0.0.0.0", port=8000) # Sử dụng cổng khác để test
|