Spaces:
Sleeping
Sleeping
File size: 1,770 Bytes
525e35d fb8d240 525e35d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import os
# --- Application setup (runs once at import time) ---

app = FastAPI(title="QA Dashboard Pro")

# Hugging Face Hub model id; weights are downloaded/cached on first load.
MODEL_PATH = "MedhaCodes/qna_finetuned_model"

# Build the QA pipeline once at startup so every request reuses the loaded
# model instead of reloading per call.
qa_pipeline = pipeline(
    "question-answering",
    model=AutoModelForQuestionAnswering.from_pretrained(MODEL_PATH),
    tokenizer=AutoTokenizer.from_pretrained(MODEL_PATH)
)

# Anchor asset directories to this file's location so the app works regardless
# of the current working directory. (Previously the templates path was
# cwd-relative while the static path was file-relative — inconsistent, and the
# app failed when launched from another directory.)
_BASE_DIR = os.path.dirname(__file__)

# Mount static files (CSS, JS)
app.mount(
    "/static",
    StaticFiles(directory=os.path.join(_BASE_DIR, "static")),
    name="static"
)

# Load Jinja2 templates from the package-local "templates" directory.
templates = Jinja2Templates(directory=os.path.join(_BASE_DIR, "templates"))
@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
    """Serve the dashboard landing page rendered from index.html."""
    template_context = {"request": request}
    return templates.TemplateResponse("index.html", template_context)
@app.post("/predict")
async def predict(request: Request):
    """Answer one or more questions against a context paragraph.

    Expects a JSON body ``{"context": str, "question": str}`` where
    ``question`` may hold several questions separated by newlines.

    Returns ``{"results": [{"question", "answer", "score"}, ...]}`` on
    success, or a 400 JSON error when either field is missing or blank.
    """
    data = await request.json()
    context = data.get("context")
    questions_text = data.get("question")
    if not context or not questions_text:
        return JSONResponse({"error": "Please provide both context and question"}, status_code=400)
    # One question per non-blank line.
    questions = [q.strip() for q in questions_text.strip().split("\n") if q.strip()]
    if not questions:
        # "question" was whitespace-only (e.g. "\n"): it passed the truthiness
        # check above but contains no actual question — reject instead of
        # silently returning an empty result list.
        return JSONResponse({"error": "Please provide both context and question"}, status_code=400)
    answers = []
    for q in questions:
        try:
            result = qa_pipeline(question=q, context=context)
            answers.append({
                "question": q,
                "answer": result["answer"],
                "score": round(result["score"], 4)
            })
        except Exception as e:
            # Best-effort batch: report the failure for this question and
            # continue, rather than aborting the whole request.
            answers.append({"question": q, "answer": f"Error: {e}", "score": 0})
    return {"results": answers}
|