MedhaCodes committed on
Commit
8e13353
·
verified ·
1 Parent(s): 8b27d1c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +51 -37
app.py CHANGED
@@ -1,52 +1,66 @@
1
- from fastapi import FastAPI, Request
2
- from fastapi.responses import HTMLResponse, JSONResponse
3
- from fastapi.templating import Jinja2Templates
4
  from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
5
 
6
- app = FastAPI(title="QA Dashboard Pro")
7
-
8
- # Load your fine-tuned model
9
  MODEL_PATH = "MedhaCodes/qna_finetuned_model"
 
 
10
  qa_pipeline = pipeline(
11
  "question-answering",
12
  model=AutoModelForQuestionAnswering.from_pretrained(MODEL_PATH),
13
  tokenizer=AutoTokenizer.from_pretrained(MODEL_PATH)
14
  )
15
 
16
- templates = Jinja2Templates(directory="templates")
17
-
18
- @app.get("/", response_class=HTMLResponse)
19
- async def home(request: Request):
20
- return templates.TemplateResponse("index.html", {"request": request})
21
-
22
- @app.post("/predict")
23
- async def predict(request: Request):
24
- data = await request.json()
25
- context = data.get("context")
26
- questions_text = data.get("question")
27
-
28
  if not context or not questions_text:
29
- return JSONResponse({"error": "Please provide both context and question"}, status_code=400)
30
 
31
- # Handle multiple questions line-by-line
32
  questions = [q.strip() for q in questions_text.strip().split("\n") if q.strip()]
33
  answers = []
34
 
35
  for i, q in enumerate(questions, start=1):
36
- try:
37
- result = qa_pipeline(question=q, context=context)
38
- answer = result["answer"]
39
- score = result["score"]
40
- answers.append({
41
- "question": q,
42
- "answer": answer,
43
- "score": round(score, 4)
44
- })
45
- except Exception as e:
46
- answers.append({
47
- "question": q,
48
- "answer": f"Error: {str(e)}",
49
- "score": 0
50
- })
51
-
52
- return {"results": answers}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline

# ✅ Fine-tuned QA model to load.
# Use the Hugging Face Hub repo id (e.g. "MedhaCodes/qna_finetuned_model"),
# or point at "saved_model" when running against a local checkpoint.
MODEL_PATH = "MedhaCodes/qna_finetuned_model"

# Build the question-answering pipeline once at import time so every
# request reuses the already-loaded weights.
_tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
_model = AutoModelForQuestionAnswering.from_pretrained(MODEL_PATH)
qa_pipeline = pipeline(
    "question-answering",
    model=_model,
    tokenizer=_tokenizer,
)
15
 
16
# Function to process multiple questions against one shared context.
def answer_multiple_questions(context, questions_text):
    """Answer every question in *questions_text* against *context*.

    Parameters
    ----------
    context : str
        The passage the QA model extracts answers from.
    questions_text : str
        One question per line; blank lines are ignored.

    Returns
    -------
    str
        A readable report with a Q/A/confidence section per question,
        separated by a dashed rule — or a warning string when either
        input is empty.
    """
    if not context or not questions_text:
        return "⚠️ Please provide both context and questions."

    # Split questions by line, dropping surrounding whitespace and blanks.
    questions = [q.strip() for q in questions_text.strip().split("\n") if q.strip()]
    answers = []

    for i, q in enumerate(questions, start=1):
        # Keep answering the remaining questions even if one of them makes
        # the pipeline raise (e.g. a context longer than the model can
        # encode) — report the failure inline instead of crashing the app.
        try:
            result = qa_pipeline(question=q, context=context)
            answer = result["answer"]
            score = result["score"]
            answers.append(
                f"Q{i}: {q}\nAnswer: {answer}\nConfidence: {score:.4f}\n{'-' * 60}"
            )
        except Exception as e:  # surface any pipeline failure per-question
            answers.append(
                f"Q{i}: {q}\nAnswer: Error: {e}\nConfidence: 0.0000\n{'-' * 60}"
            )

    return "\n".join(answers)
32
+
33
# Gradio Interface: wires answer_multiple_questions to a three-textbox UI
# (long context in, multi-line questions in, formatted answers out).
interface = gr.Interface(
    fn=answer_multiple_questions,
    inputs=[
        gr.Textbox(
            label="📘 Context (Paragraph/Text)",
            lines=20,
            placeholder="Enter or paste a long text context here...",
        ),
        gr.Textbox(
            label="❓ Questions (one per line)",
            lines=10,
            placeholder="Type each question on a new line...",
        ),
    ],
    outputs=gr.Textbox(
        label="🧩 Answers",
        lines=25,
        placeholder="Model responses will appear here...",
    ),
    title="🤖 Machine Learning QA System",
    description=(
        "This Gradio app uses a fine-tuned RoBERTa model to answer questions from a given context.\n\n"
        "👉 Enter a long paragraph in the context box.\n"
        "👉 Write multiple questions (one per line).\n"
        "👉 Get all answers organized line by line!"
    ),
    theme="soft",  # optional theme
    # NOTE(review): `allow_flagging` is deprecated in Gradio >= 4 in favor of
    # `flagging_mode` — confirm the installed Gradio version before changing.
    allow_flagging="never",
)

# Launch app only when run as a script (Spaces / `python app.py`).
if __name__ == "__main__":
    interface.launch()