# Hugging Face Space: Shridhartd — "Update app.py" (revision 9085743, verified)
# --- Dependencies and model configuration ---------------------------------
import json
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Remote model served through the HF Inference API — no local weights loaded.
MODEL_ID = "tiiuae/falcon-7b-instruct"  # keep the big model

# HF_TOKEN should be set as a Space secret; with no token the client falls
# back to anonymous (rate-limited) access.
client = InferenceClient(model=MODEL_ID, token=os.getenv("HF_TOKEN"))
# Instruction prefix sent to the model before every statement.
# Fix: the original ended with a stray editing artifact ("df") that was
# being transmitted verbatim inside the prompt.
SYSTEM_PROMPT = """You are an evaluator of undergraduate research problem statements.
Return only a JSON object with the following keys:
G1,G2,G3,G4,G5 (Yes/No) and S1–S5 (0-3) and comments.
"""
def _extract_json(text):
    """Return the first {...} span in *text*, or "{}" when none exists.

    Fix over the original: the original only checked ``start != -1``, so a
    response containing "{" but no "}" produced the truncated slice
    ``text[start:0]`` (an empty string) and a spurious parse error.
    """
    start = text.find("{")
    end = text.rfind("}") + 1
    if start == -1 or end <= start:
        return "{}"
    return text[start:end]


def _verdict(json_str):
    """Map the checklist JSON string onto a one-line human-readable verdict."""
    try:
        checklist = json.loads(json_str)
        # G1..G5 are mandatory Yes/No gatekeepers; any "No" fails outright.
        gate_fail = any(checklist.get(f"G{i}") == "No" for i in range(1, 6))
        # S1..S5 are 0-3 scores; missing keys count as 0.
        score = sum(int(checklist.get(f"S{i}", 0)) for i in range(1, 6))
    except Exception as e:  # malformed JSON or non-numeric score values
        return f"⚠️ JSON parse error: {e}"
    if gate_fail:
        return "❌ Mandatory gatekeeper criterion failed."
    if score >= 11:
        return "✅ Meets requirements (Exceptional/Proficient)."
    if score >= 8:
        return "⚠️ Adequate but needs refinement."
    return "❌ Needs major improvement."


def assess(statement):
    """Evaluate a problem statement with the remote LLM.

    Parameters
    ----------
    statement : str
        The REU problem statement pasted by the user.

    Returns
    -------
    tuple[str, str]
        (raw JSON extracted from the model output, verdict string).
    """
    prompt = f"{SYSTEM_PROMPT}\nProblem statement:\n\"\"\"{statement}\"\"\"\nChecklist:"
    response = client.text_generation(
        prompt,
        max_new_tokens=256,
        temperature=0.1,
        top_p=0.9,
        stream=False,
    )
    json_str = _extract_json(response)
    return json_str, _verdict(json_str)
# --- Gradio UI: one textbox in, raw checklist JSON plus verdict out -------
iface = gr.Interface(
    fn=assess,
    inputs=gr.Textbox(lines=8, label="Paste REU Problem Statement"),
    outputs=[
        gr.Code(label="Checklist JSON"),
        gr.Textbox(label="Verdict"),
    ],
    title="🧠 REU Problem Statement Evaluator",
    description=(
        "Uses the Hugging Face Inference API so no large model is loaded "
        "in the Space itself."
    ),
)

iface.launch()