""" MicroGuard — RAG Faithfulness Detector Free, real-time, privacy-preserving quality checker for RAG systems. """ import gradio as gr import torch import torch.nn.functional as F import time import json import os from huggingface_hub import login from transformers import AutoModelForCausalLM, AutoTokenizer from peft import PeftModel # Login with HF token from Space secrets (needed for gated models like Gemma) HF_TOKEN = os.environ.get("HF_TOKEN") if HF_TOKEN: login(token=HF_TOKEN) print("Logged in to HuggingFace") # ─── Configuration ─── MODEL_CONFIGS = { "Gemma-270M (Fastest)": { "base": "google/gemma-3-270m-it", "adapter": "tarun5986/MicroGuard-Gemma-270M", }, "Qwen-0.5B (Balanced)": { "base": "Qwen/Qwen2.5-0.5B-Instruct", "adapter": "tarun5986/MicroGuard-Qwen-0.5B", }, "Gemma-1B (Best Accuracy)": { "base": "google/gemma-3-1b-it", "adapter": "tarun5986/MicroGuard-Gemma-1B", }, } DEFAULT_MODEL = "Qwen-0.5B (Balanced)" current_model = None current_tokenizer = None current_model_name = None faithful_ids = None unfaithful_ids = None DEVICE = "cuda" if torch.cuda.is_available() else "cpu" DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32 SYSTEM_PROMPT = "You are a faithfulness evaluator for RAG systems. You must respond with exactly one word." USER_TEMPLATE = """Context: {context} Question: {query} Answer: {answer} Is every claim in the answer fully supported by the context? Respond with exactly one word: FAITHFUL or UNFAITHFUL.""" def load_model(model_choice): global current_model, current_tokenizer, current_model_name, faithful_ids, unfaithful_ids if model_choice == current_model_name: return f"Model loaded: {model_choice}" config = MODEL_CONFIGS[model_choice] try: tokenizer = AutoTokenizer.from_pretrained(config["base"], trust_remote_code=True, token=HF_TOKEN) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token base_model = AutoModelForCausalLM.from_pretrained( config["base"], torch_dtype=DTYPE, trust_remote_code=True, token=HF_TOKEN ) model = PeftModel.from_pretrained(base_model, config["adapter"], token=HF_TOKEN) model = model.to(DEVICE) model.eval() current_model = model current_tokenizer = tokenizer current_model_name = model_choice faithful_ids = tokenizer.encode("FAITHFUL", add_special_tokens=False) unfaithful_ids = tokenizer.encode("UNFAITHFUL", add_special_tokens=False) return f"Loaded: {model_choice}" except Exception as e: return f"Error: {str(e)}" def check_faithfulness(context, question, answer, model_choice): global current_model, current_tokenizer, faithful_ids, unfaithful_ids if not context or not answer: return "", "Please provide both context and answer.", "" if model_choice != current_model_name: status = load_model(model_choice) if "Error" in status: return "", status, "" context = context[:900] question = (question or "N/A")[:200] answer = answer[:400] msg = USER_TEMPLATE.format(context=context, query=question, answer=answer) messages = [{"role": "user", "content": SYSTEM_PROMPT + "\n\n" + msg}] try: prompt = current_tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) except Exception: prompt = f"<|im_start|>user\n{SYSTEM_PROMPT}\n\n{msg}<|im_end|>\n<|im_start|>assistant\n" inputs = current_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512) inputs = {k: v.to(DEVICE) for k, v in inputs.items()} start_time = time.time() with torch.no_grad(): outputs = current_model(**inputs) logits = outputs.logits[:, -1, :] f_score = logits[0, faithful_ids[0]].item() u_score = logits[0, 
unfaithful_ids[0]].item() latency = (time.time() - start_time) * 1000 # Calibrated threshold: require faithful logit to be 0.9 higher than unfaithful # This improves balanced accuracy from 67% to 72% by catching more hallucinations CALIBRATION_THRESHOLD = 0.9 margin = f_score - u_score scores = torch.tensor([f_score, u_score]) probs = F.softmax(scores, dim=0) confidence = probs.max().item() * 100 if margin > CALIBRATION_THRESHOLD: verdict = "FAITHFUL" color = "#22c55e" explanation = "The answer appears to be supported by the provided context." else: verdict = "UNFAITHFUL" color = "#ef4444" explanation = "The answer may contain claims not supported by the context." result_html = f"""
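    # Render the verdict as a color-coded HTML card for the results panel.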
    result_html = f"""
    <div style="text-align:center; padding:16px; border:2px solid {color}; border-radius:12px;">
        <div style="font-size:1.5em; font-weight:bold; color:{color};">{verdict}</div>
        <div>Confidence: {confidence:.1f}%</div>
        <div style="opacity:0.7; font-size:0.9em;">{latency:.0f}ms | {current_model_name} | Zero API cost</div>
    </div>
    """
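    # Markdown companion to the card: the same verdict and numbers as a small table.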
""" details = f"""**{explanation}** | Metric | Value | |--------|-------| | Verdict | {verdict} | | Confidence | {confidence:.1f}% | | Latency | {latency:.0f}ms | | Model | {current_model_name} | *All processing runs locally. No data sent to external APIs.*""" return result_html, details, f"{latency:.0f}ms" # ─── Examples ─── EXAMPLES = [ [ "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It was designed by engineer Gustave Eiffel and built from 1887 to 1889 as the centerpiece of the 1889 World's Fair.", "Who designed the Eiffel Tower?", "The Eiffel Tower was designed by engineer Gustave Eiffel. It was built between 1887 and 1889 for the World's Fair in Paris.", ], [ "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It was designed by engineer Gustave Eiffel and built from 1887 to 1889 as the centerpiece of the 1889 World's Fair.", "Who designed the Eiffel Tower?", "The Eiffel Tower was designed by Alexander Graham Bell in 1920 and is located in London, England.", ], [ "Python was conceived in the late 1980s by Guido van Rossum at Centrum Wiskunde & Informatica (CWI) in the Netherlands. Its first version, Python 0.9.0, was released in February 1991.", "When was Python first released?", "Python was first released in February 1991. It was created by Guido van Rossum while working at CWI in the Netherlands.", ], [ "Python was conceived in the late 1980s by Guido van Rossum at Centrum Wiskunde & Informatica (CWI) in the Netherlands. Its first version, Python 0.9.0, was released in February 1991.", "When was Python first released?", "Python was created by James Gosling at Sun Microsystems and first released in 1995. It is primarily used for building Android applications.", ], ] # ─── Interface ─── DESCRIPTION = """ # MicroGuard: RAG Faithfulness Detector **Instantly check if your RAG system's answers are faithful to the retrieved context.** No API keys. No data leaves your device. Completely free. Built on fine-tuned sub-1B parameter language models. 
[Paper](https://github.com/tarun-ks/MicroGuard) | [Models](https://huggingface.co/tarun5986) | [GitHub](https://github.com/tarun-ks/MicroGuard)
"""

with gr.Blocks(
    title="MicroGuard — RAG Faithfulness Detector",
    theme=gr.themes.Soft(),
    css=".gradio-container {max-width: 900px !important}",
) as demo:
    gr.Markdown(DESCRIPTION)

    with gr.Row():
        model_selector = gr.Dropdown(
            choices=list(MODEL_CONFIGS.keys()),
            value=DEFAULT_MODEL,
            label="Model",
            scale=3,
        )
        latency_display = gr.Textbox(label="Latency", interactive=False, scale=1)

    with gr.Row():
        with gr.Column(scale=3):
            context_input = gr.Textbox(
                label="Retrieved Context",
                placeholder="Paste the document or passage your RAG system retrieved...",
                lines=6,
            )
            question_input = gr.Textbox(
                label="User Question (optional)",
                placeholder="What did the user ask?",
                lines=1,
            )
            answer_input = gr.Textbox(
                label="Generated Answer",
                placeholder="Paste the answer your RAG system generated...",
                lines=3,
            )
            check_btn = gr.Button("Check Faithfulness", variant="primary", size="lg")
        with gr.Column(scale=2):
            result_html = gr.HTML()
            details_output = gr.Markdown()

    check_btn.click(
        fn=check_faithfulness,
        inputs=[context_input, question_input, answer_input, model_selector],
        outputs=[result_html, details_output, latency_display],
    )

    gr.Examples(
        examples=EXAMPLES,
        inputs=[context_input, question_input, answer_input],
        label="Try these examples (1 and 3 are faithful; 2 and 4 are unfaithful)",
    )

    gr.Markdown("""
---
**How it works:** MicroGuard fine-tunes small language models with LoRA on 127K+ faithfulness-labeled examples from RAGBench, RAGTruth, and HaluBench. At inference, constrained decoding compares the FAITHFUL vs. UNFAITHFUL logits, giving deterministic classification with zero garbage outputs.

**Models:** [Qwen-0.5B](https://huggingface.co/tarun5986/MicroGuard-Qwen-0.5B) | [SmolLM-135M](https://huggingface.co/tarun5986/MicroGuard-SmolLM-135M) | [TinyLlama-1.1B](https://huggingface.co/tarun5986/MicroGuard-TinyLlama-1.1B) | [Gemma-270M](https://huggingface.co/tarun5986/MicroGuard-Gemma-270M) | [Gemma-1B](https://huggingface.co/tarun5986/MicroGuard-Gemma-1B)
""")

if __name__ == "__main__":
    print(f"Loading default model: {DEFAULT_MODEL}")
    load_model(DEFAULT_MODEL)
    print("Starting MicroGuard demo...")
    demo.launch()
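# Standalone usage (sketch): the same check runs without the UI. The snippet
# below is illustrative only; it calls the two functions defined above with
# made-up example strings.
#
#   load_model("Qwen-0.5B (Balanced)")
#   html, details, latency = check_faithfulness(
#       context="Paris is the capital of France.",
#       question="What is the capital of France?",
#       answer="The capital of France is Berlin.",
#       model_choice="Qwen-0.5B (Balanced)",
#   )
#   print(details)  # Markdown verdict table; `latency` is a string like "42ms".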