"""
MicroGuard: RAG Faithfulness Detector
Free, real-time, privacy-preserving quality checker for RAG systems.
"""
import gradio as gr
import torch
import torch.nn.functional as F
import time
import json
import os
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
# Login with HF token from Space secrets (needed for gated models like Gemma)
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    login(token=HF_TOKEN)
    print("Logged in to HuggingFace")
# ─── Configuration ───
MODEL_CONFIGS = {
"Gemma-270M (Fastest)": {
"base": "google/gemma-3-270m-it",
"adapter": "tarun5986/MicroGuard-Gemma-270M",
},
"Qwen-0.5B (Balanced)": {
"base": "Qwen/Qwen2.5-0.5B-Instruct",
"adapter": "tarun5986/MicroGuard-Qwen-0.5B",
},
"Gemma-1B (Best Accuracy)": {
"base": "google/gemma-3-1b-it",
"adapter": "tarun5986/MicroGuard-Gemma-1B",
},
}
DEFAULT_MODEL = "Qwen-0.5B (Balanced)"
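# Lazily loaded model state; populated by load_model() on first use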
current_model = None
current_tokenizer = None
current_model_name = None
faithful_ids = None
unfaithful_ids = None
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32
SYSTEM_PROMPT = "You are a faithfulness evaluator for RAG systems. You must respond with exactly one word."
USER_TEMPLATE = """Context: {context}
Question: {query}
Answer: {answer}
Is every claim in the answer fully supported by the context? Respond with exactly one word: FAITHFUL or UNFAITHFUL."""
def load_model(model_choice):
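    """Load the selected base model and its MicroGuard LoRA adapter.

    Caches the result in module-level globals, so repeated calls with the
    same choice are no-ops. Returns a human-readable status string.
    """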
    global current_model, current_tokenizer, current_model_name, faithful_ids, unfaithful_ids
    if model_choice == current_model_name:
        return f"Model loaded: {model_choice}"
    config = MODEL_CONFIGS[model_choice]
    try:
        tokenizer = AutoTokenizer.from_pretrained(config["base"], trust_remote_code=True, token=HF_TOKEN)
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        base_model = AutoModelForCausalLM.from_pretrained(
            config["base"], torch_dtype=DTYPE, trust_remote_code=True, token=HF_TOKEN
        )
        model = PeftModel.from_pretrained(base_model, config["adapter"], token=HF_TOKEN)
        model = model.to(DEVICE)
        model.eval()
        current_model = model
        current_tokenizer = tokenizer
        current_model_name = model_choice
        faithful_ids = tokenizer.encode("FAITHFUL", add_special_tokens=False)
        unfaithful_ids = tokenizer.encode("UNFAITHFUL", add_special_tokens=False)
        return f"Loaded: {model_choice}"
    except Exception as e:
        return f"Error: {str(e)}"
def check_faithfulness(context, question, answer, model_choice):
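    """Classify whether `answer` is faithful to `context`.

    Builds a single-turn prompt, runs one forward pass (no generation), and
    compares the first-token logits of "FAITHFUL" vs "UNFAITHFUL" against a
    calibrated margin. Returns (result HTML, details markdown, latency text).
    """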
    global current_model, current_tokenizer, faithful_ids, unfaithful_ids
    if not context or not answer:
        return "", "Please provide both context and answer.", ""
    if model_choice != current_model_name:
        status = load_model(model_choice)
        if "Error" in status:
            return "", status, ""
    # Truncate inputs so the assembled prompt fits the 512-token window below
    context = context[:900]
    question = (question or "N/A")[:200]
    answer = answer[:400]
    msg = USER_TEMPLATE.format(context=context, query=question, answer=answer)
    messages = [{"role": "user", "content": SYSTEM_PROMPT + "\n\n" + msg}]
    try:
        prompt = current_tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
    except Exception:
        # Fall back to a ChatML-style prompt if the tokenizer has no chat template
        prompt = f"<|im_start|>user\n{SYSTEM_PROMPT}\n\n{msg}<|im_end|>\n<|im_start|>assistant\n"
    inputs = current_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
    start_time = time.time()
    with torch.no_grad():
        outputs = current_model(**inputs)
    # Constrained decoding: score only the first token of each verdict word at
    # the next-token position, rather than free-form generation
    logits = outputs.logits[:, -1, :]
    f_score = logits[0, faithful_ids[0]].item()
    u_score = logits[0, unfaithful_ids[0]].item()
    latency = (time.time() - start_time) * 1000
    # Calibrated threshold: require the faithful logit to be 0.9 higher than the
    # unfaithful one. This improves balanced accuracy from 67% to 72% by catching
    # more hallucinations.
    CALIBRATION_THRESHOLD = 0.9
    margin = f_score - u_score
    scores = torch.tensor([f_score, u_score])
    probs = F.softmax(scores, dim=0)
    confidence = probs.max().item() * 100
    if margin > CALIBRATION_THRESHOLD:
        verdict = "FAITHFUL"
        color = "#22c55e"
        explanation = "The answer appears to be supported by the provided context."
    else:
        verdict = "UNFAITHFUL"
        color = "#ef4444"
        explanation = "The answer may contain claims not supported by the context."
    result_html = f"""
    <div style="text-align: center; padding: 30px; border-radius: 12px; background: linear-gradient(135deg, {color}15, {color}05);">
        <div style="font-size: 56px; font-weight: 800; color: {color}; margin: 0; letter-spacing: 2px;">{verdict}</div>
        <div style="font-size: 18px; color: #666; margin-top: 8px;">Confidence: {confidence:.1f}%</div>
        <div style="font-size: 13px; color: #999; margin-top: 4px;">{latency:.0f}ms | {current_model_name} | Zero API cost</div>
    </div>
    """
    details = f"""**{explanation}**

| Metric | Value |
|--------|-------|
| Verdict | {verdict} |
| Confidence | {confidence:.1f}% |
| Latency | {latency:.0f}ms |
| Model | {current_model_name} |

*All processing runs locally. No data sent to external APIs.*"""
    return result_html, details, f"{latency:.0f}ms"
# ─── Examples ───
EXAMPLES = [
    [
        "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It was designed by engineer Gustave Eiffel and built from 1887 to 1889 as the centerpiece of the 1889 World's Fair.",
        "Who designed the Eiffel Tower?",
        "The Eiffel Tower was designed by engineer Gustave Eiffel. It was built between 1887 and 1889 for the World's Fair in Paris.",
    ],
    [
        "The Eiffel Tower is a wrought-iron lattice tower on the Champ de Mars in Paris, France. It was designed by engineer Gustave Eiffel and built from 1887 to 1889 as the centerpiece of the 1889 World's Fair.",
        "Who designed the Eiffel Tower?",
        "The Eiffel Tower was designed by Alexander Graham Bell in 1920 and is located in London, England.",
    ],
    [
        "Python was conceived in the late 1980s by Guido van Rossum at Centrum Wiskunde & Informatica (CWI) in the Netherlands. Its first version, Python 0.9.0, was released in February 1991.",
        "When was Python first released?",
        "Python was first released in February 1991. It was created by Guido van Rossum while working at CWI in the Netherlands.",
    ],
    [
        "Python was conceived in the late 1980s by Guido van Rossum at Centrum Wiskunde & Informatica (CWI) in the Netherlands. Its first version, Python 0.9.0, was released in February 1991.",
        "When was Python first released?",
        "Python was created by James Gosling at Sun Microsystems and first released in 1995. It is primarily used for building Android applications.",
    ],
]
# ─── Interface ───
DESCRIPTION = """
# MicroGuard: RAG Faithfulness Detector

**Instantly check if your RAG system's answers are faithful to the retrieved context.**
No API keys. No data leaves your device. Completely free.

Built on fine-tuned sub-1B parameter language models. [Paper](https://github.com/tarun-ks/MicroGuard) | [Models](https://huggingface.co/tarun5986) | [GitHub](https://github.com/tarun-ks/MicroGuard)
"""
with gr.Blocks(
title="MicroGuard β RAG Faithfulness Detector",
theme=gr.themes.Soft(),
css=".gradio-container {max-width: 900px !important}",
) as demo:
gr.Markdown(DESCRIPTION)
with gr.Row():
model_selector = gr.Dropdown(
choices=list(MODEL_CONFIGS.keys()),
value=DEFAULT_MODEL,
label="Model",
scale=3,
)
latency_display = gr.Textbox(label="Latency", interactive=False, scale=1)
with gr.Row():
with gr.Column(scale=3):
context_input = gr.Textbox(
label="Retrieved Context",
placeholder="Paste the document or passage your RAG system retrieved...",
lines=6,
)
question_input = gr.Textbox(
label="User Question (optional)",
placeholder="What did the user ask?",
lines=1,
)
answer_input = gr.Textbox(
label="Generated Answer",
placeholder="Paste the answer your RAG system generated...",
lines=3,
)
check_btn = gr.Button("Check Faithfulness", variant="primary", size="lg")
with gr.Column(scale=2):
result_html = gr.HTML()
details_output = gr.Markdown()
check_btn.click(
fn=check_faithfulness,
inputs=[context_input, question_input, answer_input, model_selector],
outputs=[result_html, details_output, latency_display],
)
gr.Examples(
examples=EXAMPLES,
inputs=[context_input, question_input, answer_input],
label="Try these examples (first two faithful, last two unfaithful)",
)
gr.Markdown("""
---
**How it works:** MicroGuard fine-tunes small language models with LoRA on 127K+ faithfulness-labeled examples from RAGBench, RAGTruth, and HaluBench. At inference, constrained decoding compares FAITHFUL vs UNFAITHFUL logits for deterministic classification with zero garbage outputs.
**Models:** [Qwen-0.5B](https://huggingface.co/tarun5986/MicroGuard-Qwen-0.5B) | [SmolLM-135M](https://huggingface.co/tarun5986/MicroGuard-SmolLM-135M) | [TinyLlama-1.1B](https://huggingface.co/tarun5986/MicroGuard-TinyLlama-1.1B) | [Gemma-270M](https://huggingface.co/tarun5986/MicroGuard-Gemma-270M) | [Gemma-1B](https://huggingface.co/tarun5986/MicroGuard-Gemma-1B)
""")
if __name__ == "__main__":
print(f"Loading default model: {DEFAULT_MODEL}")
load_model(DEFAULT_MODEL)
print("Starting MicroGuard demo...")
demo.launch()
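
# A minimal sketch of calling this demo programmatically once deployed, using
# the official gradio_client package. The Space id below is hypothetical, and
# the endpoint name assumes Gradio's default of naming the API after the
# click handler's function (no explicit api_name is set above):
#
#   from gradio_client import Client
#   client = Client("tarun5986/MicroGuard")  # hypothetical Space id
#   html, details, latency = client.predict(
#       "The Eiffel Tower was designed by Gustave Eiffel.",  # context
#       "Who designed the Eiffel Tower?",                    # question
#       "It was designed by Gustave Eiffel.",                # answer
#       "Qwen-0.5B (Balanced)",                              # model choice
#       api_name="/check_faithfulness",                      # assumed default name
#   )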